Date: 29 Oct 1999
From: Manfred Spraul
Subject: [patch, preliminary] move per-cpu variables into a structure
Below is my preliminary version of that patch; what do you think?

TODO:
* mark boot_cpu_data as __init:
on SMP, boot_cpu_data has nothing to do with the boot cpu: AFAICS it
contains the info of the last cpu that was started. On UP, it's now
redundant: current_cpu_data or cpu_data[smp_processor_id()] could be
used instead (see the first sketch below). Unfortunately, it is used
by several drivers to get the cache line length.
* I've left the following arrays outside cpu_state:
cpu_logical_map: cross-platform array, used by kernel/sched.c
__cpu_logical_map: the offset is not a cpu id, so it doesn't belong in
cpu_state.
init_tss: ?? I'll move it into cpu_state before submitting the patch to
Ingo/Linus.

* fix accessing the nmi counter: I overlooked one cpu_logical_map (see
the second sketch below).
* cacheline-align the array (one possible way is in the third sketch
below).
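
First sketch, to illustrate the boot_cpu_data point (this is not part
of the patch, just what "redundant on UP" means): with cpu_data[] now
defined unconditionally in setup.c, all three expressions below name
the same cpuinfo on UP, so a driver that only wants the cache line
length wouldn't need boot_cpu_data at all:

#include <asm/processor.h>

static void cpuinfo_equivalence(void)
{
	struct cpuinfo_x86 *a = &boot_cpu_data;
	struct cpuinfo_x86 *b = &current_cpu_data.x86;
	struct cpuinfo_x86 *c = &cpu_data[smp_processor_id()].x86;
	/* on UP, a, b and c all describe the one and only cpu */
}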
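
Second sketch, the nmi counter fix: the get_irq_list() hunk in the
patch below still indexes cpu_data[] directly with j; the next version
will map the logical cpu first, like the old array code did:

	p += sprintf(p, "NMI: ");
	for (j = 0; j < smp_num_cpus; j++)
		p += sprintf(p, "%10u ",
			atomic_read(&cpu_data[cpu_logical_map(j)].nmi_counter));
	p += sprintf(p, "\n");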
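
Third sketch, the cacheline alignment: I'd put the attribute on the
type, so sizeof(struct cpu_state) is rounded up and no two elements of
cpu_data[] can ever share a line. Whether L1_CACHE_BYTES from
<asm/cache.h> is the right constant (vs. something config-dependent)
is still open, so treat this as a sketch only:

#include <asm/cache.h>	/* L1_CACHE_BYTES */

struct cpu_state {
	/* ... members exactly as in the processor.h hunk below ... */
} __attribute__((__aligned__(L1_CACHE_BYTES)));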

--
Manfred

(Thanks Alan, I've checked the MP spec and removed the loop)

diff -ur 2.3/arch/i386/kernel/i386_ksyms.c build-2.3/arch/i386/kernel/i386_ksyms.c
--- 2.3/arch/i386/kernel/i386_ksyms.c Thu Oct 28 15:01:23 1999
+++ build-2.3/arch/i386/kernel/i386_ksyms.c Fri Oct 29 17:43:16 1999
@@ -30,6 +30,7 @@

/* platform dependent support */
EXPORT_SYMBOL(boot_cpu_data);
+EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(EISA_bus);
EXPORT_SYMBOL(MCA_bus);
EXPORT_SYMBOL(__verify_write);
@@ -38,8 +39,6 @@
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__io_virt_debug);
-EXPORT_SYMBOL(local_bh_count);
-EXPORT_SYMBOL(local_irq_count);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(disable_irq_nosync);
@@ -84,7 +83,6 @@
#endif

#ifdef __SMP__
-EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(cpu_number_map);
EXPORT_SYMBOL(__cpu_logical_map);
diff -ur 2.3/arch/i386/kernel/io_apic.c build-2.3/arch/i386/kernel/io_apic.c
--- 2.3/arch/i386/kernel/io_apic.c Sat Oct 9 22:51:26 1999
+++ build-2.3/arch/i386/kernel/io_apic.c Fri Oct 29 17:43:16 1999
@@ -1111,20 +1111,19 @@
return 0;
}

-extern atomic_t nmi_counter[NR_CPUS];
-
static int __init nmi_irq_works(void)
{
atomic_t tmp[NR_CPUS];
int j, cpu;

- memcpy(tmp, nmi_counter, sizeof(tmp));
+ for (j = 0; j < NR_CPUS; j++)
+ tmp[j] = cpu_data[j].nmi_counter;
sti();
mdelay(50);

for (j = 0; j < smp_num_cpus; j++) {
cpu = cpu_logical_map(j);
- if (atomic_read(nmi_counter+cpu) - atomic_read(tmp+cpu) <= 3) {
+ if (atomic_read(&cpu_data[cpu].nmi_counter) - atomic_read(&tmp[cpu]) <= 3) {
printk("CPU#%d NMI appears to be stuck.\n", cpu);
return 0;
}
diff -ur 2.3/arch/i386/kernel/irq.c build-2.3/arch/i386/kernel/irq.c
--- 2.3/arch/i386/kernel/irq.c Fri Oct 22 22:58:22 1999
+++ build-2.3/arch/i386/kernel/irq.c Fri Oct 29 17:43:16 1999
@@ -43,12 +43,6 @@
#include <asm/irq.h>
#include <linux/irq.h>

-
-unsigned int local_bh_count[NR_CPUS];
-unsigned int local_irq_count[NR_CPUS];
-
-extern atomic_t nmi_counter[NR_CPUS];
-
/*
* Linux has a controller-independent x86 interrupt architecture.
* every controller has a 'controller-template', that is used
@@ -166,13 +160,13 @@
p += sprintf(p, "NMI: ");
for (j = 0; j < smp_num_cpus; j++)
p += sprintf(p, "%10u ",
- atomic_read(nmi_counter+cpu_logical_map(j)));
+ atomic_read(&cpu_data[j].nmi_counter));
p += sprintf(p, "\n");
#if CONFIG_SMP
p += sprintf(p, "LOC: ");
for (j = 0; j < smp_num_cpus; j++)
p += sprintf(p, "%10u ",
- apic_timer_irqs[cpu_logical_map(j)]);
+ cpu_data[j].apic_timer_irqs);
p += sprintf(p, "\n");
#endif
p += sprintf(p, "ERR: %10lu\n", irq_err_count);
@@ -220,14 +214,21 @@
int cpu = smp_processor_id();

printk("\n%s, CPU %d:\n", str, cpu);
- printk("irq: %d [%d %d]\n",
- atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]);
- printk("bh: %d [%d %d]\n",
- atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]);
+
+ printk("irq: %d [", atomic_read(&global_irq_count));
+ for(i=0;i<NR_CPUS;i++)
+ printk(" %d\n", cpu_data[i].local_irq_count);
+ printk(" ]\n");
+
+ printk("bh: %d [", atomic_read(&global_bh_count));
+ for(i=0;i<NR_CPUS;i++)
+ printk(" %d\n", cpu_data[i].local_bh_count);
+ printk(" ]\n");
+
stack = (unsigned long *) &stack;
for (i = 40; i ; i--) {
unsigned long x = *++stack;
- if (x > (unsigned long) &get_option && x < (unsigned long) &vsprintf) {
+ if (x > (unsigned long) &_stext && x < (unsigned long) &_etext) {
printk("<[%08lx]> ", x);
}
}
@@ -286,7 +287,7 @@
* already executing in one..
*/
if (!atomic_read(&global_irq_count)) {
- if (local_bh_count[cpu] || !atomic_read(&global_bh_count))
+ if (cpu_data[cpu].local_bh_count || !atomic_read(&global_bh_count))
break;
}

@@ -306,7 +307,7 @@
continue;
if (global_irq_lock)
continue;
- if (!local_bh_count[cpu] && atomic_read(&global_bh_count))
+ if (!cpu_data[cpu].local_bh_count && atomic_read(&global_bh_count))
continue;
if (!test_and_set_bit(0,&global_irq_lock))
break;
@@ -391,7 +392,7 @@
if (flags & (1 << EFLAGS_IF_SHIFT)) {
int cpu = smp_processor_id();
__cli();
- if (!local_irq_count[cpu])
+ if (!cpu_data[cpu].local_irq_count)
get_irqlock(cpu);
}
}
@@ -400,7 +401,7 @@
{
int cpu = smp_processor_id();

- if (!local_irq_count[cpu])
+ if (!cpu_data[cpu].local_irq_count)
release_irqlock(cpu);
__sti();
}
@@ -424,7 +425,7 @@
retval = 2 + local_enabled;

/* check for global flags if we're not in an interrupt */
- if (!local_irq_count[smp_processor_id()]) {
+ if (!current_cpu_data.local_irq_count) {
if (local_enabled)
retval = 1;
if (global_irq_holder == (unsigned char) smp_processor_id())
@@ -515,7 +516,7 @@
{
disable_irq_nosync(irq);

- if (!local_irq_count[smp_processor_id()]) {
+ if (!current_cpu_data.local_irq_count) {
do {
barrier();
} while (irq_desc[irq].status & IRQ_INPROGRESS);
diff -ur 2.3/arch/i386/kernel/process.c build-2.3/arch/i386/kernel/process.c
--- 2.3/arch/i386/kernel/process.c Thu Oct 28 15:01:23 1999
+++ build-2.3/arch/i386/kernel/process.c Fri Oct 29 17:43:16 1999
@@ -81,7 +81,7 @@

while (1) {
while (!current->need_resched) {
- if (!current_cpu_data.hlt_works_ok)
+ if (!current_cpu_data.x86.hlt_works_ok)
continue;
if (hlt_counter)
continue;
diff -ur 2.3/arch/i386/kernel/setup.c build-2.3/arch/i386/kernel/setup.c
--- 2.3/arch/i386/kernel/setup.c Thu Oct 28 15:01:23 1999
+++ build-2.3/arch/i386/kernel/setup.c Fri Oct 29 17:43:16 1999
@@ -79,6 +79,9 @@

unsigned long mmu_cr4_features __initdata = 0;

+/* Per CPU bogomips and other parameters */
+struct cpu_state cpu_data[NR_CPUS];
+
/*
* Bus types ..
*/
@@ -1346,10 +1349,10 @@
"16", "17", "psn", "19", "20", "21", "22", "mmx",
"24", "kni", "26", "27", "28", "29", "30", "31"
};
- struct cpuinfo_x86 *c = cpu_data;
int i, n;

- for(n=0; n<NR_CPUS; n++, c++) {
+ for(n=0; n<NR_CPUS; n++) {
+ struct cpuinfo_x86 *c = &cpu_data[n].x86;
#ifdef __SMP__
if (!(cpu_online_map & (1<<n)))
continue;
diff -ur 2.3/arch/i386/kernel/smp.c build-2.3/arch/i386/kernel/smp.c
--- 2.3/arch/i386/kernel/smp.c Sat Oct 9 22:51:26 1999
+++ build-2.3/arch/i386/kernel/smp.c Fri Oct 29 17:43:16 1999
@@ -479,7 +479,7 @@
*/
clear_bit(smp_processor_id(), &cpu_online_map);

- if (cpu_data[smp_processor_id()].hlt_works_ok)
+ if (cpu_data[smp_processor_id()].x86.hlt_works_ok)
for(;;) __asm__("hlt");
for (;;);
}
@@ -594,10 +594,6 @@
* closely follows bus clocks.
*/

-int prof_multiplier[NR_CPUS] = { 1, };
-int prof_old_multiplier[NR_CPUS] = { 1, };
-int prof_counter[NR_CPUS] = { 1, };
-
/*
* The timer chip is already set up at HZ interrupts per second here,
* but we do not accept timer interrupts yet. We only allow the BP
@@ -833,7 +829,7 @@
* accordingly.
*/
for (i = 0; i < NR_CPUS; ++i)
- prof_multiplier[i] = multiplier;
+ cpu_data[i].prof_multiplier=multiplier;

return 0;
}
@@ -864,7 +860,7 @@
if (!user)
x86_do_profile(regs->eip);

- if (--prof_counter[cpu] <= 0) {
+ if (--cpu_data[cpu].prof_counter <= 0) {
int system = 1 - user;
struct task_struct * p = current;

@@ -876,10 +872,10 @@
*
* Interrupts are already masked off at this point.
*/
- prof_counter[cpu] = prof_multiplier[cpu];
- if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
- __setup_APIC_LVTT(calibration_result/prof_counter[cpu]);
- prof_old_multiplier[cpu] = prof_counter[cpu];
+ cpu_data[cpu].prof_counter = cpu_data[cpu].prof_multiplier;
+ if (cpu_data[cpu].prof_counter != cpu_data[cpu].prof_old_multiplier) {
+ __setup_APIC_LVTT(calibration_result/cpu_data[cpu].prof_counter);
+ cpu_data[cpu].prof_old_multiplier = cpu_data[cpu].prof_counter;
}

/*
@@ -931,14 +927,13 @@
* [ if a single-CPU system runs an SMP kernel then we call the local
* interrupt as well. Thus we cannot inline the local irq ... ]
*/
-unsigned int apic_timer_irqs [NR_CPUS] = { 0, };

void smp_apic_timer_interrupt(struct pt_regs * regs)
{
/*
* the NMI deadlock-detector uses this.
*/
- apic_timer_irqs[smp_processor_id()]++;
+ current_cpu_data.apic_timer_irqs++;

/*
* NOTE! We'd better ACK the irq immediately,
diff -ur 2.3/arch/i386/kernel/smpboot.c build-2.3/arch/i386/kernel/smpboot.c
--- 2.3/arch/i386/kernel/smpboot.c Fri Oct 22 22:58:22 1999
+++ build-2.3/arch/i386/kernel/smpboot.c Fri Oct 29 22:21:51 1999
@@ -74,8 +74,6 @@
static volatile unsigned long cpu_callin_map = 0;
static volatile unsigned long cpu_callout_map = 0;

-/* Per CPU bogomips and other parameters */
-struct cpuinfo_x86 cpu_data[NR_CPUS];
/* Processor that is doing the boot up */
static unsigned int boot_cpu_id = 0;

@@ -88,7 +86,6 @@
* Various Linux-internal data structures created from the
* MP-table.
*/
-int apic_version [NR_CPUS];
int mp_bus_id_to_type [MAX_MP_BUSSES] = { -1, };
extern int nr_ioapics;
extern struct mpc_config_ioapic mp_ioapics [MAX_IO_APICS];
@@ -241,7 +238,7 @@
printk("BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
ver = 0x10;
}
- apic_version[m->mpc_apicid] = ver;
+ cpu_data[m->mpc_apicid].apic_version = ver;
}

static void __init MP_bus_info (struct mpc_config_bus *m)
@@ -485,8 +482,8 @@
* It's initialized to zero otherwise, representing
* a discrete 82489DX.
*/
- apic_version[0] = 0x10;
- apic_version[1] = 0x10;
+ cpu_data[0].apic_version = 0x10;
+ cpu_data[1].apic_version = 0x10;
}
/*
* Read the physical hardware table. Anything here will override the
@@ -584,8 +581,8 @@
smp_found_config = 1;

cpu_present_map |= 2; /* or in id 1 */
- apic_version[1] |= 0x10; /* integrated APIC */
- apic_version[0] |= 0x10;
+ cpu_data[0].apic_version |= 0x10; /* integrated APIC */
+ cpu_data[1].apic_version |= 0x10;

mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
}
@@ -649,24 +646,25 @@

void __init smp_store_cpu_info(int id)
{
- struct cpuinfo_x86 *c=&cpu_data[id];
+ struct cpu_state *c=&cpu_data[id];

- *c = boot_cpu_data;
+ c->x86 = boot_cpu_data;
c->pte_quick = 0;
c->pgd_quick = 0;
c->pgtable_cache_sz = 0;
- identify_cpu(c);
+ identify_cpu(&c->x86);
/*
* Mask B, Pentium, but not Pentium MMX
*/
- if (c->x86_vendor == X86_VENDOR_INTEL &&
- c->x86 == 5 &&
- c->x86_mask >= 1 && c->x86_mask <= 4 &&
- c->x86_model <= 3)
+ if (c->x86.x86_vendor == X86_VENDOR_INTEL &&
+ c->x86.x86 == 5 &&
+ c->x86.x86_mask >= 1 && c->x86.x86_mask <= 4 &&
+ c->x86.x86_model <= 3) {
/*
* Remember we have B step Pentia with bugs
*/
smp_b_stepping = 1;
+ }
}

/*
@@ -858,7 +856,6 @@
static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
-static unsigned long long tsc_values[NR_CPUS] = { 0, };

#define NR_LOOPS 5

@@ -930,7 +927,7 @@
*/
atomic_inc(&tsc_count_start);

- rdtscll(tsc_values[smp_processor_id()]);
+ rdtscll(current_cpu_data.tsc_values);
/*
* We clear the TSC in the last loop:
*/
@@ -951,7 +948,7 @@
if (!(cpu_online_map & (1 << i)))
continue;

- t0 = tsc_values[i];
+ t0 = cpu_data[i].tsc_values;
sum += t0;
}
avg = div64(sum, smp_num_cpus);
@@ -961,7 +958,7 @@
if (!(cpu_online_map & (1 << i)))
continue;

- delta = tsc_values[i] - avg;
+ delta = cpu_data[i].tsc_values - avg;
if (delta < 0)
delta = -delta;
/*
@@ -974,7 +971,7 @@
printk("\n");
}
realdelta = div64(delta, one_usec);
- if (tsc_values[i] < avg)
+ if (cpu_data[i].tsc_values < avg)
realdelta = -realdelta;

printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
@@ -1002,7 +999,7 @@
atomic_inc(&tsc_count_start);
while (atomic_read(&tsc_count_start) != smp_num_cpus) mb();

- rdtscll(tsc_values[smp_processor_id()]);
+ rdtscll(current_cpu_data.tsc_values);
if (i == NR_LOOPS-1)
write_tsc(0, 0);

@@ -1215,7 +1212,7 @@
* Be paranoid about clearing APIC errors.
*/

- if (APIC_INTEGRATED(apic_version[i])) {
+ if (APIC_INTEGRATED(cpu_data[i].apic_version)) {
apic_readaround(APIC_SPIV);
apic_write(APIC_ESR, 0);
accept_status = (apic_read(APIC_ESR) & 0xEF);
@@ -1274,7 +1271,7 @@
* send the STARTUP IPIs.
*/

- if (APIC_INTEGRATED(apic_version[i]))
+ if (APIC_INTEGRATED(cpu_data[i].apic_version))
num_starts = 2;
else
num_starts = 0;
@@ -1353,7 +1350,7 @@
/* number CPUs logically, starting from 1 (BSP is 0) */
printk("OK.\n");
printk("CPU%d: ", i);
- print_cpu_info(&cpu_data[i]);
+ print_cpu_info(&cpu_data[i].x86);
} else {
if (*((volatile unsigned char *)phys_to_virt(8192))
== 0xA5) /* trampoline code not run */
@@ -1413,10 +1410,6 @@
* Cycle through the processors sending APIC IPIs to boot each.
*/

-extern int prof_multiplier[NR_CPUS];
-extern int prof_old_multiplier[NR_CPUS];
-extern int prof_counter[NR_CPUS];
-
void __init smp_boot_cpus(void)
{
int i;
@@ -1432,9 +1425,9 @@

for (i = 0; i < NR_CPUS; i++) {
cpu_number_map[i] = -1;
- prof_counter[i] = 1;
- prof_old_multiplier[i] = 1;
- prof_multiplier[i] = 1;
+ cpu_data[i].prof_counter = 1;
+ cpu_data[i].prof_old_multiplier = 1;
+ cpu_data[i].prof_multiplier = 1;
}

/*
@@ -1444,7 +1437,7 @@
smp_store_cpu_info(boot_cpu_id); /* Final full version of the data */
smp_tune_scheduling();
printk("CPU%d: ", boot_cpu_id);
- print_cpu_info(&cpu_data[boot_cpu_id]);
+ print_cpu_info(&cpu_data[boot_cpu_id].x86);

/*
* not necessary because the MP table should list the boot
@@ -1592,7 +1585,7 @@
unsigned long bogosum = 0;
for(i = 0; i < 32; i++)
if (cpu_online_map&(1<<i))
- bogosum+=cpu_data[i].loops_per_sec;
+ bogosum+=cpu_data[i].x86.loops_per_sec;
printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
cpucount+1,
(bogosum+2500)/500000,
diff -ur 2.3/arch/i386/kernel/traps.c build-2.3/arch/i386/kernel/traps.c
--- 2.3/arch/i386/kernel/traps.c Fri Oct 22 22:58:22 1999
+++ build-2.3/arch/i386/kernel/traps.c Fri Oct 29 17:43:16 1999
@@ -334,8 +334,6 @@
printk("Do you have a strange power saving mode enabled?\n");
}

-atomic_t nmi_counter[NR_CPUS];
-
#if CONFIG_SMP

int nmi_watchdog = 1;
@@ -368,24 +366,21 @@
* here too!]
*/

- static unsigned int last_irq_sums [NR_CPUS] = { 0, },
- alert_counter [NR_CPUS] = { 0, };
-
/*
* Since current-> is always on the stack, and we always switch
* the stack NMI-atomically, it's safe to use smp_processor_id().
*/
int sum, cpu = smp_processor_id();

- sum = apic_timer_irqs[cpu];
+ sum = cpu_data[cpu].apic_timer_irqs;

- if (last_irq_sums[cpu] == sum) {
+ if (cpu_data[cpu].last_irq_sums == sum) {
/*
* Ayiee, looks like this CPU is stuck ...
* wait a few IRQs (5 seconds) before doing the oops ...
*/
- alert_counter[cpu]++;
- if (alert_counter[cpu] == 5*HZ) {
+ cpu_data[cpu].alert_counter++;
+ if (cpu_data[cpu].alert_counter == 5*HZ) {
spin_lock(&nmi_print_lock);
spin_unlock(&console_lock); // we are in trouble anyway
printk("NMI Watchdog detected LOCKUP on CPU%d, registers:\n", cpu);
@@ -396,8 +391,8 @@
do_exit(SIGSEGV);
}
} else {
- last_irq_sums[cpu] = sum;
- alert_counter[cpu] = 0;
+ cpu_data[cpu].last_irq_sums = sum;
+ cpu_data[cpu].alert_counter = 0;
}
}
#endif
@@ -406,7 +401,7 @@
{
unsigned char reason = inb(0x61);

- atomic_inc(nmi_counter+smp_processor_id());
+ atomic_inc(&current_cpu_data.nmi_counter);
if (!(reason & 0xc0)) {
#if CONFIG_SMP
/*
diff -ur 2.3/arch/i386/lib/delay.c build-2.3/arch/i386/lib/delay.c
--- 2.3/arch/i386/lib/delay.c Sun Dec 27 19:36:38 1998
+++ build-2.3/arch/i386/lib/delay.c Fri Oct 29 17:43:16 1999
@@ -35,7 +35,7 @@
int d0;
__asm__("mull %0"
:"=d" (xloops), "=&a" (d0)
- :"1" (xloops),"0" (current_cpu_data.loops_per_sec));
+ :"1" (xloops),"0" (current_cpu_data.x86.loops_per_sec));
__delay(xloops);
}

diff -ur 2.3/include/asm-i386/hardirq.h build-2.3/include/asm-i386/hardirq.h
--- 2.3/include/asm-i386/hardirq.h Thu Aug 26 17:59:45 1999
+++ build-2.3/include/asm-i386/hardirq.h Fri Oct 29 17:43:16 1999
@@ -2,23 +2,22 @@
#define __ASM_HARDIRQ_H

#include <linux/threads.h>
-
-extern unsigned int local_irq_count[NR_CPUS];
+#include <asm/processor.h>

/*
* Are we in an interrupt context? Either doing bottom half
* or hardware interrupt processing?
*/
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
- (local_irq_count[__cpu] + local_bh_count[__cpu] != 0); })
+ (cpu_data[__cpu].local_irq_count + cpu_data[__cpu].local_bh_count != 0); })

#ifndef __SMP__

-#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
+#define hardirq_trylock(cpu) (cpu_data[cpu].local_irq_count == 0)
#define hardirq_endlock(cpu) do { } while (0)

-#define hardirq_enter(cpu) (local_irq_count[cpu]++)
-#define hardirq_exit(cpu) (local_irq_count[cpu]--)
+#define hardirq_enter(cpu) (cpu_data[cpu].local_irq_count++)
+#define hardirq_exit(cpu) (cpu_data[cpu].local_irq_count--)

#define synchronize_irq() barrier()

@@ -41,19 +40,19 @@

static inline void hardirq_enter(int cpu)
{
- ++local_irq_count[cpu];
+ cpu_data[cpu].local_irq_count++;
atomic_inc(&global_irq_count);
}

static inline void hardirq_exit(int cpu)
{
atomic_dec(&global_irq_count);
- --local_irq_count[cpu];
+ cpu_data[cpu].local_irq_count--;
}

static inline int hardirq_trylock(int cpu)
{
- return !local_irq_count[cpu] && !test_bit(0,&global_irq_lock);
+ return !cpu_data[cpu].local_irq_count && !test_bit(0,&global_irq_lock);
}

#define hardirq_endlock(cpu) do { } while (0)
diff -ur 2.3/include/asm-i386/processor.h build-2.3/include/asm-i386/processor.h
--- 2.3/include/asm-i386/processor.h Thu Oct 28 15:01:27 1999
+++ build-2.3/include/asm-i386/processor.h Fri Oct 29 17:43:16 1999
@@ -12,6 +12,7 @@
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
+#include <asm/atomic.h>
#include <linux/threads.h>

/*
@@ -45,10 +46,6 @@
int f00f_bug;
int coma_bug;
unsigned long loops_per_sec;
- unsigned long *pgd_quick;
- unsigned long *pmd_quick;
- unsigned long *pte_quick;
- unsigned long pgtable_cache_sz;
};

#define X86_VENDOR_INTEL 0
@@ -99,13 +96,34 @@
extern struct cpuinfo_x86 boot_cpu_data;
extern struct tss_struct init_tss[NR_CPUS];

+struct cpu_state {
+ struct cpuinfo_x86 x86;
+ unsigned long *pgd_quick;
+ unsigned long *pmd_quick;
+ unsigned long *pte_quick;
+ unsigned long pgtable_cache_sz;
+
+ unsigned int local_bh_count;
+ unsigned int local_irq_count;
+ atomic_t nmi_counter;
+
#ifdef __SMP__
-extern struct cpuinfo_x86 cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data &boot_cpu_data
-#define current_cpu_data boot_cpu_data
+ unsigned int apic_timer_irqs;
+ int apic_version;
+
+ unsigned long long tsc_values;
+
+ int prof_multiplier;
+ int prof_old_multiplier;
+ int prof_counter;
+
+ unsigned long last_irq_sums;
+ unsigned long alert_counter;
#endif
+};
+
+extern struct cpu_state cpu_data[NR_CPUS];
+#define current_cpu_data cpu_data[smp_processor_id()]

#define cpu_has_pge \
(boot_cpu_data.x86_capability & X86_FEATURE_PGE)
@@ -114,7 +132,7 @@
#define cpu_has_pae \
(boot_cpu_data.x86_capability & X86_FEATURE_PAE)
#define cpu_has_tsc \
- (cpu_data[smp_processor_id()].x86_capability & X86_FEATURE_TSC)
+ (cpu_data[smp_processor_id()].x86.x86_capability & X86_FEATURE_TSC)

extern char ignore_irq13;

diff -ur 2.3/include/asm-i386/smp.h build-2.3/include/asm-i386/smp.h
--- 2.3/include/asm-i386/smp.h Fri Oct 22 22:58:29 1999
+++ build-2.3/include/asm-i386/smp.h Fri Oct 29 17:43:16 1999
@@ -198,8 +198,6 @@
return *((volatile unsigned long *)(APIC_BASE+reg));
}

-extern unsigned int apic_timer_irqs [NR_CPUS];
-
#ifdef CONFIG_X86_GOOD_APIC
# define FORCE_READ_AROUND_WRITE 0
# define apic_readaround(x)
diff -ur 2.3/include/asm-i386/softirq.h build-2.3/include/asm-i386/softirq.h
--- 2.3/include/asm-i386/softirq.h Wed Sep 1 18:22:47 1999
+++ build-2.3/include/asm-i386/softirq.h Fri Oct 29 17:43:16 1999
@@ -4,13 +4,11 @@
#include <asm/atomic.h>
#include <asm/hardirq.h>

-extern unsigned int local_bh_count[NR_CPUS];
+#define cpu_bh_disable(cpu) do { cpu_data[(cpu)].local_bh_count++; barrier(); } while (0)
+#define cpu_bh_enable(cpu) do { barrier(); cpu_data[(cpu)].local_bh_count--; } while (0)

-#define cpu_bh_disable(cpu) do { local_bh_count[(cpu)]++; barrier(); } while (0)
-#define cpu_bh_enable(cpu) do { barrier(); local_bh_count[(cpu)]--; } while (0)
-
-#define cpu_bh_trylock(cpu) (local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1))
-#define cpu_bh_endlock(cpu) (local_bh_count[(cpu)] = 0)
+#define cpu_bh_trylock(cpu) (cpu_data[(cpu)].local_bh_count ? 0 : (cpu_data[(cpu)].local_bh_count = 1))
+#define cpu_bh_endlock(cpu) (cpu_data[(cpu)].local_bh_count = 0)

#define local_bh_disable() cpu_bh_disable(smp_processor_id())
#define local_bh_enable() cpu_bh_enable(smp_processor_id())