linux/include/asm-x86/pda.h


#ifndef X86_64_PDA_H
#define X86_64_PDA_H
#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/page.h>

/* Per processor datastructure. %gs points to it while the kernel runs */
struct x8664_pda {
        struct task_struct *pcurrent;   /* 0  Current process */
        unsigned long data_offset;      /* 8 Per cpu data offset from linker
                                           address */
        unsigned long kernelstack;      /* 16 top of kernel stack for current */
        unsigned long oldrsp;           /* 24 user rsp for system call */
        int irqcount;                   /* 32 Irq nesting counter. Starts with -1 */
        unsigned int cpunumber;         /* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
        unsigned long stack_canary;     /* 40 stack canary value */
                                        /* gcc-ABI: this canary MUST be at
                                           offset 40!!! (a build-time check
                                           is sketched below the struct) */
#endif
        char *irqstackptr;
        unsigned int nodenumber;        /* number of current node */
        unsigned int __softirq_pending;
        unsigned int __nmi_count;       /* number of NMIs on this CPU */
        short mmu_state;
        short isidle;
        struct mm_struct *active_mm;
        unsigned apic_timer_irqs;
        unsigned irq0_irqs;
        unsigned irq_resched_count;
        unsigned irq_call_count;
        unsigned irq_tlb_count;
        unsigned irq_thermal_count;
        unsigned irq_threshold_count;
        unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;
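
/*
 * Illustrative sketch, not part of the original header: the hard offset
 * requirements noted in the comments above can be verified at build time
 * from C code (e.g. in pda_init()), using the kernel's BUILD_BUG_ON():
 *
 *	BUILD_BUG_ON(offsetof(struct x8664_pda, pcurrent) != 0);
 *	BUILD_BUG_ON(offsetof(struct x8664_pda, kernelstack) != 16);
 *	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
 *
 * The stack_canary check is the critical one: with CONFIG_CC_STACKPROTECTOR,
 * gcc emits %gs:40 references directly, so that field must never move.
 */
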
extern struct x8664_pda *_cpu_pda[];
extern struct x8664_pda boot_cpu_pda[];
extern void pda_init(int);
#define cpu_pda(i) (_cpu_pda[i])
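
/*
 * Access-path sketch (illustrative, with cpu standing for any valid CPU
 * number): cpu_pda(i) goes through the _cpu_pda[] pointer array and can
 * reach any CPU's PDA, e.g.
 *
 *	char *stk = cpu_pda(cpu)->irqstackptr;
 *
 * whereas the read_pda()/write_pda() family defined below uses %gs-relative
 * addressing and can only ever touch the PDA of the CPU it runs on, with no
 * pointer dereference at all.
 */
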
/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
 */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;
#define pda_offset(field) offsetof(struct x8664_pda, field)

#define pda_to_op(op,field,val) do {                            \
        typedef typeof(_proxy_pda.field) T__;                   \
        if (0) { T__ tmp__; tmp__ = (val); }  /* type checking */ \
        switch (sizeof(_proxy_pda.field)) {                     \
        case 2:                                                 \
                asm(op "w %1,%%gs:%c2" :                        \
                    "+m" (_proxy_pda.field) :                   \
                    "ri" ((T__)val),                            \
                    "i" (pda_offset(field)));                   \
                break;                                          \
        case 4:                                                 \
                asm(op "l %1,%%gs:%c2" :                        \
                    "+m" (_proxy_pda.field) :                   \
                    "ri" ((T__)val),                            \
                    "i" (pda_offset(field)));                   \
                break;                                          \
        case 8:                                                 \
                asm(op "q %1,%%gs:%c2" :                        \
                    "+m" (_proxy_pda.field) :                   \
                    "ri" ((T__)val),                            \
                    "i" (pda_offset(field)));                   \
                break;                                          \
        default:                                                \
                __bad_pda_field();                              \
        }                                                       \
} while (0)
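
/*
 * Expansion sketch (illustrative only, with v standing for any unsigned
 * long value): for an 8-byte field such as oldrsp, pda_to_op("mov", oldrsp, v)
 * -- the body of write_pda() defined further down -- takes the "case 8"
 * branch above and becomes roughly
 *
 *	asm("movq %1,%%gs:%c2" :
 *	    "+m" (_proxy_pda.oldrsp) :
 *	    "ri" ((unsigned long)v),
 *	    "i" (offsetof(struct x8664_pda, oldrsp)));
 *
 * i.e. a single %gs-relative store with the field offset folded in as an
 * immediate; _proxy_pda is named only so gcc tracks the dependency.
 */
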
#define pda_from_op(op,field) ({                                \
        typeof(_proxy_pda.field) ret__;                         \
        switch (sizeof(_proxy_pda.field)) {                     \
        case 2:                                                 \
                asm(op "w %%gs:%c1,%0" :                        \
                    "=r" (ret__) :                              \
                    "i" (pda_offset(field)),                    \
                    "m" (_proxy_pda.field));                    \
                break;                                          \
        case 4:                                                 \
                asm(op "l %%gs:%c1,%0" :                        \
                    "=r" (ret__) :                              \
                    "i" (pda_offset(field)),                    \
                    "m" (_proxy_pda.field));                    \
                break;                                          \
        case 8:                                                 \
                asm(op "q %%gs:%c1,%0" :                        \
                    "=r" (ret__) :                              \
                    "i" (pda_offset(field)),                    \
                    "m" (_proxy_pda.field));                    \
                break;                                          \
        default:                                                \
                __bad_pda_field();                              \
        }                                                       \
        ret__; })

#define read_pda(field) pda_from_op("mov",field)
#define write_pda(field,val) pda_to_op("mov",field,val)
#define add_pda(field,val) pda_to_op("add",field,val)
#define sub_pda(field,val) pda_to_op("sub",field,val)
#define or_pda(field,val) pda_to_op("or",field,val)
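
/*
 * Usage sketch (not part of the original header; next_p is a hypothetical
 * task pointer): the accessors read like ordinary variable operations, so
 * context-switch or interrupt code might do
 *
 *	struct task_struct *prev = read_pda(pcurrent);
 *	write_pda(pcurrent, next_p);
 *	add_pda(irq_call_count, 1);
 *
 * Each line compiles down to one %gs-relative mov/add, so the current CPU's
 * PDA is updated without ever forming a pointer to it.
 */
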
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit,field) ({                    \
        int old__;                                              \
        asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"            \
                     : "=r" (old__), "+m" (_proxy_pda.field)    \
                     : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
        old__;                                                  \
})
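
/*
 * Sketch (illustrative): because this is a single btr on a %gs-relative
 * word, it is only safe with preemption disabled.  A caller leaving the
 * idle state might, for example, do
 *
 *	if (test_and_clear_bit_pda(0, isidle) == 0)
 *		return;
 *
 * which tests and clears bit 0 of this CPU's isidle field in one instruction.
 */
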
#endif	/* __ASSEMBLY__ */

#define PDA_STACKOFFSET (5*8)

#endif	/* X86_64_PDA_H */