ec0d7f18ab
Pull fpu state cleanups from Ingo Molnar:
 "This tree streamlines further aspects of FPU handling by eliminating
  the prepare_to_copy() complication and moving that logic to
  arch_dup_task_struct().

  It also fixes the FPU dumps in threaded core dumps, removes an old
  (and now invalid) assumption, and micro-optimizes the exit path by
  avoiding an FPU save for dead tasks."

Fixed up trivial add-add conflict in arch/sh/kernel/process.c that came
in because we now do the FPU handling in arch_dup_task_struct() rather
than the legacy (and now gone) prepare_to_copy().

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, fpu: drop the fpu state during thread exit
  x86, xsave: remove thread_has_fpu() bug check in __sanitize_i387_state()
  coredump: ensure the fpu state is flushed for proper multi-threaded core dump
  fork: move the real prepare_to_copy() users to arch_dup_task_struct()
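For context on the fork change above: each architecture's arch_dup_task_struct() now takes over the FPU duplication that prepare_to_copy() used to do, i.e. the parent's live FPU registers are saved and the child gets its own copy before it can ever run. The code below is only a rough sketch of what that looks like on the x86 side; the helper names (unlazy_fpu(), fpu_allocated(), fpu_alloc(), fpu_copy()) and the include path are assumptions about the FPU internals of that era, not something taken from this commit or from the CRIS header below.

#include <linux/sched.h>
#include <asm/fpu-internal.h>	/* assumed home of the fpu_* helpers */

/*
 * Sketch only: duplicate the parent's FPU state while copying the
 * task_struct, roughly what the old prepare_to_copy() callers do here now.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	/* Flush the parent's live FPU registers into src->thread.fpu
	 * so that the structure copy below sees up-to-date state. */
	unlazy_fpu(src);
	*dst = *src;

	/* Give the child its own FPU save area rather than aliasing the
	 * parent's, then copy the saved contents across. */
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;
		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
	}
	return 0;
}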
79 lines
2.1 KiB
C
/*
 * include/asm-cris/processor.h
 *
 * Copyright (C) 2000, 2001 Axis Communications AB
 *
 * Authors:	Bjorn Wesen	Initial version
 *
 */

#ifndef __ASM_CRIS_PROCESSOR_H
#define __ASM_CRIS_PROCESSOR_H

#include <asm/page.h>
#include <asm/ptrace.h>
#include <arch/processor.h>
#include <arch/system.h>

struct task_struct;

#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

/* THREAD_SIZE is the size of the thread_info/kernel_stack combo.
 * Normally, the stack is found by doing something like p + THREAD_SIZE.
 * On CRIS, a page is 8192 bytes, which seems like a sane size.
 */
#define THREAD_SIZE		PAGE_SIZE
#define THREAD_SIZE_ORDER	(0)

/*
 * At user->kernel entry, the pt_regs struct is stacked on the top of the
 * kernel stack.  This macro allows us to find those regs for a task.
 * Notice that subsequent pt_regs stackings, like recursive interrupts
 * occurring while we're in the kernel, won't affect this - only the first
 * user->kernel transition registers are reached by this.
 */

#define user_regs(thread_info) (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE)) - 1)

/*
 * Ditto, but for the currently running task
 */

#define task_pt_regs(task) user_regs(task_thread_info(task))
#define current_regs() task_pt_regs(current)

extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

unsigned long get_wchan(struct task_struct *p);

#define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)

extern unsigned long thread_saved_pc(struct task_struct *tsk);

/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
	/* Nothing needs to be done. */
}

#define init_stack	(init_thread_union.stack)

#define cpu_relax()	barrier()

/*
 * Disable hlt during certain critical I/O operations.
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

void default_idle(void);

#endif /* __ASM_CRIS_PROCESSOR_H */