/*
 * linux/arch/um/include/kern_util.h
 *
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */
#ifndef __KERN_UTIL_H__
#define __KERN_UTIL_H__
#include "sysdep/ptrace.h"
/*
 * struct faultinfo (sysdep/faultinfo.h) holds the arch-specific host page
 * fault data; FAULT_ADDRESS() and FAULT_WRITE() extract the faulting
 * address and the "is_write" flag from it.
 */
#include "sysdep/faultinfo.h"
#include "uml-config.h"
typedef void (*kern_hndl)(int, union uml_pt_regs *);
struct kern_handlers {
kern_hndl relay_signal;
kern_hndl winch;
kern_hndl bus_handler;
kern_hndl page_fault;
kern_hndl sigio_handler;
kern_hndl timer_handler;
};
extern const struct kern_handlers handlinfo_kern;
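/*
 * A hedged sketch (not the kernel's actual definition) of how the
 * handlinfo_kern table could be populated with handlers declared in this
 * header; .winch, .bus_handler and .page_fault would point at handlers
 * defined outside this file:
 *
 *	const struct kern_handlers handlinfo_kern = {
 *		.relay_signal  = relay_signal,
 *		.sigio_handler = sigio_handler,
 *		.timer_handler = timer_handler,
 *	};
 */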
extern int ncpus;
extern char *gdb_init;
extern int kmalloc_ok;
extern int jail;
extern int nsyscalls;
#define UML_ROUND_DOWN(addr) ((void *)(((unsigned long) addr) & PAGE_MASK))
#define UML_ROUND_UP(addr) \
UML_ROUND_DOWN(((unsigned long) addr) + PAGE_SIZE - 1)
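/*
 * Example, assuming 4 KiB pages (PAGE_SIZE == 0x1000):
 *	UML_ROUND_DOWN(0x12345) == (void *) 0x12000
 *	UML_ROUND_UP(0x12345)   == (void *) 0x13000
 *	UML_ROUND_UP(0x12000)   == (void *) 0x12000   (already page-aligned)
 */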
extern int kernel_fork(unsigned long flags, int (*fn)(void *), void * arg);
#ifdef UML_CONFIG_MODE_TT
extern unsigned long stack_sp(unsigned long page);
#endif
extern int kernel_thread_proc(void *data);
extern void syscall_segv(int sig);
extern int current_pid(void);
extern unsigned long alloc_stack(int order, int atomic);
extern int do_signal(void);
extern int is_stack_fault(unsigned long sp);
extern unsigned long segv(struct faultinfo fi, unsigned long ip,
int is_user, union uml_pt_regs *regs);
extern int handle_page_fault(unsigned long address, unsigned long ip,
int is_write, int is_user, int *code_out);
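/*
 * A hedged sketch of how a trap handler might feed faultinfo into
 * handle_page_fault(), using the FAULT_ADDRESS()/FAULT_WRITE() accessors
 * from sysdep/faultinfo.h; "fi", "ip" and "is_user" are assumed locals:
 *
 *	int code;
 *	int err = handle_page_fault(FAULT_ADDRESS(fi), ip, FAULT_WRITE(fi),
 *				    is_user, &code);
 */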
extern void syscall_ready(void);
extern void set_tracing(void *t, int tracing);
extern int is_tracing(void *task);
extern int segv_syscall(void);
extern void kern_finish_exec(void *task, int new_pid, unsigned long stack);
extern unsigned long page_mask(void);
extern int need_finish_fork(void);
extern void free_stack(unsigned long stack, int order);
extern void add_input_request(int op, void (*proc)(int), void *arg);
extern char *current_cmd(void);
extern void timer_handler(int sig, union uml_pt_regs *regs);
extern int set_signals(int enable);
extern int pid_to_processor_id(int pid);
extern void deliver_signals(void *t);
extern int next_trap_index(int max);
extern void default_idle(void);
extern void finish_fork(void);
extern void paging_init(void);
extern void init_flush_vm(void);
extern void *syscall_sp(void *t);
extern void syscall_trace(union uml_pt_regs *regs, int entryexit);
extern int hz(void);
extern unsigned int do_IRQ(int irq, union uml_pt_regs *regs);
extern void interrupt_end(void);
extern void initial_thread_cb(void (*proc)(void *), void *arg);
extern int debugger_signal(int status, int pid);
extern void debugger_parent_signal(int status, int pid);
extern void child_signal(int pid, int status);
extern int init_ptrace_proxy(int idle_pid, int startup, int stop);
extern int init_parent_proxy(int pid);
extern int singlestepping(void *t);
extern void check_stack_overflow(void *ptr);
extern void relay_signal(int sig, union uml_pt_regs *regs);
extern int user_context(unsigned long sp);
extern void timer_irq(union uml_pt_regs *regs);
extern void unprotect_stack(unsigned long stack);
extern void do_uml_exitcalls(void);
extern int attach_debugger(int idle_pid, int pid, int stop);
extern int config_gdb(char *str);
extern int remove_gdb(void);
extern char *uml_strdup(char *string);
extern void unprotect_kernel_mem(void);
extern void protect_kernel_mem(void);
extern void uml_cleanup(void);
extern void lock_signalled_task(void *t);
extern void IPI_handler(int cpu);
extern int jail_setup(char *line, int *add);
extern void *get_init_task(void);
extern int clear_user_proc(void *buf, int size);
extern int copy_to_user_proc(void *to, void *from, int size);
extern int copy_from_user_proc(void *to, void *from, int size);
extern int strlen_user_proc(char *str);
extern long execute_syscall(void *r);
extern int smp_sigio_handler(void);
extern void *get_current(void);
extern struct task_struct *get_task(int pid, int require);
extern void machine_halt(void);
extern int is_syscall(unsigned long addr);
extern void free_irq(unsigned int, void *);
extern int cpu(void);
extern void time_init_kern(void);
/* Are we not allowed to sleep? Used to choose between GFP_KERNEL and GFP_ATOMIC. */
extern int __cant_sleep(void);
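/*
 * A minimal usage sketch (hypothetical caller, not part of this header):
 *
 *	void *buf = kmalloc(size, __cant_sleep() ? GFP_ATOMIC : GFP_KERNEL);
 */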
extern void sigio_handler(int sig, union uml_pt_regs *regs);
extern void copy_sc(union uml_pt_regs *regs, void *from);
/*
 * Interrupts run on a separate IRQ stack (set up with sigaltstack); these
 * copy the active thread_info to that stack and back when the interrupt
 * finishes.
 */
extern unsigned long to_irq_stack(unsigned long *mask_out);
extern unsigned long from_irq_stack(int nested);
#endif