linux/arch/um/sys-i386/ldt.c

/*
 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */
#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/types.h"
#include "linux/errno.h"
#include "linux/spinlock.h"
#include "asm/uaccess.h"
#include "asm/smp.h"
#include "asm/ldt.h"
#include "asm/unistd.h"
#include "choose-mode.h"
#include "kern.h"
#include "mode_kern.h"
#include "os.h"
extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
#ifdef CONFIG_MODE_TT
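
/*
 * In tt mode, sys_modify_ldt is forwarded straight to the host's
 * modify_ldt(); this wrapper only marshals the buffer. Writes are staged
 * in a stack-local struct user_desc, reads go through a kmalloc-ed bounce
 * buffer that is copied back out to userspace afterwards.
 */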
static long do_modify_ldt_tt(int func, void __user *ptr,
			     unsigned long bytecount)
{
	struct user_desc info;
	int res = 0;
	void *buf = NULL;
	void *p = NULL; /* What we pass to the host. */

	switch(func){
	case 1:
	case 0x11: /* write_ldt */
		/* Do this check now to avoid overflows. */
		if (bytecount != sizeof(struct user_desc)) {
			res = -EINVAL;
			goto out;
		}

		if(copy_from_user(&info, ptr, sizeof(info))) {
			res = -EFAULT;
			goto out;
		}

		p = &info;
		break;
	case 0:
	case 2: /* read_ldt */
		/* The use of info avoids kmalloc on the write case, not on
		 * the read one. */
		buf = kmalloc(bytecount, GFP_KERNEL);
		if (!buf) {
			res = -ENOMEM;
			goto out;
		}
		p = buf;
		break;
	default:
		res = -ENOSYS;
		goto out;
	}

	res = modify_ldt(func, p, bytecount);
	if(res < 0)
		goto out;

	switch(func){
	case 0:
	case 2:
		/* modify_ldt was for reading and returned the number of
		 * bytes read. */
		if(copy_to_user(ptr, p, res))
			res = -EFAULT;
		break;
	}

out:
	kfree(buf);
	return res;
}
#endif
#ifdef CONFIG_MODE_SKAS
#include "skas.h"
#include "skas_ptrace.h"
#include "asm/mmu_context.h"
#include "proc_mm.h"
long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
		     void **addr, int done)
{
	long res;

	if(proc_mm){
		/* This is special handling for the case where the mm to
		 * modify isn't current->active_mm.
		 * If this is called directly by modify_ldt,
		 * (current->active_mm->context.skas.u == mm_idp)
		 * will be true. So no call to switch_mm_skas(mm_idp) is done.
		 * If this is called from init_new_ldt or PTRACE_LDT,
		 * mm_idp won't belong to current->active_mm, but to
		 * child->mm. So we need to switch the child's mm into our
		 * userspace, then switch back later.
		 *
		 * Note: I'm unsure: should interrupts be disabled here?
		 */
		if(!current->active_mm || current->active_mm == &init_mm ||
		   mm_idp != &current->active_mm->context.skas.id)
			switch_mm_skas(mm_idp);
	}

	if(ptrace_ldt) {
		struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
			.func = func,
			.ptr = desc,
			.bytecount = sizeof(*desc)};
		u32 cpu;
		int pid;

		if(!proc_mm)
			pid = mm_idp->u.pid;
		else {
			cpu = get_cpu();
			pid = userspace_pid[cpu];
		}

		res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);

		if(proc_mm)
			put_cpu();
	}
	else {
		void *stub_addr;
		res = syscall_stub_data(mm_idp, (unsigned long *)desc,
					(sizeof(*desc) + sizeof(long) - 1) &
					    ~(sizeof(long) - 1),
					addr, &stub_addr);
		if(!res){
			unsigned long args[] = { func,
						 (unsigned long)stub_addr,
						 sizeof(*desc),
						 0, 0, 0 };
			res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
					       0, addr, done);
		}
	}

	if(proc_mm){
		/* This is the second part of the special handling that makes
		 * PTRACE_LDT possible to implement.
		 */
		if(current->active_mm && current->active_mm != &init_mm &&
		   mm_idp != &current->active_mm->context.skas.id)
			switch_mm_skas(&current->active_mm->context.skas.id);
	}

	return res;
}
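
/*
 * With PTRACE_LDT, UML keeps no LDT shadow of its own, so reads are
 * forwarded to the host: the result is fetched into a kernel buffer and
 * then copied out to userspace.
 */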
static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
{
	int res, n;
	struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
		.func = 0,
		.bytecount = bytecount,
		.ptr = kmalloc(bytecount, GFP_KERNEL)};
	u32 cpu;

	if(ptrace_ldt.ptr == NULL)
		return -ENOMEM;

	/* This is called from sys_modify_ldt only, so userspace_pid gives
	 * us the right number
	 */
	cpu = get_cpu();
	res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
	put_cpu();
	if(res < 0)
		goto out;

	n = copy_to_user(ptr, ptrace_ldt.ptr, res);
	if(n != 0)
		res = -EFAULT;

out:
	kfree(ptrace_ldt.ptr);
	return res;
}
/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * in arch/i386/kernel/ldt.c
 * The routines copied and modified in part are:
 *	- read_ldt
 *	- read_default_ldt
 *	- write_ldt
 *	- sys_modify_ldt_skas
 */
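
/*
 * The shadow LDT is stored inline (u.entries) while it holds at most
 * LDT_DIRECT_ENTRIES entries, and spills into whole pages (u.pages[])
 * once it grows beyond that; read_ldt() has to handle both layouts.
 */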
static int read_ldt(void __user * ptr, unsigned long bytecount)
{
	int i, err = 0;
	unsigned long size;
	uml_ldt_t * ldt = &current->mm->context.skas.ldt;

	if(!ldt->entry_count)
		goto out;
	if(bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
	err = bytecount;

	if(ptrace_ldt){
		return read_ldt_from_host(ptr, bytecount);
	}

	down(&ldt->semaphore);
	if(ldt->entry_count <= LDT_DIRECT_ENTRIES){
		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
		if(size > bytecount)
			size = bytecount;
		if(copy_to_user(ptr, ldt->u.entries, size))
			err = -EFAULT;
		bytecount -= size;
		ptr += size;
	}
	else {
		for(i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
		    i++){
			size = PAGE_SIZE;
			if(size > bytecount)
				size = bytecount;
			if(copy_to_user(ptr, ldt->u.pages[i], size)){
				err = -EFAULT;
				break;
			}
			bytecount -= size;
			ptr += size;
		}
	}
	up(&ldt->semaphore);

	if(bytecount == 0 || err == -EFAULT)
		goto out;

	if(clear_user(ptr, bytecount))
		err = -EFAULT;

out:
	return err;
}
static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
	int err;

	if(bytecount > 5*LDT_ENTRY_SIZE)
		bytecount = 5*LDT_ENTRY_SIZE;

	err = bytecount;

	/* UML doesn't support lcall7 and lcall27.
	 * So, we don't really have a default ldt, but emulate
	 * an empty ldt of common host default ldt size.
	 */
	if(clear_user(ptr, bytecount))
		err = -EFAULT;

	return err;
}
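
/*
 * write_ldt() validates the descriptor, pushes it to the host first, and
 * only then mirrors it into the local shadow LDT, growing from the inline
 * array to page-sized chunks on demand; on allocation failure the host
 * entry is zeroed again so host and shadow stay consistent.
 */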
static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
{
	uml_ldt_t * ldt = &current->mm->context.skas.ldt;
	struct mm_id * mm_idp = &current->mm->context.skas.id;
	int i, err;
	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;
	void *addr = NULL;

	err = -EINVAL;
	if(bytecount != sizeof(ldt_info))
		goto out;
	err = -EFAULT;
	if(copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	err = -EINVAL;
	if(ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if(ldt_info.contents == 3){
		if (func == 1)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if(!ptrace_ldt)
		down(&ldt->semaphore);

	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
	if(err)
		goto out_unlock;
	else if(ptrace_ldt) {
		/* With PTRACE_LDT available, this is used as a flag only */
		ldt->entry_count = 1;
		goto out;
	}

	if(ldt_info.entry_number >= ldt->entry_count &&
	   ldt_info.entry_number >= LDT_DIRECT_ENTRIES){
		for(i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		    i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
		    i++){
			if(i == 0)
				memcpy(&entry0, ldt->u.entries,
				       sizeof(entry0));
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if(!ldt->u.pages[i]){
				err = -ENOMEM;
				/* Undo the change in the host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
				goto out_unlock;
			}
			if(i == 0) {
				memcpy(ldt->u.pages[0], &entry0,
				       sizeof(entry0));
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			}
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
		}
	}
	if(ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;

	if(ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;
	else
		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

	if(ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	   (func == 1 || LDT_empty(&ldt_info))){
		ldt_p->a = 0;
		ldt_p->b = 0;
	}
	else{
		if (func == 1)
			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);
	}
	err = 0;

out_unlock:
	/* The semaphore was only taken in the !ptrace_ldt case above. */
	if(!ptrace_ldt)
		up(&ldt->semaphore);
out:
	return err;
}
static long do_modify_ldt_skas(int func, void __user *ptr,
			       unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
	case 0x11:
		ret = write_ldt(ptr, bytecount, func);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	}
	return ret;
}
static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = {0, -1};
static short * host_ldt_entries = NULL;
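
/*
 * host_ldt_entries lists the slots in use in the LDT inherited from the
 * host, terminated by -1. dummy_list doubles as static storage for small
 * lists: dummy_list+1 is the empty list, and up to eight in-use slots can
 * be recorded in dummy_list itself before a kmalloc-ed list is needed.
 */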
static void ldt_get_host_info(void)
{
	long ret;
	struct ldt_entry * ldt;
	short *tmp;
	int i, size, k, order;

	spin_lock(&host_ldt_lock);

	if(host_ldt_entries != NULL){
		spin_unlock(&host_ldt_lock);
		return;
	}
	host_ldt_entries = dummy_list+1;

	spin_unlock(&host_ldt_lock);

	for(i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++);

	ldt = (struct ldt_entry *)
	      __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if(ldt == NULL) {
		printk("ldt_get_host_info: couldn't allocate buffer for host "
		       "ldt\n");
		return;
	}

	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
	if(ret < 0) {
		printk("ldt_get_host_info: couldn't read host ldt\n");
		goto out_free;
	}
	if(ret == 0) {
		/* default_ldt is active, simply write an empty entry 0 */
		host_ldt_entries = dummy_list;
		goto out_free;
	}

	for(i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++){
		if(ldt[i].a != 0 || ldt[i].b != 0)
			size++;
	}

	if(size < ARRAY_SIZE(dummy_list))
		host_ldt_entries = dummy_list;
	else {
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
		if(tmp == NULL) {
			printk("ldt_get_host_info: couldn't allocate host ldt "
			       "list\n");
			goto out_free;
		}
		host_ldt_entries = tmp;
	}

	for(i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++){
		if(ldt[i].a != 0 || ldt[i].b != 0) {
			host_ldt_entries[k++] = i;
		}
	}
	host_ldt_entries[k] = -1;

out_free:
	free_pages((unsigned long)ldt, order);
}
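
/*
 * init_new_ldt() sets up the LDT for a new mm: with no parent mm it wipes
 * whatever the host handed down; with a parent it copies the parent's LDT,
 * via /proc/mm MM_COPY_SEGMENTS on the host side plus a memcpy of the
 * local shadow.
 */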
long init_new_ldt(struct mmu_context_skas * new_mm,
		  struct mmu_context_skas * from_mm)
{
	struct user_desc desc;
	short * num_p;
	int i;
	long page, err=0;
	void *addr = NULL;
	struct proc_mm_op copy;

	if(!ptrace_ldt)
		init_MUTEX(&new_mm->ldt.semaphore);

	if(!from_mm){
		memset(&desc, 0, sizeof(desc));
		/*
		 * We have to initialize a clean ldt.
		 */
		if(proc_mm) {
			/*
			 * If the new mm was created using proc_mm, the
			 * host's default ldt is currently assigned, which
			 * normally contains the call gates for lcall7 and
			 * lcall27. To remove these gates, we simply write
			 * an empty entry as number 0 to the host.
			 */
			err = write_ldt_entry(&new_mm->id, 1, &desc,
					      &addr, 1);
		}
		else{
			/*
			 * Now we try to retrieve info about the ldt that we
			 * inherited from the host. All ldt entries found
			 * will be reset in the following loop.
			 */
			ldt_get_host_info();
			for(num_p=host_ldt_entries; *num_p != -1; num_p++){
				desc.entry_number = *num_p;
				err = write_ldt_entry(&new_mm->id, 1, &desc,
						      &addr,
						      *(num_p + 1) == -1);
				if(err)
					break;
			}
		}
		new_mm->ldt.entry_count = 0;
		goto out;
	}

	if(proc_mm){
		/* We have a valid from_mm, so we now have to copy the LDT of
		 * from_mm to new_mm, because using proc_mm a new mm with
		 * an empty/default LDT was created in new_mm().
		 */
		copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
					      .u =
					      { .copy_segments =
							from_mm->id.u.mm_fd } } );
		i = os_write_file_k(new_mm->id.u.mm_fd, &copy, sizeof(copy));
		if(i != sizeof(copy))
			printk("new_mm : /proc/mm copy_segments failed, "
			       "err = %d\n", -i);
	}

	if(!ptrace_ldt) {
		/* Our local LDT is used to supply the data for
		 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
		 * i.e., we have to use the stub for modify_ldt, which
		 * can't handle the big read buffer of up to 64kB.
		 */
		down(&from_mm->ldt.semaphore);
		if(from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES){
			memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
			       sizeof(new_mm->ldt.u.entries));
		}
		else{
			i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
			while(i-->0){
				page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
				if (!page){
					err = -ENOMEM;
					break;
				}
				new_mm->ldt.u.pages[i] =
					(struct ldt_entry *) page;
				memcpy(new_mm->ldt.u.pages[i],
				       from_mm->ldt.u.pages[i], PAGE_SIZE);
			}
		}
		new_mm->ldt.entry_count = from_mm->ldt.entry_count;
		up(&from_mm->ldt.semaphore);
	}

out:
	return err;
}
void free_ldt(struct mmu_context_skas * mm)
{
	int i;

	if(!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES){
		i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while(i-- > 0){
			free_page((long) mm->ldt.u.pages[i]);
		}
	}
	mm->ldt.entry_count = 0;
}
#endif
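
/*
 * CHOOSE_MODE_PROC dispatches to the tt or skas implementation according
 * to the mode this UML instance is running in.
 */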
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	return CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
				ptr, bytecount);
}