linux/fs/xfs/linux-2.6/xfs_file.c

/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_ioctl32.h"
#include <linux/dcache.h>
#include <linux/smp_lock.h>
static struct vm_operations_struct xfs_file_vm_ops;
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops;
#endif
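
/*
 * Common read path: note direct I/O requests in the ioflags and pass
 * the request down to the behaviour-layer vnode read operation.
 */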
STATIC_INLINE ssize_t
__xfs_file_read(
struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs,
int ioflags,
loff_t pos)
{
struct file *file = iocb->ki_filp;
bhv_vnode_t *vp = vn_from_inode(file->f_path.dentry->d_inode);
BUG_ON(iocb->ki_pos != pos);
if (unlikely(file->f_flags & O_DIRECT))
ioflags |= IO_ISDIRECT;
return bhv_vop_read(vp, iocb, iov, nr_segs, &iocb->ki_pos,
ioflags, NULL);
}
STATIC ssize_t
xfs_file_aio_read(
struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs,
loff_t pos)
{
return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO, pos);
}
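
/*
 * IO_INVIS marks the request as "invisible" I/O, used by the
 * handle/DMAPI interfaces, so the lower layers can avoid the side
 * effects of ordinary file access such as timestamp updates.
 */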
STATIC ssize_t
xfs_file_aio_read_invis(
struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs,
loff_t pos)
{
return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
}
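
/*
 * Common write path: mirrors __xfs_file_read() above, dispatching to
 * the behaviour-layer vnode write operation.
 */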
STATIC_INLINE ssize_t
__xfs_file_write(
struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs,
int ioflags,
loff_t pos)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
bhv_vnode_t *vp = vn_from_inode(inode);
BUG_ON(iocb->ki_pos != pos);
if (unlikely(file->f_flags & O_DIRECT))
ioflags |= IO_ISDIRECT;
return bhv_vop_write(vp, iocb, iov, nr_segs, &iocb->ki_pos,
ioflags, NULL);
}
STATIC ssize_t
xfs_file_aio_write(
struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs,
loff_t pos)
{
return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO, pos);
}
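
/* Invisible-I/O variant of xfs_file_aio_write(), see above. */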
STATIC ssize_t
xfs_file_aio_write_invis(
struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs,
loff_t pos)
{
return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
}
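
/*
 * Splice support also goes through the behaviour layer rather than
 * the generic file splice helpers.
 */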
STATIC ssize_t
xfs_file_splice_read(
struct file *infilp,
loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len,
unsigned int flags)
{
return bhv_vop_splice_read(vn_from_inode(infilp->f_path.dentry->d_inode),
infilp, ppos, pipe, len, flags, 0, NULL);
}
STATIC ssize_t
xfs_file_splice_read_invis(
struct file *infilp,
loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len,
unsigned int flags)
{
return bhv_vop_splice_read(vn_from_inode(infilp->f_path.dentry->d_inode),
infilp, ppos, pipe, len, flags, IO_INVIS,
NULL);
}
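
/* Splice data from a pipe into the file via the behaviour layer. */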
STATIC ssize_t
xfs_file_splice_write(
struct pipe_inode_info *pipe,
struct file *outfilp,
loff_t *ppos,
size_t len,
unsigned int flags)
{
return bhv_vop_splice_write(vn_from_inode(outfilp->f_path.dentry->d_inode),
pipe, outfilp, ppos, len, flags, 0, NULL);
}
STATIC ssize_t
xfs_file_splice_write_invis(
struct pipe_inode_info *pipe,
struct file *outfilp,
loff_t *ppos,
size_t len,
unsigned int flags)
{
return bhv_vop_splice_write(vn_from_inode(outfilp->f_path.dentry->d_inode),
pipe, outfilp, ppos, len, flags, IO_INVIS,
NULL);
}
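
/*
 * Refuse to open a file too large for a process that did not ask for
 * O_LARGEFILE, then let the vnode open operation do its own checks.
 */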
STATIC int
xfs_file_open(
struct inode *inode,
struct file *filp)
{
if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
return -EFBIG;
return -bhv_vop_open(vn_from_inode(inode), NULL);
}
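
/* Called when the last reference to this file structure is dropped. */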
STATIC int
xfs_file_release(
struct inode *inode,
struct file *filp)
{
bhv_vnode_t *vp = vn_from_inode(inode);
if (vp)
return -bhv_vop_release(vp);
return 0;
}
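
/*
 * Flush the whole file range to disk, waiting for completion; a
 * datasync restricts this to data, and any pending truncate flag is
 * cleared first.
 */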
STATIC int
xfs_file_fsync(
struct file *filp,
struct dentry *dentry,
int datasync)
{
bhv_vnode_t *vp = vn_from_inode(dentry->d_inode);
int flags = FSYNC_WAIT;
if (datasync)
flags |= FSYNC_DATA;
if (VN_TRUNC(vp))
VUNTRUNCATE(vp);
return -bhv_vop_fsync(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1);
}
#ifdef CONFIG_XFS_DMAPI
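/*
 * DMAPI-aware fault handler: give the DMAPI application a chance to
 * respond to the mmap event before faulting the page in as usual.
 */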
STATIC struct page *
xfs_vm_nopage(
struct vm_area_struct *area,
unsigned long address,
int *type)
{
struct inode *inode = area->vm_file->f_path.dentry->d_inode;
bhv_vnode_t *vp = vn_from_inode(inode);
ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), area, 0))
return NULL;
return filemap_nopage(area, address, type);
}
#endif /* CONFIG_XFS_DMAPI */
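
/*
 * Read directory entries through the behaviour layer into a local
 * buffer and feed them one by one to the VFS filldir callback,
 * tracking the 32-bit directory offset cookie the VFS expects.
 */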
STATIC int
xfs_file_readdir(
struct file *filp,
void *dirent,
filldir_t filldir)
{
int error = 0;
bhv_vnode_t *vp = vn_from_inode(filp->f_path.dentry->d_inode);
uio_t uio;
iovec_t iov;
int eof = 0;
caddr_t read_buf;
int namelen, size = 0;
size_t rlen = PAGE_CACHE_SIZE;
xfs_off_t start_offset, curr_offset;
xfs_dirent_t *dbp = NULL;
/* Try fairly hard to get memory */
do {
if ((read_buf = kmalloc(rlen, GFP_KERNEL)))
break;
rlen >>= 1;
} while (rlen >= 1024);
if (read_buf == NULL)
return -ENOMEM;
uio.uio_iov = &iov;
uio.uio_segflg = UIO_SYSSPACE;
curr_offset = filp->f_pos;
if (filp->f_pos != 0x7fffffff)
uio.uio_offset = filp->f_pos;
else
uio.uio_offset = 0xffffffff;
while (!eof) {
uio.uio_resid = iov.iov_len = rlen;
iov.iov_base = read_buf;
uio.uio_iovcnt = 1;
start_offset = uio.uio_offset;
error = bhv_vop_readdir(vp, &uio, NULL, &eof);
if ((uio.uio_offset == start_offset) || error) {
size = 0;
break;
}
size = rlen - uio.uio_resid;
dbp = (xfs_dirent_t *)read_buf;
while (size > 0) {
namelen = strlen(dbp->d_name);
if (filldir(dirent, dbp->d_name, namelen,
(loff_t) curr_offset & 0x7fffffff,
(ino_t) dbp->d_ino,
DT_UNKNOWN)) {
goto done;
}
size -= dbp->d_reclen;
curr_offset = (loff_t)dbp->d_off /* & 0x7fffffff */;
dbp = (xfs_dirent_t *)((char *)dbp + dbp->d_reclen);
}
}
done:
if (!error) {
if (size == 0)
filp->f_pos = uio.uio_offset & 0x7fffffff;
else if (dbp)
filp->f_pos = curr_offset;
}
kfree(read_buf);
return -error;
}
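
/*
 * Set up the XFS vm_operations for this mapping; DMAPI-managed
 * filesystems get the variant that generates mmap events.
 */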
STATIC int
xfs_file_mmap(
struct file *filp,
struct vm_area_struct *vma)
{
vma->vm_ops = &xfs_file_vm_ops;
/*
 * Pages in this mapping may be invalidated under us (truncate,
 * invalidate_inode_pages); VM_CAN_INVALIDATE tells do_no_page that
 * ->nopage returns the page locked, which closes the race where a
 * fault could map a page just as it is dropped from the pagecache.
 */
vma->vm_flags |= VM_CAN_INVALIDATE;
#ifdef CONFIG_XFS_DMAPI
if (vn_from_inode(filp->f_path.dentry->d_inode)->v_vfsp->vfs_flag & VFS_DMI)
vma->vm_ops = &xfs_dmapi_file_vm_ops;
#endif /* CONFIG_XFS_DMAPI */
file_accessed(filp);
return 0;
}
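
/* Dispatch ioctls through the behaviour layer. */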
STATIC long
xfs_file_ioctl(
struct file *filp,
unsigned int cmd,
unsigned long p)
{
int error;
struct inode *inode = filp->f_path.dentry->d_inode;
bhv_vnode_t *vp = vn_from_inode(inode);
error = bhv_vop_ioctl(vp, inode, filp, 0, cmd, (void __user *)p);
VMODIFY(vp);
/*
 * NOTE: some ioctls return positive numbers as a byte count on
 * success (readlink_by_handle, for example), so we don't "sign flip"
 * the result like most other routines do. This means true errors
 * need to be returned as a negative value.
 */
return error;
}
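
/* Invisible-I/O variant of xfs_file_ioctl(), see above. */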
STATIC long
xfs_file_ioctl_invis(
struct file *filp,
unsigned int cmd,
unsigned long p)
{
int error;
struct inode *inode = filp->f_path.dentry->d_inode;
bhv_vnode_t *vp = vn_from_inode(inode);
error = bhv_vop_ioctl(vp, inode, filp, IO_INVIS, cmd, (void __user *)p);
VMODIFY(vp);
/*
 * NOTE: some ioctls return positive numbers as a byte count on
 * success (readlink_by_handle, for example), so we don't "sign flip"
 * the result like most other routines do. This means true errors
 * need to be returned as a negative value.
 */
return error;
}
#ifdef CONFIG_XFS_DMAPI
#ifdef HAVE_VMOP_MPROTECT
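/*
 * On a DMAPI filesystem, adding write permission to a shared mapping
 * must raise a DMAPI mmap event first so the event daemon can act
 * before the pages become writable.
 */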
STATIC int
xfs_vm_mprotect(
struct vm_area_struct *vma,
unsigned int newflags)
{
bhv_vnode_t *vp = vn_from_inode(vma->vm_file->f_path.dentry->d_inode);
int error = 0;
if (vp->v_vfsp->vfs_flag & VFS_DMI) {
if ((vma->vm_flags & VM_MAYSHARE) &&
(newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) {
xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
}
}
return error;
}
#endif /* HAVE_VMOP_MPROTECT */
#endif /* CONFIG_XFS_DMAPI */
#ifdef HAVE_FOP_OPEN_EXEC
/*
 * If the user is attempting to execute a file that is offline we
 * have to trigger a DMAPI READ event before the file is marked as
 * busy; otherwise the invisible I/O will not be able to write to the
 * file to bring it back online.
 */
STATIC int
xfs_file_open_exec(
struct inode *inode)
{
bhv_vnode_t *vp = vn_from_inode(inode);
if (unlikely(vp->v_vfsp->vfs_flag & VFS_DMI)) {
xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
xfs_inode_t *ip = xfs_vtoi(vp);
if (!ip)
return -EINVAL;
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ))
return -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
0, 0, 0, NULL);
}
return 0;
}
#endif /* HAVE_FOP_OPEN_EXEC */
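
/*
 * The three operation vectors: normal files, "invisible" handle-based
 * access, and directories.
 */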
const struct file_operations xfs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = xfs_file_aio_read,
.aio_write = xfs_file_aio_write,
.splice_read = xfs_file_splice_read,
.splice_write = xfs_file_splice_write,
.unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = xfs_file_compat_ioctl,
#endif
.mmap = xfs_file_mmap,
.open = xfs_file_open,
.release = xfs_file_release,
.fsync = xfs_file_fsync,
#ifdef HAVE_FOP_OPEN_EXEC
.open_exec = xfs_file_open_exec,
#endif
};
const struct file_operations xfs_invis_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = xfs_file_aio_read_invis,
.aio_write = xfs_file_aio_write_invis,
.splice_read = xfs_file_splice_read_invis,
.splice_write = xfs_file_splice_write_invis,
.unlocked_ioctl = xfs_file_ioctl_invis,
#ifdef CONFIG_COMPAT
.compat_ioctl = xfs_file_compat_invis_ioctl,
#endif
.mmap = xfs_file_mmap,
.open = xfs_file_open,
.release = xfs_file_release,
.fsync = xfs_file_fsync,
};
const struct file_operations xfs_dir_file_operations = {
.read = generic_read_dir,
.readdir = xfs_file_readdir,
.unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = xfs_file_compat_ioctl,
#endif
.fsync = xfs_file_fsync,
};
static struct vm_operations_struct xfs_file_vm_ops = {
.nopage = filemap_nopage,
.populate = filemap_populate,
};
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
.nopage = xfs_vm_nopage,
.populate = filemap_populate,
#ifdef HAVE_VMOP_MPROTECT
.mprotect = xfs_vm_mprotect,
#endif
};
#endif /* CONFIG_XFS_DMAPI */