/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
|
|
|
|
#include "drm.h"
|
|
|
|
#include "i915_drm.h"
|
|
|
|
#include "i915_drv.h"
|
2009-08-25 10:15:50 +00:00
|
|
|
#include "i915_trace.h"
|
2009-08-17 20:31:43 +00:00
|
|
|
#include "intel_drv.h"
|
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/intel-gtt.h>

static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);

static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
                                                  bool pipelined);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
                                             int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                                      uint64_t offset,
                                                      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
                                          bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);

static int
i915_gem_object_get_pages(struct drm_gem_object *obj,
                          gfp_t gfpmask);

static void
i915_gem_object_put_pages(struct drm_gem_object *obj);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
{
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     size_t size)
{
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
}

static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
                                  size_t size)
{
        dev_priv->mm.gtt_count++;
        dev_priv->mm.gtt_memory += size;
}

static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
                                     size_t size)
{
        dev_priv->mm.gtt_count--;
        dev_priv->mm.gtt_memory -= size;
}

static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
                                  size_t size)
{
        dev_priv->mm.pin_count++;
        dev_priv->mm.pin_memory += size;
}

static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
                                     size_t size)
{
        dev_priv->mm.pin_count--;
        dev_priv->mm.pin_memory -= size;
}

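/* Check whether the GPU is wedged before starting new work.  If a reset is
 * in progress, wait for it to complete; if the GPU is still wedged afterwards
 * the reset failed, so re-arm the completion and report -EIO to the caller.
 */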
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct completion *x = &dev_priv->error_completion;
        unsigned long flags;
        int ret;

        if (!atomic_read(&dev_priv->mm.wedged))
                return 0;

        ret = wait_for_completion_interruptible(x);
        if (ret)
                return ret;

        /* Success, we reset the GPU! */
        if (!atomic_read(&dev_priv->mm.wedged))
                return 0;

        /* GPU is hung, bump the completion count to account for
         * the token we just consumed so that we never hit zero and
         * end up waiting upon a subsequent completion event that
         * will never happen.
         */
        spin_lock_irqsave(&x->wait.lock, flags);
        x->done++;
        spin_unlock_irqrestore(&x->wait.lock, flags);
        return -EIO;
}

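/* Take struct_mutex for a GEM ioctl, bailing out early (and interruptibly)
 * if the GPU is wedged or becomes wedged while we wait for the lock.
 */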
static int i915_mutex_lock_interruptible(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = i915_gem_check_is_wedged(dev);
        if (ret)
                return ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (atomic_read(&dev_priv->mm.wedged)) {
                mutex_unlock(&dev->struct_mutex);
                return -EAGAIN;
        }

        WARN_ON(i915_verify_lists(dev));
        return 0;
}

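/* An object is "inactive" when it is bound into the GTT but neither in use
 * by the GPU nor pinned.
 */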
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
        return obj_priv->gtt_space &&
                !obj_priv->active &&
                obj_priv->pin_count == 0;
}

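/* Initialise the GTT address-space manager for the range [start, end); both
 * bounds must be page aligned.
 */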
int i915_gem_do_init(struct drm_device *dev,
                     unsigned long start,
                     unsigned long end)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (start >= end ||
            (start & (PAGE_SIZE - 1)) != 0 ||
            (end & (PAGE_SIZE - 1)) != 0) {
                return -EINVAL;
        }

        drm_mm_init(&dev_priv->mm.gtt_space, start,
                    end - start);

        dev_priv->mm.gtt_total = end - start;

        return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_init *args = data;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        args->aper_size = dev_priv->mm.gtt_total;
        args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        args->size = roundup(args->size, PAGE_SIZE);

        /* Allocate the new object */
        obj = i915_gem_alloc_object(dev, args->size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        if (ret) {
                drm_gem_object_release(obj);
                i915_gem_info_remove_obj(dev->dev_private, obj->size);
                kfree(obj);
                return ret;
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference(obj);
        trace_i915_gem_object_create(obj);

        args->handle = handle;
        return 0;
}

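/* Non-sleeping read from a shmem backing page into user space; returns
 * -EFAULT if the atomic copy cannot complete without faulting.
 */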
static inline int
fast_shmem_read(struct page **pages,
                loff_t page_base, int page_offset,
                char __user *data,
                int length)
{
        int unwritten;
        char *vaddr;

        vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
        unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
        kunmap_atomic(vaddr, KM_USER0);

        return unwritten ? -EFAULT : 0;
}

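/* Tiled objects need manual (un)swizzling by the CPU when the platform's
 * swizzle mode depends on bit 17 of the page's physical address.
 */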
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
        drm_i915_private_t *dev_priv = obj->dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj_priv->tiling_mode != I915_TILING_NONE;
}

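/* Sleeping page-to-page copy via kmap(), used by the slow pread/pwrite paths. */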
static inline void
slow_shmem_copy(struct page *dst_page,
                int dst_offset,
                struct page *src_page,
                int src_offset,
                int length)
{
        char *dst_vaddr, *src_vaddr;

        dst_vaddr = kmap(dst_page);
        src_vaddr = kmap(src_page);

        memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

        kunmap(src_page);
        kunmap(dst_page);
}

static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
                      int gpu_offset,
                      struct page *cpu_page,
                      int cpu_offset,
                      int length,
                      int is_read)
{
        char *gpu_vaddr, *cpu_vaddr;

        /* Use the unswizzled path if this page isn't affected. */
        if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
                if (is_read)
                        return slow_shmem_copy(cpu_page, cpu_offset,
                                               gpu_page, gpu_offset, length);
                else
                        return slow_shmem_copy(gpu_page, gpu_offset,
                                               cpu_page, cpu_offset, length);
        }

        gpu_vaddr = kmap(gpu_page);
        cpu_vaddr = kmap(cpu_page);

        /* Copy the data, XORing A6 with A17 (1). The user already knows he's
         * XORing with the other bits (A9 for Y, A9 and A10 for X)
         */
        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                if (is_read) {
                        memcpy(cpu_vaddr + cpu_offset,
                               gpu_vaddr + swizzled_gpu_offset,
                               this_length);
                } else {
                        memcpy(gpu_vaddr + swizzled_gpu_offset,
                               cpu_vaddr + cpu_offset,
                               this_length);
                }
                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        kunmap(cpu_page);
        kunmap(gpu_page);
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
                goto fail_unlock;

        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
                                                        args->size);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = to_intel_bo(obj);
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_shmem_read(obj_priv->pages,
                                      page_base, page_offset,
                                      user_data, page_length);
                if (ret)
                        goto fail_put_pages;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

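/* Fetch the object's backing pages, evicting other buffers to make room and
 * retrying once if the first attempt fails with -ENOMEM.
 */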
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
        int ret;

        ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

        /* If we've insufficient memory to map in the pages, attempt
         * to make some space by throwing out some old buffers.
         */
        if (ret == -ENOMEM) {
                struct drm_device *dev = obj->dev;

                ret = i915_gem_evict_something(dev, obj->size,
                                               i915_gem_get_gtt_alignment(obj));
                if (ret)
                        return ret;

                ret = i915_gem_object_get_pages(obj, 0);
        }

        return ret;
}

/**
 * This is the fallback shmem pread path, which allocates temporary storage
 * in kernel space to copy_to_user into outside of the struct_mutex, so we
 * can copy out of the object's backing pages while holding the struct mutex
 * and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
        loff_t offset, pinned_pages, i;
        loff_t first_data_page, last_data_page, num_pages;
        int shmem_page_index, shmem_page_offset;
        int data_page_index, data_page_offset;
        int page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;
        int do_bit17_swizzling;

        remain = args->size;

        /* Pin the user pages containing the data.  We can't fault while
         * holding the struct mutex, yet we want to hold it while
         * dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;

        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 1, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto fail_put_user_pages;
        }

        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto fail_put_user_pages;

        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
                goto fail_unlock;

        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
                                                        args->size);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = to_intel_bo(obj);
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * shmem_page_index = page number within shmem file
                 * shmem_page_offset = offset within page in shmem file
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset with data_page_index page.
                 * page_length = bytes to copy for this page
                 */
                shmem_page_index = offset / PAGE_SIZE;
                shmem_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                if (do_bit17_swizzling) {
                        slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
                                              page_length,
                                              1);
                } else {
                        slow_shmem_copy(user_pages[data_page_index],
                                        data_page_offset,
                                        obj_priv->pages[shmem_page_index],
                                        shmem_page_offset,
                                        page_length);
                }

                remain -= page_length;
                data_ptr += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
        for (i = 0; i < pinned_pages; i++) {
                SetPageDirty(user_pages[i]);
                page_cache_release(user_pages[i]);
        }
        drm_free_large(user_pages);

        return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;
        obj_priv = to_intel_bo(obj);

        /* Bounds check source. */
        if (args->offset > obj->size || args->size > obj->size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        if (args->size == 0)
                goto out;

        if (!access_ok(VERIFY_WRITE,
                       (char __user *)(uintptr_t)args->data_ptr,
                       args->size)) {
                ret = -EFAULT;
                goto out;
        }

        ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
                                       args->size);
        if (ret) {
                ret = -EFAULT;
                goto out;
        }

        if (i915_gem_object_needs_bit17_swizzle(obj)) {
                ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
        } else {
                ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
                if (ret != 0)
                        ret = i915_gem_shmem_pread_slow(dev, obj, args,
                                                        file_priv);
        }

out:
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        char *vaddr_atomic;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
        return unwritten;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
                  loff_t gtt_base, int gtt_offset,
                  struct page *user_page, int user_offset,
                  int length)
{
        char __iomem *dst_vaddr;
        char *src_vaddr;

        dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
        src_vaddr = kmap(user_page);

        memcpy_toio(dst_vaddr + gtt_offset,
                    src_vaddr + user_offset,
                    length);

        kunmap(user_page);
        io_mapping_unmap(dst_vaddr);
}

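/* Non-sleeping counterpart of fast_shmem_read: copy user data into a
 * kmap_atomic'd backing page; returns non-zero if the copy would fault.
 */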
static inline int
fast_shmem_write(struct page **pages,
                 loff_t page_base, int page_offset,
                 char __user *data,
                 int length)
{
        char *vaddr;
        int ret;

        vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
        ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
        kunmap_atomic(vaddr, KM_USER0);

        return ret;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
                if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
                                    page_offset, user_data, page_length))
                        return -EFAULT;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        return 0;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t gtt_page_base, offset;
        loff_t first_data_page, last_data_page, num_pages;
        loff_t pinned_pages, i;
        struct page **user_pages;
        struct mm_struct *mm = current->mm;
        int gtt_page_offset, data_page_offset, data_page_index, page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;

        remain = args->size;

        /* Pin the user pages containing the data.  We can't fault while
         * holding the struct mutex, and all of the pwrite implementations
         * want to hold it while dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;

        mutex_unlock(&dev->struct_mutex);
        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        mutex_lock(&dev->struct_mutex);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto out_unpin_pages;
        }

        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
                goto out_unpin_pages;

        obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * gtt_page_base = page offset within aperture
                 * gtt_page_offset = offset within page in aperture
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset with data_page_index page.
                 * page_length = bytes to copy for this page
                 */
                gtt_page_base = offset & PAGE_MASK;
                gtt_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((gtt_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - gtt_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                slow_kernel_write(dev_priv->mm.gtt_mapping,
                                  gtt_page_base, gtt_page_offset,
                                  user_pages[data_page_index],
                                  data_page_offset,
                                  page_length);

                remain -= page_length;
                offset += page_length;
                data_ptr += page_length;
        }

out_unpin_pages:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
        drm_free_large(user_pages);

        return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        obj_priv = to_intel_bo(obj);
        offset = args->offset;
        obj_priv->dirty = 1;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                if (fast_shmem_write(obj_priv->pages,
                                     page_base, page_offset,
                                     user_data, page_length))
                        return -EFAULT;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        return 0;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
        loff_t offset, pinned_pages, i;
        loff_t first_data_page, last_data_page, num_pages;
        int shmem_page_index, shmem_page_offset;
        int data_page_index, data_page_offset;
        int page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;
        int do_bit17_swizzling;

        remain = args->size;

        /* Pin the user pages containing the data.  We can't fault while
         * holding the struct mutex, and all of the pwrite implementations
         * want to hold it while dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;

        mutex_unlock(&dev->struct_mutex);
        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        mutex_lock(&dev->struct_mutex);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto out;
        }

        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret)
                goto out;

        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        obj_priv = to_intel_bo(obj);
        offset = args->offset;
        obj_priv->dirty = 1;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * shmem_page_index = page number within shmem file
                 * shmem_page_offset = offset within page in shmem file
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset with data_page_index page.
                 * page_length = bytes to copy for this page
                 */
                shmem_page_index = offset / PAGE_SIZE;
                shmem_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                if (do_bit17_swizzling) {
                        slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
                                              page_length,
                                              0);
                } else {
                        slow_shmem_copy(obj_priv->pages[shmem_page_index],
                                        shmem_page_offset,
                                        user_pages[data_page_index],
                                        data_page_offset,
                                        page_length);
                }

                remain -= page_length;
                data_ptr += page_length;
                offset += page_length;
        }

out:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
        drm_free_large(user_pages);

        return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (obj == NULL)
                return -ENOENT;
        obj_priv = to_intel_bo(obj);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret) {
                drm_gem_object_unreference_unlocked(obj);
                return ret;
        }

        /* Bounds check destination. */
        if (args->offset > obj->size || args->size > obj->size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        if (args->size == 0)
                goto out;

        if (!access_ok(VERIFY_READ,
                       (char __user *)(uintptr_t)args->data_ptr,
                       args->size)) {
                ret = -EFAULT;
                goto out;
        }

        ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
                                      args->size);
        if (ret) {
                ret = -EFAULT;
                goto out;
        }

        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
                 obj_priv->gtt_space &&
                 obj->write_domain != I915_GEM_DOMAIN_CPU) {
                ret = i915_gem_object_pin(obj, 0);
                if (ret)
                        goto out;

                ret = i915_gem_object_set_to_gtt_domain(obj, 1);
                if (ret)
                        goto out_unpin;

                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
                if (ret == -EFAULT)
                        ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);

out_unpin:
                i915_gem_object_unpin(obj);
        } else {
                ret = i915_gem_object_get_pages_or_evict(obj);
                if (ret)
                        goto out;

                ret = i915_gem_object_set_to_cpu_domain(obj, 1);
                if (ret)
                        goto out_put;

                ret = -EFAULT;
                if (!i915_gem_object_needs_bit17_swizzle(obj))
                        ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
                if (ret == -EFAULT)
                        ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);

out_put:
                i915_gem_object_put_pages(obj);
        }

out:
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
|
2008-11-10 18:53:25 +00:00
|
|
|
* Called when user space prepares to use an object with the CPU, either
|
|
|
|
* through the mmap ioctl's mapping or a GTT mapping.
|
2008-07-30 19:06:12 +00:00
|
|
|
*/
|
|
|
|
int
|
|
|
|
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
2009-08-29 19:49:51 +00:00
|
|
|
struct drm_i915_private *dev_priv = dev->dev_private;
|
2008-07-30 19:06:12 +00:00
|
|
|
struct drm_i915_gem_set_domain *args = data;
|
|
|
|
struct drm_gem_object *obj;
|
2009-08-17 20:31:43 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv;
|
2008-11-10 18:53:25 +00:00
|
|
|
uint32_t read_domains = args->read_domains;
|
|
|
|
uint32_t write_domain = args->write_domain;
|
2008-07-30 19:06:12 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!(dev->driver->driver_features & DRIVER_GEM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
2008-11-10 18:53:25 +00:00
|
|
|
/* Only handle setting domains to types used by the CPU. */
|
2009-06-06 08:46:02 +00:00
|
|
|
if (write_domain & I915_GEM_GPU_DOMAINS)
|
2008-11-10 18:53:25 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
2009-06-06 08:46:02 +00:00
|
|
|
if (read_domains & I915_GEM_GPU_DOMAINS)
|
2008-11-10 18:53:25 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* Having something in the write domain implies it's in the read
|
|
|
|
* domain, and only that read domain. Enforce that in the request.
|
|
|
|
*/
|
|
|
|
if (write_domain != 0 && read_domains != write_domain)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
|
|
|
if (obj == NULL)
|
2010-08-04 13:19:46 +00:00
|
|
|
return -ENOENT;
|
2010-03-08 12:35:02 +00:00
|
|
|
obj_priv = to_intel_bo(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-09-25 10:22:51 +00:00
|
|
|
ret = i915_mutex_lock_interruptible(dev);
|
|
|
|
if (ret) {
|
|
|
|
drm_gem_object_unreference_unlocked(obj);
|
|
|
|
return ret;
|
|
|
|
}
|
2009-08-17 20:31:43 +00:00
|
|
|
|
|
|
|
intel_mark_busy(dev, obj);
|
|
|
|
|
2008-11-10 18:53:25 +00:00
|
|
|
if (read_domains & I915_GEM_DOMAIN_GTT) {
|
|
|
|
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
|
2008-11-26 21:58:13 +00:00
|
|
|
|
2009-08-29 19:49:51 +00:00
|
|
|
/* Update the LRU on the fence for the CPU access that's
|
|
|
|
* about to occur.
|
|
|
|
*/
|
|
|
|
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
|
2010-04-28 09:02:31 +00:00
|
|
|
struct drm_i915_fence_reg *reg =
|
|
|
|
&dev_priv->fence_regs[obj_priv->fence_reg];
|
|
|
|
list_move_tail(&reg->lru_list,
|
2009-08-29 19:49:51 +00:00
|
|
|
&dev_priv->mm.fence_list);
|
|
|
|
}
|
|
|
|
|
2008-11-26 21:58:13 +00:00
|
|
|
/* Silently promote "you're not bound, there was nothing to do"
|
|
|
|
* to success, since the client was just asking us to
|
|
|
|
* make sure everything was done.
|
|
|
|
*/
|
|
|
|
if (ret == -EINVAL)
|
|
|
|
ret = 0;
|
2008-11-10 18:53:25 +00:00
|
|
|
} else {
|
2008-11-14 21:35:19 +00:00
|
|
|
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
|
2008-11-10 18:53:25 +00:00
|
|
|
}
|
|
|
|
|
2010-08-07 20:45:03 +00:00
|
|
|
/* Maintain LRU order of "inactive" objects */
|
|
|
|
if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
|
|
|
|
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Called when user space has done writes to this buffer
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_sw_finish *args = data;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (!(dev->driver->driver_features & DRIVER_GEM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
2010-09-25 10:22:51 +00:00
|
|
|
if (obj == NULL)
|
2010-08-04 13:19:46 +00:00
|
|
|
return -ENOENT;
|
2010-09-25 10:22:51 +00:00
|
|
|
|
|
|
|
ret = i915_mutex_lock_interruptible(dev);
|
|
|
|
if (ret) {
|
|
|
|
drm_gem_object_unreference_unlocked(obj);
|
|
|
|
return ret;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Pinned buffers may be scanout, so flush the cache */
|
2010-09-29 10:39:53 +00:00
|
|
|
if (to_intel_bo(obj)->pin_count)
|
2008-11-14 21:35:19 +00:00
|
|
|
i915_gem_object_flush_cpu_write_domain(obj);
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return ret;
|
|
|
|
}
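
As a rough illustration of how the two ioctls above pair up from userspace, the sketch below moves an object to the CPU domain before writing it through a CPU mapping and signals completion with sw_finish afterwards. drm_fd, bo_handle and the mapped pointer are assumed to exist; only the i915_drm.h uapi definitions are relied on.

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch: prepare for CPU writes, do the writes, then tell the kernel we
 * are done so a pinned (scanout) buffer gets its CPU writes flushed. */
static int cpu_write_example(int drm_fd, uint32_t bo_handle,
                             void *map, size_t len)
{
        struct drm_i915_gem_set_domain sd;
        struct drm_i915_gem_sw_finish sf;
        int ret;

        memset(&sd, 0, sizeof(sd));
        sd.handle = bo_handle;
        sd.read_domains = I915_GEM_DOMAIN_CPU;
        sd.write_domain = I915_GEM_DOMAIN_CPU; /* write implies read */
        ret = ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
        if (ret)
                return ret;

        memset(map, 0, len);                   /* the actual CPU writes */

        memset(&sf, 0, sizeof(sf));
        sf.handle = bo_handle;
        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SW_FINISH, &sf);
}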
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Maps the contents of an object, returning the address it is mapped
|
|
|
|
* into.
|
|
|
|
*
|
|
|
|
* While the mapping holds a reference on the contents of the object, it doesn't
|
|
|
|
* imply a ref on the object itself.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_mmap *args = data;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
loff_t offset;
|
|
|
|
unsigned long addr;
|
|
|
|
|
|
|
|
if (!(dev->driver->driver_features & DRIVER_GEM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
|
|
|
if (obj == NULL)
|
2010-08-04 13:19:46 +00:00
|
|
|
return -ENOENT;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
offset = args->offset;
|
|
|
|
|
|
|
|
down_write(&current->mm->mmap_sem);
|
|
|
|
addr = do_mmap(obj->filp, 0, args->size,
|
|
|
|
PROT_READ | PROT_WRITE, MAP_SHARED,
|
|
|
|
args->offset);
|
|
|
|
up_write(&current->mm->mmap_sem);
|
2010-02-09 05:49:12 +00:00
|
|
|
drm_gem_object_unreference_unlocked(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
if (IS_ERR((void *)addr))
|
|
|
|
return addr;
|
|
|
|
|
|
|
|
args->addr_ptr = (uint64_t) addr;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
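
A hedged sketch of driving the ioctl above from userspace: the kernel performs the mmap on the object's shmem file on the caller's behalf and returns the resulting address in addr_ptr. drm_fd and bo_handle are assumed, and the helper name is invented for illustration.

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch: CPU-map `len` bytes of a GEM object starting at offset 0 and
 * return the address, or NULL on failure. */
static void *bo_mmap_cpu(int drm_fd, uint32_t bo_handle, uint64_t len)
{
        struct drm_i915_gem_mmap arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = bo_handle;
        arg.offset = 0;
        arg.size = len;
        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
                return NULL;

        return (void *)(uintptr_t)arg.addr_ptr;
}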
|
|
|
|
|
2008-11-12 18:03:55 +00:00
|
|
|
/**
|
|
|
|
* i915_gem_fault - fault a page into the GTT
|
|
|
|
* vma: VMA in question
|
|
|
|
* vmf: fault info
|
|
|
|
*
|
|
|
|
* The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
|
|
|
|
* from userspace. The fault handler takes care of binding the object to
|
|
|
|
* the GTT (if needed), allocating and programming a fence register (again,
|
|
|
|
* only if needed based on whether the old reg is still valid or the object
|
|
|
|
* is tiled) and inserting a new PTE into the faulting process.
|
|
|
|
*
|
|
|
|
* Note that the faulting process may involve evicting existing objects
|
|
|
|
* from the GTT and/or fence registers to make room. So performance may
|
|
|
|
* suffer if the GTT working set is large or there are few fence registers
|
|
|
|
* left.
|
|
|
|
*/
|
|
|
|
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|
|
|
{
|
|
|
|
struct drm_gem_object *obj = vma->vm_private_data;
|
|
|
|
struct drm_device *dev = obj->dev;
|
2010-08-07 20:45:03 +00:00
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2008-11-12 18:03:55 +00:00
|
|
|
pgoff_t page_offset;
|
|
|
|
unsigned long pfn;
|
|
|
|
int ret = 0;
|
2009-01-27 01:10:45 +00:00
|
|
|
bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
|
2008-11-12 18:03:55 +00:00
|
|
|
|
|
|
|
/* We don't use vmf->pgoff since that has the fake offset */
|
|
|
|
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
|
|
|
|
PAGE_SHIFT;
|
|
|
|
|
|
|
|
/* Now bind it into the GTT if needed */
|
|
|
|
mutex_lock(&dev->struct_mutex);
|
|
|
|
if (!obj_priv->gtt_space) {
|
2009-09-14 15:50:26 +00:00
|
|
|
ret = i915_gem_object_bind_to_gtt(obj, 0);
|
2009-09-22 23:43:56 +00:00
|
|
|
if (ret)
|
|
|
|
goto unlock;
|
2009-05-27 18:37:28 +00:00
|
|
|
|
|
|
|
ret = i915_gem_object_set_to_gtt_domain(obj, write);
|
2009-09-22 23:43:56 +00:00
|
|
|
if (ret)
|
|
|
|
goto unlock;
|
2008-11-12 18:03:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Need a new fence register? */
|
2009-08-29 19:49:51 +00:00
|
|
|
if (obj_priv->tiling_mode != I915_TILING_NONE) {
|
2010-09-14 12:03:28 +00:00
|
|
|
ret = i915_gem_object_get_fence_reg(obj, true);
|
2009-09-22 23:43:56 +00:00
|
|
|
if (ret)
|
|
|
|
goto unlock;
|
2009-01-27 18:33:49 +00:00
|
|
|
}
|
2008-11-12 18:03:55 +00:00
|
|
|
|
2010-08-07 20:45:03 +00:00
|
|
|
if (i915_gem_object_is_inactive(obj_priv))
|
|
|
|
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
|
|
|
|
|
2008-11-12 18:03:55 +00:00
|
|
|
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
|
|
|
|
page_offset;
|
|
|
|
|
|
|
|
/* Finally, remap it using the new GTT offset */
|
|
|
|
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
|
2009-09-22 23:43:56 +00:00
|
|
|
unlock:
|
2008-11-12 18:03:55 +00:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
|
|
|
|
switch (ret) {
|
2009-09-22 23:43:56 +00:00
|
|
|
case 0:
|
|
|
|
case -ERESTARTSYS:
|
|
|
|
return VM_FAULT_NOPAGE;
|
2008-11-12 18:03:55 +00:00
|
|
|
case -ENOMEM:
|
|
|
|
case -EAGAIN:
|
|
|
|
return VM_FAULT_OOM;
|
|
|
|
default:
|
2009-09-22 23:43:56 +00:00
|
|
|
return VM_FAULT_SIGBUS;
|
2008-11-12 18:03:55 +00:00
|
|
|
}
|
|
|
|
}
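
To make the pfn computation above concrete: it is simply the aperture base plus the object's GTT offset, expressed in pages, plus the page index of the fault within the object. A made-up example, assuming an aperture at 0xd0000000 and an object bound at GTT offset 0x100000: a fault on the object's fourth page (page_offset == 3) resolves to pfn = ((0xd0000000 + 0x100000) >> 12) + 3 = 0xd0100 + 3 = 0xd0103. The addresses are illustrative, not taken from the source.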
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i915_gem_create_mmap_offset - create a fake mmap offset for an object
|
|
|
|
* @obj: obj in question
|
|
|
|
*
|
|
|
|
* GEM memory mapping works by handing back to userspace a fake mmap offset
|
|
|
|
* it can use in a subsequent mmap(2) call. The DRM core code then looks
|
|
|
|
* up the object based on the offset and sets up the various memory mapping
|
|
|
|
* structures.
|
|
|
|
*
|
|
|
|
* This routine allocates and attaches a fake offset for @obj.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = obj->dev;
|
|
|
|
struct drm_gem_mm *mm = dev->mm_private;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2008-11-12 18:03:55 +00:00
|
|
|
struct drm_map_list *list;
|
2009-02-02 05:55:46 +00:00
|
|
|
struct drm_local_map *map;
|
2008-11-12 18:03:55 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
/* Set the object up for mmap'ing */
|
|
|
|
list = &obj->map_list;
|
2009-03-24 19:23:04 +00:00
|
|
|
list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
|
2008-11-12 18:03:55 +00:00
|
|
|
if (!list->map)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
map = list->map;
|
|
|
|
map->type = _DRM_GEM;
|
|
|
|
map->size = obj->size;
|
|
|
|
map->handle = obj;
|
|
|
|
|
|
|
|
/* Get a DRM GEM mmap offset allocated... */
|
|
|
|
list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
|
|
|
|
obj->size / PAGE_SIZE, 0, 0);
|
|
|
|
if (!list->file_offset_node) {
|
|
|
|
DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
|
2010-09-21 14:05:24 +00:00
|
|
|
ret = -ENOSPC;
|
2008-11-12 18:03:55 +00:00
|
|
|
goto out_free_list;
|
|
|
|
}
|
|
|
|
|
|
|
|
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
|
|
|
|
obj->size / PAGE_SIZE, 0);
|
|
|
|
if (!list->file_offset_node) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out_free_list;
|
|
|
|
}
|
|
|
|
|
|
|
|
list->hash.key = list->file_offset_node->start;
|
2010-09-21 14:05:24 +00:00
|
|
|
ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
|
|
|
|
if (ret) {
|
2008-11-12 18:03:55 +00:00
|
|
|
DRM_ERROR("failed to add to map hash\n");
|
|
|
|
goto out_free_mm;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* By now we should be all set, any drm_mmap request on the offset
|
|
|
|
* below will get to our mmap & fault handler */
|
|
|
|
obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_free_mm:
|
|
|
|
drm_mm_put_block(list->file_offset_node);
|
|
|
|
out_free_list:
|
2009-03-24 19:23:04 +00:00
|
|
|
kfree(list->map);
|
2008-11-12 18:03:55 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-07-10 07:18:50 +00:00
|
|
|
/**
|
|
|
|
* i915_gem_release_mmap - remove physical page mappings
|
|
|
|
* @obj: obj in question
|
|
|
|
*
|
tree-wide: fix assorted typos all over the place
That is "success", "unknown", "through", "performance", "[re|un]mapping"
, "access", "default", "reasonable", "[con]currently", "temperature"
, "channel", "[un]used", "application", "example","hierarchy", "therefore"
, "[over|under]flow", "contiguous", "threshold", "enough" and others.
Signed-off-by: André Goddard Rosa <andre.goddard@gmail.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
2009-11-14 15:09:05 +00:00
|
|
|
* Preserve the reservation of the mmapping with the DRM core code, but
|
2009-07-10 07:18:50 +00:00
|
|
|
* relinquish ownership of the pages back to the system.
|
|
|
|
*
|
|
|
|
* It is vital that we remove the page mapping if we have mapped a tiled
|
|
|
|
* object through the GTT and then lose the fence register due to
|
|
|
|
* resource pressure. Similarly if the object has been moved out of the
|
|
|
|
* aperture, then pages mapped into userspace must be revoked. Removing the
|
|
|
|
* mapping will then trigger a page fault on the next user access, allowing
|
|
|
|
* fixup by i915_gem_fault().
|
|
|
|
*/
|
2009-07-10 20:02:26 +00:00
|
|
|
void
|
2009-07-10 07:18:50 +00:00
|
|
|
i915_gem_release_mmap(struct drm_gem_object *obj)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = obj->dev;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2009-07-10 07:18:50 +00:00
|
|
|
|
|
|
|
if (dev->dev_mapping)
|
|
|
|
unmap_mapping_range(dev->dev_mapping,
|
|
|
|
obj_priv->mmap_offset, obj->size, 1);
|
|
|
|
}
|
|
|
|
|
2009-02-11 22:01:46 +00:00
|
|
|
static void
|
|
|
|
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = obj->dev;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2009-02-11 22:01:46 +00:00
|
|
|
struct drm_gem_mm *mm = dev->mm_private;
|
|
|
|
struct drm_map_list *list;
|
|
|
|
|
|
|
|
list = &obj->map_list;
|
|
|
|
drm_ht_remove_item(&mm->offset_hash, &list->hash);
|
|
|
|
|
|
|
|
if (list->file_offset_node) {
|
|
|
|
drm_mm_put_block(list->file_offset_node);
|
|
|
|
list->file_offset_node = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (list->map) {
|
2009-03-24 19:23:04 +00:00
|
|
|
kfree(list->map);
|
2009-02-11 22:01:46 +00:00
|
|
|
list->map = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
obj_priv->mmap_offset = 0;
|
|
|
|
}
|
|
|
|
|
2008-11-12 18:03:55 +00:00
|
|
|
/**
|
|
|
|
* i915_gem_get_gtt_alignment - return required GTT alignment for an object
|
|
|
|
* @obj: object to check
|
|
|
|
*
|
|
|
|
* Return the required GTT alignment for an object, taking into account
|
|
|
|
* potential fence register mapping if needed.
|
|
|
|
*/
|
|
|
|
static uint32_t
|
|
|
|
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = obj->dev;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2008-11-12 18:03:55 +00:00
|
|
|
int start, i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Minimum alignment is 4k (GTT page size), but might be greater
|
|
|
|
* if a fence register is needed for the object.
|
|
|
|
*/
|
2010-09-16 23:32:17 +00:00
|
|
|
if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
|
2008-11-12 18:03:55 +00:00
|
|
|
return 4096;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Previous chips need to be aligned to the size of the smallest
|
|
|
|
* fence register that can contain the object.
|
|
|
|
*/
|
2010-09-16 23:32:17 +00:00
|
|
|
if (INTEL_INFO(dev)->gen == 3)
|
2008-11-12 18:03:55 +00:00
|
|
|
start = 1024*1024;
|
|
|
|
else
|
|
|
|
start = 512*1024;
|
|
|
|
|
|
|
|
for (i = start; i < obj->size; i <<= 1)
|
|
|
|
;
|
|
|
|
|
|
|
|
return i;
|
|
|
|
}
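
A worked example of the loop above, with illustrative sizes not taken from the source: on a gen3 part a tiled 300 KiB object starts at 1 MiB, which already covers the size, so the alignment is 1 MiB; a tiled 3 MiB object keeps doubling (1 MiB, 2 MiB, 4 MiB) until the fence size covers it, giving 4 MiB. Untiled objects and gen4+ hardware always take the early 4 KiB return.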
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
|
|
|
|
* @dev: DRM device
|
|
|
|
* @data: GTT mapping ioctl data
|
|
|
|
* @file_priv: GEM object info
|
|
|
|
*
|
|
|
|
* Simply returns the fake offset to userspace so it can mmap it.
|
|
|
|
* The mmap call will end up in drm_gem_mmap(), which will set things
|
|
|
|
* up so we can get faults in the handler above.
|
|
|
|
*
|
|
|
|
* The fault handler will take care of binding the object into the GTT
|
|
|
|
* (since it may have been evicted to make room for something), allocating
|
|
|
|
* a fence register, and mapping the appropriate aperture address into
|
|
|
|
* userspace.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_mmap_gtt *args = data;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!(dev->driver->driver_features & DRIVER_GEM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
|
|
|
if (obj == NULL)
|
2010-08-04 13:19:46 +00:00
|
|
|
return -ENOENT;
|
2008-11-12 18:03:55 +00:00
|
|
|
|
2010-09-25 10:22:51 +00:00
|
|
|
ret = i915_mutex_lock_interruptible(dev);
|
|
|
|
if (ret) {
|
|
|
|
drm_gem_object_unreference_unlocked(obj);
|
|
|
|
return ret;
|
|
|
|
}
|
2008-11-12 18:03:55 +00:00
|
|
|
|
2010-03-08 12:35:02 +00:00
|
|
|
obj_priv = to_intel_bo(obj);
|
2008-11-12 18:03:55 +00:00
|
|
|
|
2009-09-22 17:46:17 +00:00
|
|
|
if (obj_priv->madv != I915_MADV_WILLNEED) {
|
|
|
|
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-11-12 18:03:55 +00:00
|
|
|
if (!obj_priv->mmap_offset) {
|
|
|
|
ret = i915_gem_create_mmap_offset(obj);
|
2009-02-11 14:26:31 +00:00
|
|
|
if (ret) {
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
2008-11-12 18:03:55 +00:00
|
|
|
return ret;
|
2009-02-11 14:26:31 +00:00
|
|
|
}
|
2008-11-12 18:03:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
args->offset = obj_priv->mmap_offset;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Pull it into the GTT so that we have a page list (makes the
|
|
|
|
* initial fault faster and any subsequent flushing possible).
|
|
|
|
*/
|
|
|
|
if (!obj_priv->agp_mem) {
|
2009-09-14 15:50:26 +00:00
|
|
|
ret = i915_gem_object_bind_to_gtt(obj, 0);
|
2008-11-12 18:03:55 +00:00
|
|
|
if (ret) {
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
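
The GTT mapping flow described above is two-step from userspace: ask the kernel for the fake offset, then mmap the DRM fd at that offset so later accesses fault into i915_gem_fault(). A minimal sketch, assuming drm_fd and bo_handle and using only the i915_drm.h uapi:

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Sketch: GTT-map `len` bytes of a GEM object. Returns MAP_FAILED on error. */
static void *bo_mmap_gtt(int drm_fd, uint32_t bo_handle, size_t len)
{
        struct drm_i915_gem_mmap_gtt arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = bo_handle;
        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                return MAP_FAILED;

        /* The returned offset is only a lookup token; the real PTEs are
         * installed lazily by the fault handler. */
        return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                    drm_fd, arg.offset);
}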
|
|
|
|
|
2010-09-27 14:51:07 +00:00
|
|
|
static void
|
2009-03-19 21:10:50 +00:00
|
|
|
i915_gem_object_put_pages(struct drm_gem_object *obj)
|
2008-07-30 19:06:12 +00:00
|
|
|
{
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
int page_count = obj->size / PAGE_SIZE;
|
|
|
|
int i;
|
|
|
|
|
2009-03-19 21:10:50 +00:00
|
|
|
BUG_ON(obj_priv->pages_refcount == 0);
|
2009-09-22 13:24:13 +00:00
|
|
|
BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2009-03-19 21:10:50 +00:00
|
|
|
if (--obj_priv->pages_refcount != 0)
|
|
|
|
return;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2009-03-12 23:56:27 +00:00
|
|
|
if (obj_priv->tiling_mode != I915_TILING_NONE)
|
|
|
|
i915_gem_object_save_bit_17_swizzle(obj);
|
|
|
|
|
2009-09-14 15:50:29 +00:00
|
|
|
if (obj_priv->madv == I915_MADV_DONTNEED)
|
2009-09-20 22:03:19 +00:00
|
|
|
obj_priv->dirty = 0;
|
2009-09-14 15:50:29 +00:00
|
|
|
|
|
|
|
for (i = 0; i < page_count; i++) {
|
|
|
|
if (obj_priv->dirty)
|
|
|
|
set_page_dirty(obj_priv->pages[i]);
|
|
|
|
|
|
|
|
if (obj_priv->madv == I915_MADV_WILLNEED)
|
2009-03-19 21:10:50 +00:00
|
|
|
mark_page_accessed(obj_priv->pages[i]);
|
2009-09-14 15:50:29 +00:00
|
|
|
|
|
|
|
page_cache_release(obj_priv->pages[i]);
|
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
obj_priv->dirty = 0;
|
|
|
|
|
2009-05-08 23:13:25 +00:00
|
|
|
drm_free_large(obj_priv->pages);
|
2009-03-19 21:10:50 +00:00
|
|
|
obj_priv->pages = NULL;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2010-09-28 09:07:56 +00:00
|
|
|
static uint32_t
|
|
|
|
i915_gem_next_request_seqno(struct drm_device *dev,
|
|
|
|
struct intel_ring_buffer *ring)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
|
|
|
|
ring->outstanding_lazy_request = true;
|
|
|
|
return dev_priv->next_seqno;
|
|
|
|
}
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
static void
|
2010-02-11 21:16:02 +00:00
|
|
|
i915_gem_object_move_to_active(struct drm_gem_object *obj,
|
2010-05-21 01:08:56 +00:00
|
|
|
struct intel_ring_buffer *ring)
|
2008-07-30 19:06:12 +00:00
|
|
|
{
|
2010-09-28 09:07:56 +00:00
|
|
|
struct drm_device *dev = obj->dev;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2010-09-28 09:07:56 +00:00
|
|
|
uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
|
2010-02-11 21:16:02 +00:00
|
|
|
|
2010-05-21 01:08:56 +00:00
|
|
|
BUG_ON(ring == NULL);
|
|
|
|
obj_priv->ring = ring;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
/* Add a reference if we're newly entering the active list. */
|
|
|
|
if (!obj_priv->active) {
|
|
|
|
drm_gem_object_reference(obj);
|
|
|
|
obj_priv->active = 1;
|
|
|
|
}
|
drm/i915: allow lazy emitting of requests
Sometimes (like when flushing in preparation of batchbuffer execution)
we know that we'll emit a request but haven't yet done so. Allow this
case by simply taking the next seqno by default. Ensure that a request
is eventually emitted before waiting for an request by issuing it
in i915_wait_request iff this is not yet done.
Also replace one open-coded version of i915_gem_object_wait_rendering,
to prevent future code-diversion.
Chris Wilson asked me to explain and clarify what this patch does and why.
Here it goes:
Old way of moving objects onto the active list and associating them with a
request:
1. i915_add_request + store the returned seqno somewhere
2. i915_gem_object_move_to_active (with the stored seqno as parameter)
For the current users, this is all fine. But I'd like to associate objects
(and fence regs) with the batchbuffer request deep down in the execbuf
call-chain. I thought about three ways of implementing this.
a) Don't care, just emit request when we need a new seqno. When heavily
pipelining fence reg changes, this would have caused tons of superfluous
requests (and corresponding irqs).
b) Thread all changed fences, objects, whatever through the execbuf-maze,
so that when we emit a request, we can store the new seqno at all the right
places.
c) Kill that seqno-threading-around business by simply storing the next
seqno, i.e. allow 2. to be done before 1. in the above sequence.
I've decided to implement c) (in this patch). The following patches are
just fall-out that resulted from this small conceptual change.
* We can handle the flushing list processing where we actually emit a flush
(i915_gem_flush and i915_retire_commands) instead of in i915_add_request.
The code makes IMHO more sense this way (and i915_add_request loses the
flush_domains parameter, obviously).
* We can avoid emitting unnecessary requests. IMHO there's no point in
emitting more than one request per batchbuffer (with or without an
corresponding irq).
* By enforcing 2. before 1. ordering in the above sequence the seqno
argument of i915_gem_object_move_to_active is redundant and can be
dropped.
v2: Now i915_wait_request issues request if it is not yet emitted.
Also introduce i915_gem_next_request_seqno(dev) just in case we ever
need to do some prep work before using a new seqno.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
[ickle: Keep i915_gem_object_set_to_display_plane() uninterruptible.]
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2010-02-11 21:13:59 +00:00
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
/* Move from whatever list we were on to the tail of execution. */
|
2010-05-21 01:08:56 +00:00
|
|
|
list_move_tail(&obj_priv->list, &ring->active_list);
|
2010-09-28 09:07:56 +00:00
|
|
|
obj_priv->last_rendering_seqno = seqno;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2008-11-07 00:00:31 +00:00
|
|
|
static void
|
|
|
|
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = obj->dev;
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2008-11-07 00:00:31 +00:00
|
|
|
|
|
|
|
BUG_ON(!obj_priv->active);
|
|
|
|
list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
|
|
|
|
obj_priv->last_rendering_seqno = 0;
|
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2009-09-20 22:03:54 +00:00
|
|
|
/* Immediately discard the backing storage */
|
|
|
|
static void
|
|
|
|
i915_gem_object_truncate(struct drm_gem_object *obj)
|
|
|
|
{
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2009-09-22 13:24:13 +00:00
|
|
|
struct inode *inode;
|
2009-09-20 22:03:54 +00:00
|
|
|
|
2010-08-07 10:01:30 +00:00
|
|
|
/* Our goal here is to return as much of the memory as
|
|
|
|
* is possible back to the system as we are called from OOM.
|
|
|
|
* To do this we must instruct the shmfs to drop all of its
|
|
|
|
* backing pages, *now*. Here we mirror the actions taken
|
|
|
|
* by shmem_delete_inode() to release the backing store.
|
|
|
|
*/
|
2009-09-22 13:24:13 +00:00
|
|
|
inode = obj->filp->f_path.dentry->d_inode;
|
2010-08-07 10:01:30 +00:00
|
|
|
truncate_inode_pages(inode->i_mapping, 0);
|
|
|
|
if (inode->i_op->truncate_range)
|
|
|
|
inode->i_op->truncate_range(inode, 0, (loff_t)-1);
|
2009-09-22 13:24:13 +00:00
|
|
|
|
|
|
|
obj_priv->madv = __I915_MADV_PURGED;
|
2009-09-20 22:03:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
|
|
|
|
{
|
|
|
|
return obj_priv->madv == I915_MADV_DONTNEED;
|
|
|
|
}
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
static void
|
|
|
|
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = obj->dev;
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
if (obj_priv->pin_count != 0)
|
2010-09-20 16:36:15 +00:00
|
|
|
list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
|
2008-07-30 19:06:12 +00:00
|
|
|
else
|
|
|
|
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
|
|
|
|
|
2010-02-07 15:20:18 +00:00
|
|
|
BUG_ON(!list_empty(&obj_priv->gpu_write_list));
|
|
|
|
|
2008-11-07 00:00:31 +00:00
|
|
|
obj_priv->last_rendering_seqno = 0;
|
2010-05-21 01:08:56 +00:00
|
|
|
obj_priv->ring = NULL;
|
2008-07-30 19:06:12 +00:00
|
|
|
if (obj_priv->active) {
|
|
|
|
obj_priv->active = 0;
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
}
|
2010-09-29 15:10:57 +00:00
|
|
|
WARN_ON(i915_verify_lists(dev));
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2010-09-18 10:02:01 +00:00
|
|
|
static void
|
2010-02-19 10:51:59 +00:00
|
|
|
i915_gem_process_flushing_list(struct drm_device *dev,
|
2010-02-11 21:29:04 +00:00
|
|
|
uint32_t flush_domains,
|
2010-05-21 01:08:56 +00:00
|
|
|
struct intel_ring_buffer *ring)
|
2010-02-19 10:51:59 +00:00
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
struct drm_i915_gem_object *obj_priv, *next;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(obj_priv, next,
|
|
|
|
&dev_priv->mm.gpu_write_list,
|
|
|
|
gpu_write_list) {
|
2010-04-09 19:05:09 +00:00
|
|
|
struct drm_gem_object *obj = &obj_priv->base;
|
2010-02-19 10:51:59 +00:00
|
|
|
|
2010-09-14 16:04:02 +00:00
|
|
|
if (obj->write_domain & flush_domains &&
|
|
|
|
obj_priv->ring == ring) {
|
2010-02-19 10:51:59 +00:00
|
|
|
uint32_t old_write_domain = obj->write_domain;
|
|
|
|
|
|
|
|
obj->write_domain = 0;
|
|
|
|
list_del_init(&obj_priv->gpu_write_list);
|
2010-02-11 21:16:02 +00:00
|
|
|
i915_gem_object_move_to_active(obj, ring);
|
2010-02-19 10:51:59 +00:00
|
|
|
|
|
|
|
/* update the fence lru list */
|
2010-04-28 09:02:31 +00:00
|
|
|
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
|
|
|
|
struct drm_i915_fence_reg *reg =
|
|
|
|
&dev_priv->fence_regs[obj_priv->fence_reg];
|
|
|
|
list_move_tail(&reg->lru_list,
|
2010-02-19 10:51:59 +00:00
|
|
|
&dev_priv->mm.fence_list);
|
2010-04-28 09:02:31 +00:00
|
|
|
}
|
2010-02-19 10:51:59 +00:00
|
|
|
|
|
|
|
trace_i915_gem_object_change_domain(obj,
|
|
|
|
obj->read_domains,
|
|
|
|
old_write_domain);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-05-21 01:08:55 +00:00
|
|
|
|
2009-09-15 20:57:36 +00:00
|
|
|
uint32_t
|
2010-02-11 21:29:04 +00:00
|
|
|
i915_add_request(struct drm_device *dev,
|
2010-09-24 15:02:42 +00:00
|
|
|
struct drm_file *file,
|
2010-08-12 11:36:12 +00:00
|
|
|
struct drm_i915_gem_request *request,
|
2010-02-11 21:29:04 +00:00
|
|
|
struct intel_ring_buffer *ring)
|
2008-07-30 19:06:12 +00:00
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
2010-09-24 15:02:42 +00:00
|
|
|
struct drm_i915_file_private *file_priv = NULL;
|
2008-07-30 19:06:12 +00:00
|
|
|
uint32_t seqno;
|
|
|
|
int was_empty;
|
|
|
|
|
2010-09-24 15:02:42 +00:00
|
|
|
if (file != NULL)
|
|
|
|
file_priv = file->driver_priv;
|
2009-06-03 07:27:35 +00:00
|
|
|
|
2010-08-12 11:36:12 +00:00
|
|
|
if (request == NULL) {
|
|
|
|
request = kzalloc(sizeof(*request), GFP_KERNEL);
|
|
|
|
if (request == NULL)
|
|
|
|
return 0;
|
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-09-24 15:02:42 +00:00
|
|
|
seqno = ring->add_request(dev, ring, 0);
|
2010-09-28 09:07:56 +00:00
|
|
|
ring->outstanding_lazy_request = false;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
request->seqno = seqno;
|
2010-05-21 01:08:56 +00:00
|
|
|
request->ring = ring;
|
2008-07-30 19:06:12 +00:00
|
|
|
request->emitted_jiffies = jiffies;
|
2010-05-21 01:08:56 +00:00
|
|
|
was_empty = list_empty(&ring->request_list);
|
|
|
|
list_add_tail(&request->list, &ring->request_list);
|
|
|
|
|
2010-09-24 15:02:42 +00:00
|
|
|
if (file_priv) {
|
2010-09-26 10:03:27 +00:00
|
|
|
spin_lock(&file_priv->mm.lock);
|
2010-09-24 15:02:42 +00:00
|
|
|
request->file_priv = file_priv;
|
2009-06-03 07:27:35 +00:00
|
|
|
list_add_tail(&request->client_list,
|
2010-09-24 15:02:42 +00:00
|
|
|
&file_priv->mm.request_list);
|
2010-09-26 10:03:27 +00:00
|
|
|
spin_unlock(&file_priv->mm.lock);
|
2009-06-03 07:27:35 +00:00
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2009-09-14 21:48:44 +00:00
|
|
|
if (!dev_priv->mm.suspended) {
|
2010-09-13 22:44:34 +00:00
|
|
|
mod_timer(&dev_priv->hangcheck_timer,
|
|
|
|
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
|
2009-09-14 21:48:44 +00:00
|
|
|
if (was_empty)
|
2010-09-13 22:44:34 +00:00
|
|
|
queue_delayed_work(dev_priv->wq,
|
|
|
|
&dev_priv->mm.retire_work, HZ);
|
2009-09-14 21:48:44 +00:00
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
return seqno;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Command execution barrier
|
|
|
|
*
|
|
|
|
* Ensures that all commands in the ring are finished
|
|
|
|
* before signalling the CPU
|
|
|
|
*/
|
2010-02-11 21:29:04 +00:00
|
|
|
static void
|
2010-05-21 01:08:56 +00:00
|
|
|
i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
|
2008-07-30 19:06:12 +00:00
|
|
|
{
|
|
|
|
uint32_t flush_domains = 0;
|
|
|
|
|
|
|
|
/* The sampler always gets flushed on i965 (sigh) */
|
2010-09-16 23:32:17 +00:00
|
|
|
if (INTEL_INFO(dev)->gen >= 4)
|
2008-07-30 19:06:12 +00:00
|
|
|
flush_domains |= I915_GEM_DOMAIN_SAMPLER;
|
2010-05-21 01:08:56 +00:00
|
|
|
|
|
|
|
ring->flush(dev, ring,
|
|
|
|
I915_GEM_DOMAIN_COMMAND, flush_domains);
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2010-09-24 15:02:42 +00:00
|
|
|
static inline void
|
|
|
|
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
|
2008-07-30 19:06:12 +00:00
|
|
|
{
|
2010-09-26 10:03:27 +00:00
|
|
|
struct drm_i915_file_private *file_priv = request->file_priv;
|
|
|
|
|
|
|
|
if (!file_priv)
|
|
|
|
return;
|
|
|
|
|
|
|
|
spin_lock(&file_priv->mm.lock);
|
|
|
|
list_del(&request->client_list);
|
|
|
|
request->file_priv = NULL;
|
|
|
|
spin_unlock(&file_priv->mm.lock);
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2010-09-22 09:31:52 +00:00
|
|
|
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
|
|
|
|
struct intel_ring_buffer *ring)
|
2010-09-19 11:21:28 +00:00
|
|
|
{
|
2010-09-22 09:31:52 +00:00
|
|
|
while (!list_empty(&ring->request_list)) {
|
|
|
|
struct drm_i915_gem_request *request;
|
2010-09-19 11:21:28 +00:00
|
|
|
|
2010-09-22 09:31:52 +00:00
|
|
|
request = list_first_entry(&ring->request_list,
|
|
|
|
struct drm_i915_gem_request,
|
|
|
|
list);
|
|
|
|
|
|
|
|
list_del(&request->list);
|
2010-09-24 15:02:42 +00:00
|
|
|
i915_gem_request_remove_from_client(request);
|
2010-09-22 09:31:52 +00:00
|
|
|
kfree(request);
|
|
|
|
}
|
|
|
|
|
|
|
|
while (!list_empty(&ring->active_list)) {
|
2010-09-19 11:21:28 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv;
|
|
|
|
|
2010-09-22 09:31:52 +00:00
|
|
|
obj_priv = list_first_entry(&ring->active_list,
|
2010-09-19 11:21:28 +00:00
|
|
|
struct drm_i915_gem_object,
|
|
|
|
list);
|
|
|
|
|
|
|
|
obj_priv->base.write_domain = 0;
|
2010-09-22 09:31:52 +00:00
|
|
|
list_del_init(&obj_priv->gpu_write_list);
|
2010-09-19 11:21:28 +00:00
|
|
|
i915_gem_object_move_to_inactive(&obj_priv->base);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-09-30 15:53:18 +00:00
|
|
|
void i915_gem_reset(struct drm_device *dev)
|
2010-09-19 11:31:36 +00:00
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
2010-09-30 15:53:18 +00:00
|
|
|
int i;
|
2010-09-19 11:31:36 +00:00
|
|
|
|
2010-09-22 09:31:52 +00:00
|
|
|
i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
|
|
|
|
if (HAS_BSD(dev))
|
|
|
|
i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
|
|
|
|
|
|
|
|
/* Remove anything from the flushing lists. The GPU cache is likely
|
|
|
|
* to be lost on reset along with the data, so simply move the
|
|
|
|
* lost bo to the inactive list.
|
|
|
|
*/
|
|
|
|
while (!list_empty(&dev_priv->mm.flushing_list)) {
|
|
|
|
obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
|
|
|
|
struct drm_i915_gem_object,
|
|
|
|
list);
|
|
|
|
|
|
|
|
obj_priv->base.write_domain = 0;
|
|
|
|
list_del_init(&obj_priv->gpu_write_list);
|
|
|
|
i915_gem_object_move_to_inactive(&obj_priv->base);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Move everything out of the GPU domains to ensure we do any
|
|
|
|
* necessary invalidation upon reuse.
|
|
|
|
*/
|
2010-09-19 11:31:36 +00:00
|
|
|
list_for_each_entry(obj_priv,
|
|
|
|
&dev_priv->mm.inactive_list,
|
|
|
|
list)
|
|
|
|
{
|
|
|
|
obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
|
|
|
|
}
|
2010-09-30 15:53:18 +00:00
|
|
|
|
|
|
|
/* The fence registers are invalidated so clear them out */
|
|
|
|
for (i = 0; i < 16; i++) {
|
|
|
|
struct drm_i915_fence_reg *reg;
|
|
|
|
|
|
|
|
reg = &dev_priv->fence_regs[i];
|
|
|
|
if (!reg->obj)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
i915_gem_clear_fence_reg(reg->obj);
|
|
|
|
}
|
2010-09-19 11:31:36 +00:00
|
|
|
}
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
/**
|
|
|
|
* This function clears the request list as sequence numbers are passed.
|
|
|
|
*/
|
2010-07-23 22:18:49 +00:00
|
|
|
static void
|
|
|
|
i915_gem_retire_requests_ring(struct drm_device *dev,
|
|
|
|
struct intel_ring_buffer *ring)
|
2008-07-30 19:06:12 +00:00
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
uint32_t seqno;
|
|
|
|
|
2010-09-18 00:38:04 +00:00
|
|
|
if (!ring->status_page.page_addr ||
|
|
|
|
list_empty(&ring->request_list))
|
2009-02-23 14:07:57 +00:00
|
|
|
return;
|
|
|
|
|
2010-09-29 15:10:57 +00:00
|
|
|
WARN_ON(i915_verify_lists(dev));
|
|
|
|
|
2010-09-24 15:02:42 +00:00
|
|
|
seqno = ring->get_seqno(dev, ring);
|
2010-05-21 01:08:56 +00:00
|
|
|
while (!list_empty(&ring->request_list)) {
|
2008-07-30 19:06:12 +00:00
|
|
|
struct drm_i915_gem_request *request;
|
|
|
|
|
2010-05-21 01:08:56 +00:00
|
|
|
request = list_first_entry(&ring->request_list,
|
2008-07-30 19:06:12 +00:00
|
|
|
struct drm_i915_gem_request,
|
|
|
|
list);
|
|
|
|
|
2010-09-22 09:31:52 +00:00
|
|
|
if (!i915_seqno_passed(seqno, request->seqno))
|
2010-09-18 00:38:04 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
trace_i915_gem_request_retire(dev, request->seqno);
|
|
|
|
|
|
|
|
list_del(&request->list);
|
2010-09-24 15:02:42 +00:00
|
|
|
i915_gem_request_remove_from_client(request);
|
2010-09-18 00:38:04 +00:00
|
|
|
kfree(request);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Move any buffers on the active list that are no longer referenced
|
|
|
|
* by the ringbuffer to the flushing/inactive lists as appropriate.
|
|
|
|
*/
|
|
|
|
while (!list_empty(&ring->active_list)) {
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
|
|
|
|
|
|
|
obj_priv = list_first_entry(&ring->active_list,
|
|
|
|
struct drm_i915_gem_object,
|
|
|
|
list);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-09-22 09:31:52 +00:00
|
|
|
if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
|
2008-07-30 19:06:12 +00:00
|
|
|
break;
|
2010-09-18 00:38:04 +00:00
|
|
|
|
|
|
|
obj = &obj_priv->base;
|
|
|
|
if (obj->write_domain != 0)
|
|
|
|
i915_gem_object_move_to_flushing(obj);
|
|
|
|
else
|
|
|
|
i915_gem_object_move_to_inactive(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
2009-09-24 04:26:06 +00:00
|
|
|
|
|
|
|
if (unlikely (dev_priv->trace_irq_seqno &&
|
|
|
|
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
|
2010-05-21 01:08:55 +00:00
|
|
|
ring->user_irq_put(dev, ring);
|
2009-09-24 04:26:06 +00:00
|
|
|
dev_priv->trace_irq_seqno = 0;
|
|
|
|
}
|
2010-09-29 15:10:57 +00:00
|
|
|
|
|
|
|
WARN_ON(i915_verify_lists(dev));
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2010-07-23 22:18:49 +00:00
|
|
|
void
|
|
|
|
i915_gem_retire_requests(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
|
2010-07-23 22:18:50 +00:00
|
|
|
if (!list_empty(&dev_priv->mm.deferred_free_list)) {
|
|
|
|
struct drm_i915_gem_object *obj_priv, *tmp;
|
|
|
|
|
|
|
|
/* We must be careful that during unbind() we do not
|
|
|
|
* accidentally infinitely recurse into retire requests.
|
|
|
|
* Currently:
|
|
|
|
* retire -> free -> unbind -> wait -> retire_ring
|
|
|
|
*/
|
|
|
|
list_for_each_entry_safe(obj_priv, tmp,
|
|
|
|
&dev_priv->mm.deferred_free_list,
|
|
|
|
list)
|
|
|
|
i915_gem_free_object_tail(&obj_priv->base);
|
|
|
|
}
|
|
|
|
|
2010-07-23 22:18:49 +00:00
|
|
|
i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
|
|
|
|
if (HAS_BSD(dev))
|
|
|
|
i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
|
|
|
|
}
|
|
|
|
|
2010-08-20 22:25:16 +00:00
|
|
|
static void
|
2008-07-30 19:06:12 +00:00
|
|
|
i915_gem_retire_work_handler(struct work_struct *work)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv;
|
|
|
|
struct drm_device *dev;
|
|
|
|
|
|
|
|
dev_priv = container_of(work, drm_i915_private_t,
|
|
|
|
mm.retire_work.work);
|
|
|
|
dev = dev_priv->dev;
|
|
|
|
|
2010-09-29 11:26:37 +00:00
|
|
|
/* Come back later if the device is busy... */
|
|
|
|
if (!mutex_trylock(&dev->struct_mutex)) {
|
|
|
|
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-07-23 22:18:49 +00:00
|
|
|
i915_gem_retire_requests(dev);
|
2010-05-21 01:08:57 +00:00
|
|
|
|
2008-10-15 04:41:13 +00:00
|
|
|
if (!dev_priv->mm.suspended &&
|
2010-05-21 01:08:57 +00:00
|
|
|
(!list_empty(&dev_priv->render_ring.request_list) ||
|
|
|
|
(HAS_BSD(dev) &&
|
|
|
|
!list_empty(&dev_priv->bsd_ring.request_list))))
|
2009-08-03 23:09:16 +00:00
|
|
|
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
|
2008-07-30 19:06:12 +00:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
}
|
|
|
|
|
2009-09-15 20:57:36 +00:00
|
|
|
int
|
2010-05-21 01:08:56 +00:00
|
|
|
i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
|
2010-02-11 21:29:04 +00:00
|
|
|
bool interruptible, struct intel_ring_buffer *ring)
|
2008-07-30 19:06:12 +00:00
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
2009-05-05 23:03:48 +00:00
|
|
|
u32 ier;
|
2008-07-30 19:06:12 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
BUG_ON(seqno == 0);
|
|
|
|
|
2010-09-25 09:19:17 +00:00
|
|
|
if (atomic_read(&dev_priv->mm.wedged))
|
|
|
|
return -EAGAIN;
|
|
|
|
|
2010-09-28 09:07:56 +00:00
|
|
|
if (ring->outstanding_lazy_request) {
|
2010-08-12 11:36:12 +00:00
|
|
|
seqno = i915_add_request(dev, NULL, NULL, ring);
|
2010-02-11 21:13:59 +00:00
|
|
|
if (seqno == 0)
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2010-09-28 09:07:56 +00:00
|
|
|
BUG_ON(seqno == dev_priv->next_seqno);
|
2010-02-11 21:13:59 +00:00
|
|
|
|
2010-09-24 15:02:42 +00:00
|
|
|
if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
|
2009-10-22 23:11:14 +00:00
|
|
|
if (HAS_PCH_SPLIT(dev))
|
2009-06-08 06:40:19 +00:00
|
|
|
ier = I915_READ(DEIER) | I915_READ(GTIER);
|
|
|
|
else
|
|
|
|
ier = I915_READ(IER);
|
2009-05-05 23:03:48 +00:00
|
|
|
if (!ier) {
|
|
|
|
DRM_ERROR("something (likely vbetool) disabled "
|
|
|
|
"interrupts, re-enabling\n");
|
|
|
|
i915_driver_irq_preinstall(dev);
|
|
|
|
i915_driver_irq_postinstall(dev);
|
|
|
|
}
|
|
|
|
|
2009-08-25 10:15:50 +00:00
|
|
|
trace_i915_gem_request_wait_begin(dev, seqno);
|
|
|
|
|
2010-05-21 01:08:56 +00:00
|
|
|
ring->waiting_gem_seqno = seqno;
|
2010-05-21 01:08:55 +00:00
|
|
|
ring->user_irq_get(dev, ring);
|
2009-09-15 20:57:32 +00:00
|
|
|
if (interruptible)
|
2010-05-21 01:08:56 +00:00
|
|
|
ret = wait_event_interruptible(ring->irq_queue,
|
|
|
|
i915_seqno_passed(
|
2010-09-24 15:02:42 +00:00
|
|
|
ring->get_seqno(dev, ring), seqno)
|
2010-05-21 01:08:56 +00:00
|
|
|
|| atomic_read(&dev_priv->mm.wedged));
|
2009-09-15 20:57:32 +00:00
|
|
|
else
|
2010-05-21 01:08:56 +00:00
|
|
|
wait_event(ring->irq_queue,
|
|
|
|
i915_seqno_passed(
|
2010-09-24 15:02:42 +00:00
|
|
|
ring->get_seqno(dev, ring), seqno)
|
2010-05-21 01:08:56 +00:00
|
|
|
|| atomic_read(&dev_priv->mm.wedged));
|
2009-09-15 20:57:32 +00:00
|
|
|
|
2010-05-21 01:08:55 +00:00
|
|
|
ring->user_irq_put(dev, ring);
|
2010-05-21 01:08:56 +00:00
|
|
|
ring->waiting_gem_seqno = 0;
|
2009-08-25 10:15:50 +00:00
|
|
|
|
|
|
|
trace_i915_gem_request_wait_end(dev, seqno);
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
2009-09-14 21:48:47 +00:00
|
|
|
if (atomic_read(&dev_priv->mm.wedged))
|
2010-09-25 09:19:17 +00:00
|
|
|
ret = -EAGAIN;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
if (ret && ret != -ERESTARTSYS)
|
2010-02-11 21:19:40 +00:00
|
|
|
DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
|
2010-09-24 15:02:42 +00:00
|
|
|
__func__, ret, seqno, ring->get_seqno(dev, ring),
|
2010-02-11 21:19:40 +00:00
|
|
|
dev_priv->next_seqno);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
/* Directly dispatch request retiring. While we have the work queue
|
|
|
|
* to handle this, the waiter on a request often wants an associated
|
|
|
|
* buffer to have made it to the inactive list, and we would need
|
|
|
|
* a separate wait queue to handle that.
|
|
|
|
*/
|
|
|
|
if (ret == 0)
|
2010-07-23 22:18:49 +00:00
|
|
|
i915_gem_retire_requests_ring(dev, ring);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-09-15 20:57:32 +00:00
|
|
|
/**
|
|
|
|
* Waits for a sequence number to be signaled, and cleans up the
|
|
|
|
* request and object lists appropriately for that event.
|
|
|
|
*/
|
|
|
|
static int
|
2010-05-21 01:08:56 +00:00
|
|
|
i915_wait_request(struct drm_device *dev, uint32_t seqno,
|
2010-09-28 09:07:56 +00:00
|
|
|
struct intel_ring_buffer *ring)
|
2009-09-15 20:57:32 +00:00
|
|
|
{
|
2010-05-21 01:08:56 +00:00
|
|
|
return i915_do_wait_request(dev, seqno, 1, ring);
|
2009-09-15 20:57:32 +00:00
|
|
|
}
|
|
|
|
|
2010-09-23 10:00:38 +00:00
|
|
|
static void
|
2010-09-18 10:02:01 +00:00
|
|
|
i915_gem_flush_ring(struct drm_device *dev,
|
2010-09-20 11:50:23 +00:00
|
|
|
struct drm_file *file_priv,
|
2010-09-18 10:02:01 +00:00
|
|
|
struct intel_ring_buffer *ring,
|
|
|
|
uint32_t invalidate_domains,
|
|
|
|
uint32_t flush_domains)
|
|
|
|
{
|
|
|
|
ring->flush(dev, ring, invalidate_domains, flush_domains);
|
|
|
|
i915_gem_process_flushing_list(dev, flush_domains, ring);
|
|
|
|
}
|
|
|
|
|
2010-05-21 01:08:55 +00:00
|
|
|
static void
|
|
|
|
i915_gem_flush(struct drm_device *dev,
|
2010-09-20 11:50:23 +00:00
|
|
|
struct drm_file *file_priv,
|
2010-05-21 01:08:55 +00:00
|
|
|
uint32_t invalidate_domains,
|
2010-09-18 10:02:01 +00:00
|
|
|
uint32_t flush_domains,
|
|
|
|
uint32_t flush_rings)
|
2010-05-21 01:08:55 +00:00
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
2010-02-11 21:19:40 +00:00
|
|
|
|
2010-05-21 01:08:55 +00:00
|
|
|
if (flush_domains & I915_GEM_DOMAIN_CPU)
|
|
|
|
drm_agp_chipset_flush(dev);
|
2010-02-11 21:19:40 +00:00
|
|
|
|
2010-09-18 10:02:01 +00:00
|
|
|
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
|
|
|
|
if (flush_rings & RING_RENDER)
|
2010-09-20 11:50:23 +00:00
|
|
|
i915_gem_flush_ring(dev, file_priv,
|
2010-09-18 10:02:01 +00:00
|
|
|
&dev_priv->render_ring,
|
|
|
|
invalidate_domains, flush_domains);
|
|
|
|
if (flush_rings & RING_BSD)
|
2010-09-20 11:50:23 +00:00
|
|
|
i915_gem_flush_ring(dev, file_priv,
|
2010-09-18 10:02:01 +00:00
|
|
|
&dev_priv->bsd_ring,
|
|
|
|
invalidate_domains, flush_domains);
|
|
|
|
}
|
2010-05-21 01:08:55 +00:00
|
|
|
}
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
/**
|
|
|
|
* Ensures that all rendering to the object has completed and the object is
|
|
|
|
* safe to unbind from the GTT or access from the CPU.
|
|
|
|
*/
|
|
|
|
static int
|
2010-09-14 12:03:28 +00:00
|
|
|
i915_gem_object_wait_rendering(struct drm_gem_object *obj,
|
|
|
|
bool interruptible)
|
2008-07-30 19:06:12 +00:00
|
|
|
{
|
|
|
|
struct drm_device *dev = obj->dev;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
int ret;
|
|
|
|
|
2008-11-14 21:35:19 +00:00
|
|
|
/* This function only exists to support waiting for existing rendering,
|
|
|
|
* not for emitting required flushes.
|
2008-07-30 19:06:12 +00:00
|
|
|
*/
|
2008-11-14 21:35:19 +00:00
|
|
|
BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
/* If there is rendering queued on the buffer being evicted, wait for
|
|
|
|
* it.
|
|
|
|
*/
|
|
|
|
if (obj_priv->active) {
|
2010-09-14 12:03:28 +00:00
|
|
|
ret = i915_do_wait_request(dev,
|
|
|
|
obj_priv->last_rendering_seqno,
|
|
|
|
interruptible,
|
|
|
|
obj_priv->ring);
|
|
|
|
if (ret)
|
2008-07-30 19:06:12 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Unbinds an object from the GTT aperture.
|
|
|
|
*/
|
2009-01-27 01:10:45 +00:00
|
|
|
int
|
2008-07-30 19:06:12 +00:00
|
|
|
i915_gem_object_unbind(struct drm_gem_object *obj)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = obj->dev;
|
2010-09-30 10:46:12 +00:00
|
|
|
struct drm_i915_private *dev_priv = dev->dev_private;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (obj_priv->gtt_space == NULL)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (obj_priv->pin_count != 0) {
|
|
|
|
DRM_ERROR("Attempting to unbind pinned buffer\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2009-09-09 18:50:45 +00:00
|
|
|
/* blow away mappings if mapped through GTT */
|
|
|
|
i915_gem_release_mmap(obj);
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
/* Move the object to the CPU domain to ensure that
|
|
|
|
* any possible CPU writes while it's not in the GTT
|
|
|
|
* are flushed when we go to remap it. This will
|
|
|
|
* also ensure that all pending GPU writes are finished
|
|
|
|
* before we unbind.
|
|
|
|
*/
|
2008-11-14 21:35:19 +00:00
|
|
|
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
|
2010-07-23 22:18:51 +00:00
|
|
|
if (ret == -ERESTARTSYS)
|
2008-07-30 19:06:12 +00:00
|
|
|
return ret;
|
2010-07-23 22:18:51 +00:00
|
|
|
/* Continue on if we fail due to EIO, the GPU is hung so we
|
|
|
|
* should be safe and we need to cleanup or else we might
|
|
|
|
* cause memory corruption through use-after-free.
|
|
|
|
*/
|
2010-09-30 14:08:57 +00:00
|
|
|
if (ret) {
|
|
|
|
i915_gem_clflush_object(obj);
|
|
|
|
obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
|
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2009-12-15 16:50:00 +00:00
|
|
|
/* release the fence reg _after_ flushing */
|
|
|
|
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
|
|
|
|
i915_gem_clear_fence_reg(obj);
|
|
|
|
|
2010-09-30 10:46:12 +00:00
|
|
|
drm_unbind_agp(obj_priv->agp_mem);
|
|
|
|
drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2009-03-19 21:10:50 +00:00
|
|
|
i915_gem_object_put_pages(obj);
|
2009-09-20 20:29:47 +00:00
|
|
|
BUG_ON(obj_priv->pages_refcount);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-09-30 10:46:12 +00:00
|
|
|
i915_gem_info_remove_gtt(dev_priv, obj->size);
|
2010-09-20 16:36:15 +00:00
|
|
|
list_del_init(&obj_priv->list);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-09-30 10:46:12 +00:00
|
|
|
drm_mm_put_block(obj_priv->gtt_space);
|
|
|
|
obj_priv->gtt_space = NULL;
|
|
|
|
|
2009-09-20 22:03:54 +00:00
|
|
|
if (i915_gem_object_is_purgeable(obj_priv))
|
|
|
|
i915_gem_object_truncate(obj);
|
|
|
|
|
2009-08-25 10:15:50 +00:00
|
|
|
trace_i915_gem_object_unbind(obj);
|
|
|
|
|
2010-07-23 22:18:51 +00:00
|
|
|
return ret;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2010-09-28 09:07:56 +00:00
|
|
|
static int i915_ring_idle(struct drm_device *dev,
|
|
|
|
struct intel_ring_buffer *ring)
|
|
|
|
{
|
|
|
|
i915_gem_flush_ring(dev, NULL, ring,
|
|
|
|
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
|
|
|
|
return i915_wait_request(dev,
|
|
|
|
i915_gem_next_request_seqno(dev, ring),
|
|
|
|
ring);
|
|
|
|
}
|
|
|
|
|
2010-08-07 10:01:23 +00:00
|
|
|
int
|
2010-02-19 10:52:00 +00:00
|
|
|
i915_gpu_idle(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
bool lists_empty;
|
2010-05-21 01:08:56 +00:00
|
|
|
int ret;
|
2010-02-19 10:52:00 +00:00
|
|
|
|
2010-05-21 01:08:57 +00:00
|
|
|
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
|
|
|
|
list_empty(&dev_priv->render_ring.active_list) &&
|
|
|
|
(!HAS_BSD(dev) ||
|
|
|
|
list_empty(&dev_priv->bsd_ring.active_list)));
|
2010-02-19 10:52:00 +00:00
|
|
|
if (lists_empty)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Flush everything onto the inactive list. */
|
2010-09-28 09:07:56 +00:00
|
|
|
ret = i915_ring_idle(dev, &dev_priv->render_ring);
|
2010-02-11 21:29:04 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2010-05-21 01:08:57 +00:00
|
|
|
|
|
|
|
if (HAS_BSD(dev)) {
|
2010-09-28 09:07:56 +00:00
|
|
|
ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
|
2010-05-21 01:08:57 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-02-11 21:29:04 +00:00
|
|
|
return 0;
|
2010-02-19 10:52:00 +00:00
|
|
|
}

static int
i915_gem_object_get_pages(struct drm_gem_object *obj,
                          gfp_t gfpmask)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count, i;
        struct address_space *mapping;
        struct inode *inode;
        struct page *page;

        BUG_ON(obj_priv->pages_refcount
               == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);

        if (obj_priv->pages_refcount++ != 0)
                return 0;

        /* Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         */
        page_count = obj->size / PAGE_SIZE;
        BUG_ON(obj_priv->pages != NULL);
        obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
        if (obj_priv->pages == NULL) {
                obj_priv->pages_refcount--;
                return -ENOMEM;
        }

        inode = obj->filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
                page = read_cache_page_gfp(mapping, i,
                                           GFP_HIGHUSER |
                                           __GFP_COLD |
                                           __GFP_RECLAIMABLE |
                                           gfpmask);
                if (IS_ERR(page))
                        goto err_pages;

                obj_priv->pages[i] = page;
        }

        if (obj_priv->tiling_mode != I915_TILING_NONE)
                i915_gem_object_do_bit_17_swizzle(obj);

        return 0;

err_pages:
        while (i--)
                page_cache_release(obj_priv->pages[i]);

        drm_free_large(obj_priv->pages);
        obj_priv->pages = NULL;
        obj_priv->pages_refcount--;
        return PTR_ERR(page);
}
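
/*
 * Note on the pairing above: each page returned by read_cache_page_gfp()
 * carries a reference that is only dropped (via page_cache_release()) by
 * i915_gem_object_put_pages() or by the err_pages unwind path, so
 * get_pages/put_pages calls must stay balanced through pages_refcount.
 */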

static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
{
        struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int regnum = obj_priv->fence_reg;
        uint64_t val;

        val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
                    0xfffff000) << 32;
        val |= obj_priv->gtt_offset & 0xfffff000;
        val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
                SANDYBRIDGE_FENCE_PITCH_SHIFT;

        if (obj_priv->tiling_mode == I915_TILING_Y)
                val |= 1 << I965_FENCE_TILING_Y_SHIFT;
        val |= I965_FENCE_REG_VALID;

        I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
}
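
/*
 * Layout sketch of the 64-bit fence value written above (and by the i965
 * variant below): the upper 32 bits hold the fence end address, while the
 * lower 32 bits hold the start address together with the encoded pitch,
 * the Y-tiling bit and the valid bit.
 */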

static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
        struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int regnum = obj_priv->fence_reg;
        uint64_t val;

        val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
                    0xfffff000) << 32;
        val |= obj_priv->gtt_offset & 0xfffff000;
        val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
        if (obj_priv->tiling_mode == I915_TILING_Y)
                val |= 1 << I965_FENCE_TILING_Y_SHIFT;
        val |= I965_FENCE_REG_VALID;

        I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}

static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
        struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int regnum = obj_priv->fence_reg;
        int tile_width;
        uint32_t fence_reg, val;
        uint32_t pitch_val;

        if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
            (obj_priv->gtt_offset & (obj->size - 1))) {
                WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
                     __func__, obj_priv->gtt_offset, obj->size);
                return;
        }

        if (obj_priv->tiling_mode == I915_TILING_Y &&
            HAS_128_BYTE_Y_TILING(dev))
                tile_width = 128;
        else
                tile_width = 512;

        /* Note: pitch better be a power of two tile widths */
        pitch_val = obj_priv->stride / tile_width;
        pitch_val = ffs(pitch_val) - 1;

        if (obj_priv->tiling_mode == I915_TILING_Y &&
            HAS_128_BYTE_Y_TILING(dev))
                WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
        else
                WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);

        val = obj_priv->gtt_offset;
        if (obj_priv->tiling_mode == I915_TILING_Y)
                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
        val |= I915_FENCE_SIZE_BITS(obj->size);
        val |= pitch_val << I830_FENCE_PITCH_SHIFT;
        val |= I830_FENCE_REG_VALID;

        if (regnum < 8)
                fence_reg = FENCE_REG_830_0 + (regnum * 4);
        else
                fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
        I915_WRITE(fence_reg, val);
}

static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
        struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int regnum = obj_priv->fence_reg;
        uint32_t val;
        uint32_t pitch_val;
        uint32_t fence_size_bits;

        if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
            (obj_priv->gtt_offset & (obj->size - 1))) {
                WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
                     __func__, obj_priv->gtt_offset);
                return;
        }

        pitch_val = obj_priv->stride / 128;
        pitch_val = ffs(pitch_val) - 1;
        WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);

        val = obj_priv->gtt_offset;
        if (obj_priv->tiling_mode == I915_TILING_Y)
                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
        fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
        WARN_ON(fence_size_bits & ~0x00000f00);
        val |= fence_size_bits;
        val |= pitch_val << I830_FENCE_PITCH_SHIFT;
        val |= I830_FENCE_REG_VALID;

        I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}

static int i915_find_fence_reg(struct drm_device *dev,
                               bool interruptible)
{
        struct drm_i915_fence_reg *reg = NULL;
        struct drm_i915_gem_object *obj_priv = NULL;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_gem_object *obj = NULL;
        int i, avail, ret;

        /* First try to find a free reg */
        avail = 0;
        for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
                reg = &dev_priv->fence_regs[i];
                if (!reg->obj)
                        return i;

                obj_priv = to_intel_bo(reg->obj);
                if (!obj_priv->pin_count)
                        avail++;
        }

        if (avail == 0)
                return -ENOSPC;

        /* None available, try to steal one or wait for a user to finish */
        i = I915_FENCE_REG_NONE;
        list_for_each_entry(reg, &dev_priv->mm.fence_list,
                            lru_list) {
                obj = reg->obj;
                obj_priv = to_intel_bo(obj);

                if (obj_priv->pin_count)
                        continue;

                /* found one! */
                i = obj_priv->fence_reg;
                break;
        }

        BUG_ON(i == I915_FENCE_REG_NONE);

        /* We only have a reference on obj from the active list. put_fence_reg
         * might drop that one, causing a use-after-free in it. So hold a
         * private reference to obj like the other callers of put_fence_reg
         * (set_tiling ioctl) do. */
        drm_gem_object_reference(obj);
        ret = i915_gem_object_put_fence_reg(obj, interruptible);
        drm_gem_object_unreference(obj);
        if (ret != 0)
                return ret;

        return i;
}
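
/*
 * Note: mm.fence_list is kept in LRU order -- i915_gem_object_get_fence_reg()
 * below moves a reused fence to the tail of the list, so the stealing walk
 * above starts from the least recently used entry.
 */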

/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
                              bool interruptible)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_i915_fence_reg *reg = NULL;
        int ret;

        /* Just update our place in the LRU if our fence is getting used. */
        if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
                reg = &dev_priv->fence_regs[obj_priv->fence_reg];
                list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
                return 0;
        }

        switch (obj_priv->tiling_mode) {
        case I915_TILING_NONE:
                WARN(1, "allocating a fence for non-tiled object?\n");
                break;
        case I915_TILING_X:
                if (!obj_priv->stride)
                        return -EINVAL;
                WARN((obj_priv->stride & (512 - 1)),
                     "object 0x%08x is X tiled but has non-512B pitch\n",
                     obj_priv->gtt_offset);
                break;
        case I915_TILING_Y:
                if (!obj_priv->stride)
                        return -EINVAL;
                WARN((obj_priv->stride & (128 - 1)),
                     "object 0x%08x is Y tiled but has non-128B pitch\n",
                     obj_priv->gtt_offset);
                break;
        }

        ret = i915_find_fence_reg(dev, interruptible);
        if (ret < 0)
                return ret;

        obj_priv->fence_reg = ret;
        reg = &dev_priv->fence_regs[obj_priv->fence_reg];
        list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);

        reg->obj = obj;

        switch (INTEL_INFO(dev)->gen) {
        case 6:
                sandybridge_write_fence_reg(reg);
                break;
        case 5:
        case 4:
                i965_write_fence_reg(reg);
                break;
        case 3:
                i915_write_fence_reg(reg);
                break;
        case 2:
                i830_write_fence_reg(reg);
                break;
        }

        trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
                                        obj_priv->tiling_mode);

        return 0;
}

/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_i915_fence_reg *reg =
                &dev_priv->fence_regs[obj_priv->fence_reg];
        uint32_t fence_reg;

        switch (INTEL_INFO(dev)->gen) {
        case 6:
                I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
                             (obj_priv->fence_reg * 8), 0);
                break;
        case 5:
        case 4:
                I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
                break;
        case 3:
                if (obj_priv->fence_reg >= 8)
                        fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
                else
        case 2:
                        fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;

                I915_WRITE(fence_reg, 0);
                break;
        }

        reg->obj = NULL;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
        list_del_init(&reg->lru_list);
}
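
/*
 * Readability note for the switch above: the "case 2:" label deliberately
 * sits inside the else branch of the gen-3 code, so gen-2 devices jump
 * straight to the FENCE_REG_830_0 calculation and then share the final
 * I915_WRITE() with gen 3.
 */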

/**
 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
 * to the buffer to finish, and then resets the fence register.
 * @obj: tiled object holding a fence register.
 * @interruptible: whether the wait upon the fence is interruptible
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
                              bool interruptible)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_i915_fence_reg *reg;

        if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
                return 0;

        /* If we've changed tiling, GTT-mappings of the object
         * need to re-fault to ensure that the correct fence register
         * setup is in place.
         */
        i915_gem_release_mmap(obj);

        /* On the i915, GPU access to tiled buffers is via a fence,
         * therefore we must wait for any outstanding access to complete
         * before clearing the fence.
         */
        reg = &dev_priv->fence_regs[obj_priv->fence_reg];
        if (reg->gpu) {
                int ret;

                ret = i915_gem_object_flush_gpu_write_domain(obj, true);
                if (ret)
                        return ret;

                ret = i915_gem_object_wait_rendering(obj, interruptible);
                if (ret)
                        return ret;

                reg->gpu = false;
        }

        i915_gem_object_flush_gtt_write_domain(obj);
        i915_gem_clear_fence_reg(obj);

        return 0;
}
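
/*
 * Rough outline of the allocation strategy used by the binder below: search
 * the drm_mm first; on failure, evict something from the GTT and retry; if
 * the backing pages cannot be allocated with the conservative gfp mask
 * (__GFP_NORETRY | __GFP_NOWARN), drop the mask to 0 and retry once more so
 * the page allocator may reclaim harder.
 */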

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_mm_node *free_space;
        gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
        int ret;

        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to bind a purgeable object\n");
                return -EINVAL;
        }

        if (alignment == 0)
                alignment = i915_gem_get_gtt_alignment(obj);
        if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
                DRM_ERROR("Invalid object alignment requested %u\n", alignment);
                return -EINVAL;
        }

        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
        if (obj->size > dev_priv->mm.gtt_total) {
                DRM_ERROR("Attempting to bind an object larger than the aperture\n");
                return -E2BIG;
        }

 search_free:
        free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
                                        obj->size, alignment, 0);
        if (free_space != NULL) {
                obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
                                                       alignment);
                if (obj_priv->gtt_space != NULL)
                        obj_priv->gtt_offset = obj_priv->gtt_space->start;
        }
        if (obj_priv->gtt_space == NULL) {
                /* If the gtt is empty and we're still having trouble
                 * fitting our object in, we're out of memory.
                 */
                ret = i915_gem_evict_something(dev, obj->size, alignment);
                if (ret)
                        return ret;

                goto search_free;
        }

        ret = i915_gem_object_get_pages(obj, gfpmask);
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;

                if (ret == -ENOMEM) {
                        /* first try to clear up some space from the GTT */
                        ret = i915_gem_evict_something(dev, obj->size,
                                                       alignment);
                        if (ret) {
                                /* now try to shrink everyone else */
                                if (gfpmask) {
                                        gfpmask = 0;
                                        goto search_free;
                                }

                                return ret;
                        }

                        goto search_free;
                }

                return ret;
        }

        /* Create an AGP memory structure pointing at our pages, and bind it
         * into the GTT.
         */
        obj_priv->agp_mem = drm_agp_bind_pages(dev,
                                               obj_priv->pages,
                                               obj->size >> PAGE_SHIFT,
                                               obj_priv->gtt_offset,
                                               obj_priv->agp_type);
        if (obj_priv->agp_mem == NULL) {
                i915_gem_object_put_pages(obj);
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;

                ret = i915_gem_evict_something(dev, obj->size, alignment);
                if (ret)
                        return ret;

                goto search_free;
        }

        /* keep track of bounds object by adding it to the inactive list */
        list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
        i915_gem_info_add_gtt(dev_priv, obj->size);

        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
         */
        BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
        BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

        trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);

        return 0;
}

void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

        /* If we don't have a page list set up, then we're not pinned
         * to GPU, and we can ignore the cache flush because it'll happen
         * again at bind time.
         */
        if (obj_priv->pages == NULL)
                return;

        trace_i915_gem_object_clflush(obj);

        drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static int
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
                                       bool pipelined)
{
        struct drm_device *dev = obj->dev;
        uint32_t old_write_domain;

        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return 0;

        /* Queue the GPU write cache flushing we need. */
        old_write_domain = obj->write_domain;
        i915_gem_flush_ring(dev, NULL,
                            to_intel_bo(obj)->ring,
                            0, obj->write_domain);
        BUG_ON(obj->write_domain);

        trace_i915_gem_object_change_domain(obj,
                                            obj->read_domains,
                                            old_write_domain);

        if (pipelined)
                return 0;

        return i915_gem_object_wait_rendering(obj, true);
}
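
/*
 * When @pipelined is true the flush above is merely queued on the object's
 * ring and the function returns without blocking; only the non-pipelined
 * path waits for rendering to complete.
 */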

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
        uint32_t old_write_domain;

        if (obj->write_domain != I915_GEM_DOMAIN_GTT)
                return;

        /* No actual flushing is required for the GTT write domain.  Writes
         * to it immediately go to main memory as far as we know, so there's
         * no chipset flush.  It also doesn't land in render cache.
         */
        old_write_domain = obj->write_domain;
        obj->write_domain = 0;

        trace_i915_gem_object_change_domain(obj,
                                            obj->read_domains,
                                            old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        uint32_t old_write_domain;

        if (obj->write_domain != I915_GEM_DOMAIN_CPU)
                return;

        i915_gem_clflush_object(obj);
        drm_agp_chipset_flush(dev);
        old_write_domain = obj->write_domain;
        obj->write_domain = 0;

        trace_i915_gem_object_change_domain(obj,
                                            obj->read_domains,
                                            old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t old_write_domain, old_read_domains;
        int ret;

        /* Not valid to be called on unbound objects. */
        if (obj_priv->gtt_space == NULL)
                return -EINVAL;

        ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;

        i915_gem_object_flush_cpu_write_domain(obj);

        if (write) {
                ret = i915_gem_object_wait_rendering(obj, true);
                if (ret)
                        return ret;
        }

        old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;

        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
        obj->read_domains |= I915_GEM_DOMAIN_GTT;
        if (write) {
                obj->read_domains = I915_GEM_DOMAIN_GTT;
                obj->write_domain = I915_GEM_DOMAIN_GTT;
                obj_priv->dirty = 1;
        }

        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);

        return 0;
}

/*
 * Prepare buffer for display plane. Use uninterruptible for possible flush
 * wait, as in modesetting process we're not supposed to be interrupted.
 */
int
i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
                                     bool pipelined)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t old_read_domains;
        int ret;

        /* Not valid to be called on unbound objects. */
        if (obj_priv->gtt_space == NULL)
                return -EINVAL;

        ret = i915_gem_object_flush_gpu_write_domain(obj, true);
        if (ret)
                return ret;

        /* Currently, we are always called from a non-interruptible context. */
        if (!pipelined) {
                ret = i915_gem_object_wait_rendering(obj, false);
                if (ret)
                        return ret;
        }

        i915_gem_object_flush_cpu_write_domain(obj);

        old_read_domains = obj->read_domains;
        obj->read_domains |= I915_GEM_DOMAIN_GTT;

        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            obj->write_domain);

        return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
        uint32_t old_write_domain, old_read_domains;
        int ret;

        ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;

        i915_gem_object_flush_gtt_write_domain(obj);

        /* If we have a partially-valid cache of the object in the CPU,
         * finish invalidating it and free the per-page flags.
         */
        i915_gem_object_set_to_full_cpu_read_domain(obj);

        if (write) {
                ret = i915_gem_object_wait_rendering(obj, true);
                if (ret)
                        return ret;
        }

        old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;

        /* Flush the CPU cache if it's still invalid. */
        if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
                i915_gem_clflush_object(obj);

                obj->read_domains |= I915_GEM_DOMAIN_CPU;
        }

        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

        /* If we're writing through the CPU, then the GPU read domains will
         * need to be invalidated at next use.
         */
        if (write) {
                obj->read_domains = I915_GEM_DOMAIN_CPU;
                obj->write_domain = I915_GEM_DOMAIN_CPU;
        }

        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);

        return 0;
}

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *      1. Allocated
 *      2. Written by CPU
 *      3. Mapped to GTT
 *      4. Read by GPU
 *      5. Unmapped from GTT
 *      6. Freed
 *
 *      Let's take these a step at a time
 *
 *      1. Allocated
 *              Pages allocated from the kernel may still have
 *              cache contents, so we set them to (CPU, CPU) always.
 *      2. Written by CPU (using pwrite)
 *              The pwrite function calls set_domain (CPU, CPU) and
 *              this function does nothing (as nothing changes)
 *      3. Mapped by GTT
 *              This function asserts that the object is not
 *              currently in any GPU-based read or write domains
 *      4. Read by GPU
 *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *              As write_domain is zero, this function adds in the
 *              current read domains (CPU+COMMAND, 0).
 *              flush_domains is set to CPU.
 *              invalidate_domains is set to COMMAND
 *              clflush is run to get data out of the CPU caches
 *              then i915_dev_set_domain calls i915_gem_flush to
 *              emit an MI_FLUSH and drm_agp_chipset_flush
 *      5. Unmapped from GTT
 *              i915_gem_object_unbind calls set_domain (CPU, CPU)
 *              flush_domains and invalidate_domains end up both zero
 *              so no flushing/invalidating happens
 *      6. Freed
 *              yay, done
 *
 * Case 2: The shared render buffer
 *
 *      1. Allocated
 *      2. Mapped to GTT
 *      3. Read/written by GPU
 *      4. set_domain to (CPU,CPU)
 *      5. Read/written by CPU
 *      6. Read/written by GPU
 *
 *      1. Allocated
 *              Same as last example, (CPU, CPU)
 *      2. Mapped to GTT
 *              Nothing changes (assertions find that it is not in the GPU)
 *      3. Read/written by GPU
 *              execbuffer calls set_domain (RENDER, RENDER)
 *              flush_domains gets CPU
 *              invalidate_domains gets GPU
 *              clflush (obj)
 *              MI_FLUSH and drm_agp_chipset_flush
 *      4. set_domain (CPU, CPU)
 *              flush_domains gets GPU
 *              invalidate_domains gets CPU
 *              wait_rendering (obj) to make sure all drawing is complete.
 *              This will include an MI_FLUSH to get the data from GPU
 *              to memory
 *              clflush (obj) to invalidate the CPU cache
 *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *      5. Read/written by CPU
 *              cache lines are loaded and dirtied
 *      6. Read written by GPU
 *              Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *      1. Allocated
 *      2. Written by CPU
 *      3. Read by GPU
 *      4. Updated (written) by CPU again
 *      5. Read by GPU
 *
 *      1. Allocated
 *              (CPU, CPU)
 *      2. Written by CPU
 *              (CPU, CPU)
 *      3. Read by GPU
 *              (CPU+RENDER, 0)
 *              flush_domains = CPU
 *              invalidate_domains = RENDER
 *              clflush (obj)
 *              MI_FLUSH
 *              drm_agp_chipset_flush
 *      4. Updated (written) by CPU again
 *              (CPU, CPU)
 *              flush_domains = 0 (no previous write domain)
 *              invalidate_domains = 0 (no new read domains)
 *      5. Read by GPU
 *              (CPU+RENDER, 0)
 *              flush_domains = CPU
 *              invalidate_domains = RENDER
 *              clflush (obj)
 *              MI_FLUSH
 *              drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t invalidate_domains = 0;
        uint32_t flush_domains = 0;
        uint32_t old_read_domains;

        intel_mark_busy(dev, obj);

        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
         */
        if (obj->pending_write_domain == 0)
                obj->pending_read_domains |= obj->read_domains;
        else
                obj_priv->dirty = 1;

        /*
         * Flush the current write domain if
         * the new read domains don't match. Invalidate
         * any read domains which differ from the old
         * write domain
         */
        if (obj->write_domain &&
            obj->write_domain != obj->pending_read_domains) {
                flush_domains |= obj->write_domain;
                invalidate_domains |=
                        obj->pending_read_domains & ~obj->write_domain;
        }
        /*
         * Invalidate any read caches which may have
         * stale data. That is, any new read domains.
         */
        invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
                i915_gem_clflush_object(obj);

        old_read_domains = obj->read_domains;

        /* The actual obj->write_domain will be updated with
         * pending_write_domain after we emit the accumulated flush for all
         * of our domain changes in execbuffers (which clears objects'
         * write_domains).  So if we have a current write domain that we
         * aren't changing, set pending_write_domain to that.
         */
        if (flush_domains == 0 && obj->pending_write_domain == 0)
                obj->pending_write_domain = obj->write_domain;
        obj->read_domains = obj->pending_read_domains;

        dev->invalidate_domains |= invalidate_domains;
        dev->flush_domains |= flush_domains;
        if (obj_priv->ring)
                dev_priv->mm.flush_rings |= obj_priv->ring->id;

        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            obj->write_domain);
}

/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

        if (!obj_priv->page_cpu_valid)
                return;

        /* If we're partially in the CPU read domain, finish moving it in.
         */
        if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
                int i;

                for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
                        if (obj_priv->page_cpu_valid[i])
                                continue;
                        drm_clflush_pages(obj_priv->pages + i, 1);
                }
        }

        /* Free the page_cpu_valid mappings which are now stale, whether
         * or not we've got I915_GEM_DOMAIN_CPU.
         */
        kfree(obj_priv->page_cpu_valid);
        obj_priv->page_cpu_valid = NULL;
}

/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid. The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                          uint64_t offset, uint64_t size)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t old_read_domains;
        int i, ret;

        if (offset == 0 && size == obj->size)
                return i915_gem_object_set_to_cpu_domain(obj, 0);

        ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
        i915_gem_object_flush_gtt_write_domain(obj);

        /* If we're already fully in the CPU read domain, we're done. */
        if (obj_priv->page_cpu_valid == NULL &&
            (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
                return 0;

        /* Otherwise, create/clear the per-page CPU read domain flag if we're
         * newly adding I915_GEM_DOMAIN_CPU
         */
        if (obj_priv->page_cpu_valid == NULL) {
                obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
                                                   GFP_KERNEL);
                if (obj_priv->page_cpu_valid == NULL)
                        return -ENOMEM;
        } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
                memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

        /* Flush the cache on any pages that are still invalid from the CPU's
         * perspective.
         */
        for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
             i++) {
                if (obj_priv->page_cpu_valid[i])
                        continue;

                drm_clflush_pages(obj_priv->pages + i, 1);

                obj_priv->page_cpu_valid[i] = 1;
        }

        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

        old_read_domains = obj->read_domains;
        obj->read_domains |= I915_GEM_DOMAIN_CPU;

        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            obj->write_domain);

        return 0;
}
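
/*
 * The relocation writer below picks one of two paths for patching each
 * relocation: a kmap_atomic() of the backing page when the object is in the
 * CPU write domain, or an atomic io_mapping of the GTT aperture otherwise,
 * which avoids any extra cache flushing afterwards.
 */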
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Pin an object to the GTT and evaluate the relocations landing in it.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
|
|
|
|
struct drm_file *file_priv,
|
2010-10-14 11:10:41 +00:00
|
|
|
struct drm_i915_gem_exec_object2 *entry)
|
2008-07-30 19:06:12 +00:00
|
|
|
{
|
|
|
|
struct drm_device *dev = obj->dev;
|
2008-10-31 02:38:48 +00:00
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2010-10-14 11:10:41 +00:00
|
|
|
struct drm_i915_gem_relocation_entry __user *user_relocs;
|
2008-07-30 19:06:12 +00:00
|
|
|
int i, ret;
|
2009-12-18 03:05:42 +00:00
|
|
|
bool need_fence;
|
|
|
|
|
|
|
|
need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
|
|
|
|
obj_priv->tiling_mode != I915_TILING_NONE;
|
|
|
|
|
|
|
|
/* Check fence reg constraints and rebind if necessary */
|
2010-05-27 12:18:15 +00:00
|
|
|
if (need_fence &&
|
|
|
|
!i915_gem_object_fence_offset_ok(obj,
|
|
|
|
obj_priv->tiling_mode)) {
|
|
|
|
ret = i915_gem_object_unbind(obj);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
/* Choose the GTT offset for our buffer and put it there. */
|
|
|
|
ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2009-12-18 03:05:42 +00:00
|
|
|
/*
|
|
|
|
* Pre-965 chips need a fence register set up in order to
|
|
|
|
* properly handle blits to/from tiled surfaces.
|
|
|
|
*/
|
|
|
|
if (need_fence) {
|
2010-09-20 10:40:50 +00:00
|
|
|
ret = i915_gem_object_get_fence_reg(obj, true);
|
2009-12-18 03:05:42 +00:00
|
|
|
if (ret != 0) {
|
|
|
|
i915_gem_object_unpin(obj);
|
|
|
|
return ret;
|
|
|
|
}
|
2010-09-20 10:40:50 +00:00
|
|
|
|
|
|
|
dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
|
2009-12-18 03:05:42 +00:00
|
|
|
}
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
entry->offset = obj_priv->gtt_offset;
|
|
|
|
|
|
|
|
/* Apply the relocations, using the GTT aperture to avoid cache
|
|
|
|
* flushing requirements.
|
|
|
|
*/
|
2010-10-14 11:10:41 +00:00
|
|
|
user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
|
2008-07-30 19:06:12 +00:00
|
|
|
for (i = 0; i < entry->relocation_count; i++) {
|
2010-10-14 11:10:41 +00:00
|
|
|
struct drm_i915_gem_relocation_entry reloc;
|
2008-07-30 19:06:12 +00:00
|
|
|
struct drm_gem_object *target_obj;
|
|
|
|
struct drm_i915_gem_object *target_obj_priv;
|
|
|
|
|
2010-10-14 11:10:41 +00:00
|
|
|
ret = __copy_from_user_inatomic(&reloc,
|
|
|
|
user_relocs+i,
|
|
|
|
sizeof(reloc));
|
|
|
|
if (ret) {
|
|
|
|
i915_gem_object_unpin(obj);
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
target_obj = drm_gem_object_lookup(obj->dev, file_priv,
|
2010-10-14 11:10:41 +00:00
|
|
|
reloc.target_handle);
|
2008-07-30 19:06:12 +00:00
|
|
|
if (target_obj == NULL) {
|
|
|
|
i915_gem_object_unpin(obj);
|
2010-08-04 13:19:46 +00:00
|
|
|
return -ENOENT;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
2010-03-08 12:35:02 +00:00
|
|
|
target_obj_priv = to_intel_bo(target_obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2009-09-09 20:15:15 +00:00
|
|
|
#if WATCH_RELOC
|
|
|
|
DRM_INFO("%s: obj %p offset %08x target %d "
|
|
|
|
"read %08x write %08x gtt %08x "
|
|
|
|
"presumed %08x delta %08x\n",
|
|
|
|
__func__,
|
|
|
|
obj,
|
2010-10-14 11:10:41 +00:00
|
|
|
(int) reloc.offset,
|
|
|
|
(int) reloc.target_handle,
|
|
|
|
(int) reloc.read_domains,
|
|
|
|
(int) reloc.write_domain,
|
2009-09-09 20:15:15 +00:00
|
|
|
(int) target_obj_priv->gtt_offset,
|
2010-10-14 11:10:41 +00:00
|
|
|
(int) reloc.presumed_offset,
|
|
|
|
reloc.delta);
|
2009-09-09 20:15:15 +00:00
|
|
|
#endif
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
/* The target buffer should have appeared before us in the
|
|
|
|
* exec_object list, so it should have a GTT space bound by now.
|
|
|
|
*/
|
|
|
|
if (target_obj_priv->gtt_space == NULL) {
|
|
|
|
DRM_ERROR("No GTT space found for object %d\n",
|
2010-10-14 11:10:41 +00:00
|
|
|
reloc.target_handle);
|
2008-07-30 19:06:12 +00:00
|
|
|
drm_gem_object_unreference(target_obj);
|
|
|
|
i915_gem_object_unpin(obj);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2009-09-09 20:15:15 +00:00
|
|
|
/* Validate that the target is in a valid r/w GPU domain */
|
2010-10-14 11:10:41 +00:00
|
|
|
if (reloc.write_domain & (reloc.write_domain - 1)) {
|
2010-02-19 10:52:02 +00:00
|
|
|
DRM_ERROR("reloc with multiple write domains: "
|
|
|
|
"obj %p target %d offset %d "
|
|
|
|
"read %08x write %08x",
|
2010-10-14 11:10:41 +00:00
|
|
|
obj, reloc.target_handle,
|
|
|
|
(int) reloc.offset,
|
|
|
|
reloc.read_domains,
|
|
|
|
reloc.write_domain);
|
2010-10-02 13:59:17 +00:00
|
|
|
drm_gem_object_unreference(target_obj);
|
|
|
|
i915_gem_object_unpin(obj);
|
2010-02-19 10:52:02 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2010-10-14 11:10:41 +00:00
|
|
|
if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
|
|
|
|
reloc.read_domains & I915_GEM_DOMAIN_CPU) {
|
2008-11-14 21:35:19 +00:00
|
|
|
DRM_ERROR("reloc with read/write CPU domains: "
|
|
|
|
"obj %p target %d offset %d "
|
|
|
|
"read %08x write %08x",
|
2010-10-14 11:10:41 +00:00
|
|
|
obj, reloc.target_handle,
|
|
|
|
(int) reloc.offset,
|
|
|
|
reloc.read_domains,
|
|
|
|
reloc.write_domain);
|
2009-02-11 14:26:32 +00:00
|
|
|
drm_gem_object_unreference(target_obj);
|
|
|
|
i915_gem_object_unpin(obj);
|
2008-11-14 21:35:19 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2010-10-14 11:10:41 +00:00
|
|
|
if (reloc.write_domain && target_obj->pending_write_domain &&
|
|
|
|
reloc.write_domain != target_obj->pending_write_domain) {
|
2008-07-30 19:06:12 +00:00
|
|
|
DRM_ERROR("Write domain conflict: "
|
|
|
|
"obj %p target %d offset %d "
|
|
|
|
"new %08x old %08x\n",
|
2010-10-14 11:10:41 +00:00
|
|
|
obj, reloc.target_handle,
|
|
|
|
(int) reloc.offset,
|
|
|
|
reloc.write_domain,
|
2008-07-30 19:06:12 +00:00
|
|
|
target_obj->pending_write_domain);
|
|
|
|
drm_gem_object_unreference(target_obj);
|
|
|
|
i915_gem_object_unpin(obj);
|
|
|
|
return -EINVAL;
|
|
|
|
}

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		/* Check that the relocation address is valid... */
		if (reloc.offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* and points to somewhere within the target object. */
		if (reloc.delta >= target_obj->size) {
			DRM_ERROR("Relocation beyond target object bounds: "
				  "obj %p target %d delta %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.delta, (int) target_obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		reloc.delta += target_obj_priv->gtt_offset;
		if (obj->write_domain == I915_GEM_DOMAIN_CPU) {
			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
			char *vaddr;

			vaddr = kmap_atomic(obj_priv->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
			kunmap_atomic(vaddr, KM_USER0);
		} else {
			uint32_t __iomem *reloc_entry;
			void __iomem *reloc_page;
			int ret;

			ret = i915_gem_object_set_to_gtt_domain(obj, 1);
			if (ret) {
				drm_gem_object_unreference(target_obj);
				i915_gem_object_unpin(obj);
				return ret;
			}

			/* Map the page containing the relocation we're going to perform. */
			reloc.offset += obj_priv->gtt_offset;
			reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
							      reloc.offset & PAGE_MASK,
							      KM_USER0);
			reloc_entry = (uint32_t __iomem *)
				(reloc_page + (reloc.offset & ~PAGE_MASK));
			iowrite32(reloc.delta, reloc_entry);
			io_mapping_unmap_atomic(reloc_page, KM_USER0);
		}

		drm_gem_object_unreference(target_obj);
	}

	return 0;
}
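
/*
 * Editor's sketch (hypothetical helper, not part of the driver): the CPU
 * branch above splits the relocation's byte offset into a backing-page index
 * and an offset within that page before poking in the 32-bit value.  With
 * the kernel's PAGE_SHIFT/PAGE_MASK definitions the arithmetic is simply:
 */
#if 0
static void example_split_reloc_offset(uint32_t offset,
				       unsigned long *page_index,
				       uint32_t *page_offset)
{
	*page_index  = offset >> PAGE_SHIFT;	/* which page of obj_priv->pages[] */
	*page_offset = offset & ~PAGE_MASK;	/* byte offset within that page    */
}
#endif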

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	u32 seqno = 0;
	int ret;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = 0;
	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
		/* And wait for the seqno passing without holding any locks and
		 * causing extra latency for others. This is safe as the irq
		 * generation is designed to be run atomically and so is
		 * lockless.
		 */
		ring->user_irq_get(dev, ring);
		ret = wait_event_interruptible(ring->irq_queue,
					       i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
					       || atomic_read(&dev_priv->mm.wedged));
		ring->user_irq_put(dev, ring);

		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
			ret = -EIO;
	}

	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}
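
/*
 * Editor's sketch (illustrative only): the throttle walks this client's
 * requests oldest-first and remembers the last one emitted more than 20 ms
 * ago; anything newer is left outstanding so CPU and GPU can keep working
 * in parallel.  The cut-off test is plain jiffies arithmetic:
 */
#if 0
static bool example_is_recent(unsigned long emitted_jiffies)
{
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);

	/* true when the request was emitted within the last 20 ms */
	return time_after_eq(emitted_jiffies, recent_enough);
}
#endif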

static int
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
			  uint64_t exec_offset)
{
	uint32_t exec_start, exec_len;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7)
		return -EINVAL;

	if (!exec_start)
		return -EINVAL;

	return 0;
}
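
/*
 * Editor's note (illustrative): "(exec_start | exec_len) & 0x7" rejects a
 * batch unless both the start address and the length are 8-byte aligned.
 * ORing the two values lets a single mask test cover both, e.g.
 * start = 0x1000, len = 0x104 gives 0x1104, and 0x1104 & 0x7 == 0x4,
 * so that batch is rejected.
 */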

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
		size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);

		if (!access_ok(VERIFY_READ, ptr, length))
			return -EFAULT;

		if (fault_in_pages_readable(ptr, length))
			return -EFAULT;
	}

	return 0;
}
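
/*
 * Editor's note (illustrative): access_ok() only range-checks the user
 * pointer, while fault_in_pages_readable() actually touches the start and
 * end of the range so the relocation entries are faulted in up front,
 * before the execbuffer path starts taking locks and pinning buffers.
 */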
|
|
|
|
|
2010-08-12 11:36:12 +00:00
|
|
|
static int
|
2009-12-18 03:05:42 +00:00
|
|
|
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv,
|
|
|
|
struct drm_i915_gem_execbuffer2 *args,
|
|
|
|
struct drm_i915_gem_exec_object2 *exec_list)
|
2008-07-30 19:06:12 +00:00
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
struct drm_gem_object **object_list = NULL;
|
|
|
|
struct drm_gem_object *batch_obj;
|
2009-03-03 19:45:57 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv;
|
2009-03-11 19:30:04 +00:00
|
|
|
struct drm_clip_rect *cliprects = NULL;
|
2010-08-12 11:36:12 +00:00
|
|
|
struct drm_i915_gem_request *request = NULL;
|
2010-10-14 11:10:41 +00:00
|
|
|
int ret, i, pinned = 0;
|
2008-07-30 19:06:12 +00:00
|
|
|
uint64_t exec_offset;
|
2009-11-18 16:25:18 +00:00
|
|
|
int pin_tries, flips;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-05-21 01:08:56 +00:00
|
|
|
struct intel_ring_buffer *ring = NULL;
|
|
|
|
|
2010-09-25 09:19:17 +00:00
|
|
|
ret = i915_gem_check_is_wedged(dev);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2010-10-14 11:10:41 +00:00
|
|
|
ret = validate_exec_list(exec_list, args->buffer_count);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
#if WATCH_EXEC
|
|
|
|
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
|
|
|
|
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
|
|
|
|
#endif
|
2010-05-21 01:08:57 +00:00
|
|
|
if (args->flags & I915_EXEC_BSD) {
|
|
|
|
if (!HAS_BSD(dev)) {
|
|
|
|
DRM_ERROR("execbuf with wrong flag\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
ring = &dev_priv->bsd_ring;
|
|
|
|
} else {
|
|
|
|
ring = &dev_priv->render_ring;
|
|
|
|
}
|
|
|
|
|
2008-09-10 21:22:49 +00:00
|
|
|
if (args->buffer_count < 1) {
|
|
|
|
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2009-11-22 02:49:37 +00:00
|
|
|
object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
|
2009-12-18 03:05:42 +00:00
|
|
|
if (object_list == NULL) {
|
|
|
|
DRM_ERROR("Failed to allocate object list for %d buffers\n",
|
2008-07-30 19:06:12 +00:00
|
|
|
args->buffer_count);
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto pre_mutex_err;
|
|
|
|
}
|
|
|
|
|
2009-03-11 19:30:04 +00:00
|
|
|
if (args->num_cliprects != 0) {
|
2009-03-24 19:23:04 +00:00
|
|
|
cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
|
|
|
|
GFP_KERNEL);
|
2010-02-09 14:25:55 +00:00
|
|
|
if (cliprects == NULL) {
|
|
|
|
ret = -ENOMEM;
|
2009-03-11 19:30:04 +00:00
|
|
|
goto pre_mutex_err;
|
2010-02-09 14:25:55 +00:00
|
|
|
}
|
2009-03-11 19:30:04 +00:00
|
|
|
|
|
|
|
ret = copy_from_user(cliprects,
|
|
|
|
(struct drm_clip_rect __user *)
|
|
|
|
(uintptr_t) args->cliprects_ptr,
|
|
|
|
sizeof(*cliprects) * args->num_cliprects);
|
|
|
|
if (ret != 0) {
|
|
|
|
DRM_ERROR("copy %d cliprects failed: %d\n",
|
|
|
|
args->num_cliprects, ret);
|
2010-06-23 17:03:01 +00:00
|
|
|
ret = -EFAULT;
|
2009-03-11 19:30:04 +00:00
|
|
|
goto pre_mutex_err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-08-12 11:36:12 +00:00
|
|
|
request = kzalloc(sizeof(*request), GFP_KERNEL);
|
|
|
|
if (request == NULL) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto pre_mutex_err;
|
|
|
|
}
|
|
|
|
|
2010-09-25 10:22:51 +00:00
|
|
|
ret = i915_mutex_lock_interruptible(dev);
|
|
|
|
if (ret)
|
|
|
|
goto pre_mutex_err;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
if (dev_priv->mm.suspended) {
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
2009-02-06 16:55:20 +00:00
|
|
|
ret = -EBUSY;
|
|
|
|
goto pre_mutex_err;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2008-11-21 07:30:27 +00:00
|
|
|
/* Look up object handles */
|
2008-07-30 19:06:12 +00:00
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
|
|
|
object_list[i] = drm_gem_object_lookup(dev, file_priv,
|
|
|
|
exec_list[i].handle);
|
|
|
|
if (object_list[i] == NULL) {
|
|
|
|
DRM_ERROR("Invalid object handle %d at index %d\n",
|
|
|
|
exec_list[i].handle, i);
|
2010-01-23 20:26:35 +00:00
|
|
|
/* prevent error path from reading uninitialized data */
|
|
|
|
args->buffer_count = i + 1;
|
2010-08-04 13:19:46 +00:00
|
|
|
ret = -ENOENT;
|
2008-07-30 19:06:12 +00:00
|
|
|
goto err;
|
|
|
|
}
|
2009-03-03 19:45:57 +00:00
|
|
|
|
2010-03-08 12:35:02 +00:00
|
|
|
obj_priv = to_intel_bo(object_list[i]);
|
2009-03-03 19:45:57 +00:00
|
|
|
if (obj_priv->in_execbuffer) {
|
|
|
|
DRM_ERROR("Object %p appears more than once in object list\n",
|
|
|
|
object_list[i]);
|
2010-01-23 20:26:35 +00:00
|
|
|
/* prevent error path from reading uninitialized data */
|
|
|
|
args->buffer_count = i + 1;
|
2010-08-04 13:19:46 +00:00
|
|
|
ret = -EINVAL;
|
2009-03-03 19:45:57 +00:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
obj_priv->in_execbuffer = true;
|
2008-11-21 07:30:27 +00:00
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2008-11-21 07:30:27 +00:00
|
|
|
/* Pin and relocate */
|
|
|
|
for (pin_tries = 0; ; pin_tries++) {
|
|
|
|
ret = 0;
|
2009-03-12 18:23:52 +00:00
|
|
|
|
2008-11-21 07:30:27 +00:00
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
|
|
|
object_list[i]->pending_read_domains = 0;
|
|
|
|
object_list[i]->pending_write_domain = 0;
|
|
|
|
ret = i915_gem_object_pin_and_relocate(object_list[i],
|
|
|
|
file_priv,
|
2010-10-14 11:10:41 +00:00
|
|
|
&exec_list[i]);
|
2008-11-21 07:30:27 +00:00
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
pinned = i + 1;
|
|
|
|
}
|
|
|
|
/* success */
|
|
|
|
if (ret == 0)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* error other than GTT full, or we've already tried again */
|
2009-06-06 08:46:03 +00:00
|
|
|
if (ret != -ENOSPC || pin_tries >= 1) {
|
2009-09-14 15:50:30 +00:00
|
|
|
if (ret != -ERESTARTSYS) {
|
|
|
|
unsigned long long total_size = 0;
|
2010-05-27 12:18:19 +00:00
|
|
|
int num_fences = 0;
|
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
2010-07-02 07:57:15 +00:00
|
|
|
obj_priv = to_intel_bo(object_list[i]);
|
2010-05-27 12:18:19 +00:00
|
|
|
|
2009-09-14 15:50:30 +00:00
|
|
|
total_size += object_list[i]->size;
|
2010-05-27 12:18:19 +00:00
|
|
|
num_fences +=
|
|
|
|
exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
|
|
|
|
obj_priv->tiling_mode != I915_TILING_NONE;
|
|
|
|
}
|
|
|
|
DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
|
2009-09-14 15:50:30 +00:00
|
|
|
pinned+1, args->buffer_count,
|
2010-05-27 12:18:19 +00:00
|
|
|
total_size, num_fences,
|
|
|
|
ret);
|
2010-09-30 10:46:12 +00:00
|
|
|
DRM_ERROR("%u objects [%u pinned, %u GTT], "
|
|
|
|
"%zu object bytes [%zu pinned], "
|
|
|
|
"%zu /%zu gtt bytes\n",
|
|
|
|
dev_priv->mm.object_count,
|
|
|
|
dev_priv->mm.pin_count,
|
|
|
|
dev_priv->mm.gtt_count,
|
|
|
|
dev_priv->mm.object_memory,
|
|
|
|
dev_priv->mm.pin_memory,
|
|
|
|
dev_priv->mm.gtt_memory,
|
|
|
|
dev_priv->mm.gtt_total);
|
2009-09-14 15:50:30 +00:00
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
goto err;
|
|
|
|
}
|
2008-11-21 07:30:27 +00:00
|
|
|
|
|
|
|
/* unpin all of our buffers */
|
|
|
|
for (i = 0; i < pinned; i++)
|
|
|
|
i915_gem_object_unpin(object_list[i]);
|
2008-12-10 18:09:41 +00:00
|
|
|
pinned = 0;
|
2008-11-21 07:30:27 +00:00
|
|
|
|
|
|
|
/* evict everyone we can from the aperture */
|
|
|
|
ret = i915_gem_evict_everything(dev);
|
2009-09-14 15:50:30 +00:00
|
|
|
if (ret && ret != -ENOSPC)
|
2008-11-21 07:30:27 +00:00
|
|
|
goto err;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
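	/*
	 * Editor's note (illustrative): the loop above attempts to pin every
	 * buffer once; on -ENOSPC it unpins what it managed to pin, evicts
	 * the whole aperture and retries exactly once (pin_tries >= 1)
	 * before giving up and printing the memory-usage diagnostics.
	 */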
|
|
|
|
|
|
|
|
/* Set the pending read domains for the batch buffer to COMMAND */
|
|
|
|
batch_obj = object_list[args->buffer_count-1];
|
2009-06-06 08:45:58 +00:00
|
|
|
if (batch_obj->pending_write_domain) {
|
|
|
|
DRM_ERROR("Attempting to use self-modifying batch buffer\n");
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2009-06-06 08:45:57 +00:00
|
|
|
/* Sanity check the batch buffer, prior to moving objects */
|
|
|
|
exec_offset = exec_list[args->buffer_count - 1].offset;
|
|
|
|
	ret = i915_gem_check_execbuffer(args, exec_offset);
|
|
|
|
if (ret != 0) {
|
|
|
|
DRM_ERROR("execbuf with invalid offset/length\n");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2008-11-21 07:23:03 +00:00
|
|
|
/* Zero the global flush/invalidate flags. These
|
|
|
|
* will be modified as new domains are computed
|
|
|
|
* for each object
|
|
|
|
*/
|
|
|
|
dev->invalidate_domains = 0;
|
|
|
|
dev->flush_domains = 0;
|
2010-09-18 10:02:01 +00:00
|
|
|
dev_priv->mm.flush_rings = 0;
|
2008-11-21 07:23:03 +00:00
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
|
|
|
struct drm_gem_object *obj = object_list[i];
|
|
|
|
|
2008-11-21 07:23:03 +00:00
|
|
|
/* Compute new gpu domains and update invalidate/flush */
|
2009-02-19 22:40:50 +00:00
|
|
|
i915_gem_object_set_to_gpu_domain(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2008-11-21 07:23:03 +00:00
|
|
|
if (dev->invalidate_domains | dev->flush_domains) {
|
|
|
|
#if WATCH_EXEC
|
|
|
|
DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
|
|
|
|
__func__,
|
|
|
|
dev->invalidate_domains,
|
|
|
|
dev->flush_domains);
|
|
|
|
#endif
|
2010-09-20 11:50:23 +00:00
|
|
|
i915_gem_flush(dev, file_priv,
|
2008-11-21 07:23:03 +00:00
|
|
|
dev->invalidate_domains,
|
2010-09-18 10:02:01 +00:00
|
|
|
dev->flush_domains,
|
|
|
|
dev_priv->mm.flush_rings);
|
2010-02-02 16:08:37 +00:00
|
|
|
}
|
|
|
|
|
2009-02-19 22:54:51 +00:00
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
|
|
|
struct drm_gem_object *obj = object_list[i];
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2009-08-25 10:15:50 +00:00
|
|
|
uint32_t old_write_domain = obj->write_domain;
|
2009-02-19 22:54:51 +00:00
|
|
|
|
|
|
|
obj->write_domain = obj->pending_write_domain;
|
2010-02-07 15:20:18 +00:00
|
|
|
if (obj->write_domain)
|
|
|
|
list_move_tail(&obj_priv->gpu_write_list,
|
|
|
|
&dev_priv->mm.gpu_write_list);
|
|
|
|
|
2009-08-25 10:15:50 +00:00
|
|
|
trace_i915_gem_object_change_domain(obj,
|
|
|
|
obj->read_domains,
|
|
|
|
old_write_domain);
|
2009-02-19 22:54:51 +00:00
|
|
|
}
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
#if WATCH_COHERENCY
|
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
|
|
|
i915_gem_object_check_coherency(object_list[i],
|
|
|
|
exec_list[i].handle);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if WATCH_EXEC
|
2009-04-02 18:24:54 +00:00
|
|
|
i915_gem_dump_object(batch_obj,
|
2008-07-30 19:06:12 +00:00
|
|
|
args->batch_len,
|
|
|
|
__func__,
|
|
|
|
~0);
|
|
|
|
#endif
|
|
|
|
|
2010-10-07 16:28:15 +00:00
|
|
|
/* Check for any pending flips. As we only maintain a flip queue depth
|
|
|
|
* of 1, we can simply insert a WAIT for the next display flip prior
|
|
|
|
* to executing the batch and avoid stalling the CPU.
|
|
|
|
*/
|
|
|
|
flips = 0;
|
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
|
|
|
if (object_list[i]->write_domain)
|
|
|
|
flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
|
|
|
|
}
|
|
|
|
if (flips) {
|
|
|
|
int plane, flip_mask;
|
|
|
|
|
|
|
|
for (plane = 0; flips >> plane; plane++) {
|
|
|
|
if (((flips >> plane) & 1) == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (plane)
|
|
|
|
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
|
|
|
|
else
|
|
|
|
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
|
|
|
|
|
|
|
|
intel_ring_begin(dev, ring, 2);
|
|
|
|
intel_ring_emit(dev, ring,
|
|
|
|
MI_WAIT_FOR_EVENT | flip_mask);
|
|
|
|
intel_ring_emit(dev, ring, MI_NOOP);
|
|
|
|
intel_ring_advance(dev, ring);
|
|
|
|
}
|
|
|
|
}
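	/*
	 * Editor's note (illustrative): "flips" is a per-plane bitmask of
	 * pending page flips gathered from the objects this batch writes.
	 * For each set bit an MI_WAIT_FOR_EVENT with the matching plane-flip
	 * wait mask is emitted, so it is the ring, not the CPU, that stalls
	 * until the outstanding display flip completes.
	 */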
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
/* Exec the batchbuffer */
|
2010-05-21 01:08:56 +00:00
|
|
|
ret = ring->dispatch_gem_execbuffer(dev, ring, args,
|
2010-10-07 16:28:15 +00:00
|
|
|
cliprects, exec_offset);
|
2008-07-30 19:06:12 +00:00
|
|
|
if (ret) {
|
|
|
|
DRM_ERROR("dispatch failed %d\n", ret);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ensure that the commands in the batch buffer are
|
|
|
|
* finished before the interrupt fires
|
|
|
|
*/
|
2010-02-11 21:29:04 +00:00
|
|
|
i915_retire_commands(dev, ring);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-02-11 21:16:02 +00:00
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
|
|
|
struct drm_gem_object *obj = object_list[i];
|
|
|
|
obj_priv = to_intel_bo(obj);
|
|
|
|
|
|
|
|
i915_gem_object_move_to_active(obj, ring);
|
|
|
|
}
|
2010-09-28 09:07:56 +00:00
|
|
|
|
2010-09-22 10:22:30 +00:00
|
|
|
i915_add_request(dev, file_priv, request, ring);
|
2010-08-12 11:36:12 +00:00
|
|
|
request = NULL;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
err:
|
2008-12-21 15:28:47 +00:00
|
|
|
for (i = 0; i < pinned; i++)
|
|
|
|
i915_gem_object_unpin(object_list[i]);
|
|
|
|
|
2009-03-03 19:45:57 +00:00
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
|
|
|
if (object_list[i]) {
|
2010-03-08 12:35:02 +00:00
|
|
|
obj_priv = to_intel_bo(object_list[i]);
|
2009-03-03 19:45:57 +00:00
|
|
|
obj_priv->in_execbuffer = false;
|
|
|
|
}
|
2008-12-21 15:28:47 +00:00
|
|
|
drm_gem_object_unreference(object_list[i]);
|
2009-03-03 19:45:57 +00:00
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
|
2010-01-31 10:40:48 +00:00
|
|
|
pre_mutex_err:
|
2009-05-08 23:13:25 +00:00
|
|
|
drm_free_large(object_list);
|
2009-03-24 19:23:04 +00:00
|
|
|
kfree(cliprects);
|
2010-08-12 11:36:12 +00:00
|
|
|
kfree(request);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-12-18 03:05:42 +00:00
|
|
|
/*
|
|
|
|
* Legacy execbuffer just creates an exec2 list from the original exec object
|
|
|
|
* list array and passes it to the real function.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
i915_gem_execbuffer(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_execbuffer *args = data;
|
|
|
|
struct drm_i915_gem_execbuffer2 exec2;
|
|
|
|
struct drm_i915_gem_exec_object *exec_list = NULL;
|
|
|
|
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
|
|
|
|
int ret, i;
|
|
|
|
|
|
|
|
#if WATCH_EXEC
|
|
|
|
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
|
|
|
|
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (args->buffer_count < 1) {
|
|
|
|
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Copy in the exec list from userland */
|
|
|
|
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
|
|
|
|
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
|
|
|
|
if (exec_list == NULL || exec2_list == NULL) {
|
|
|
|
DRM_ERROR("Failed to allocate exec list for %d buffers\n",
|
|
|
|
args->buffer_count);
|
|
|
|
drm_free_large(exec_list);
|
|
|
|
drm_free_large(exec2_list);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
ret = copy_from_user(exec_list,
|
|
|
|
(struct drm_i915_relocation_entry __user *)
|
|
|
|
(uintptr_t) args->buffers_ptr,
|
|
|
|
sizeof(*exec_list) * args->buffer_count);
|
|
|
|
if (ret != 0) {
|
|
|
|
DRM_ERROR("copy %d exec entries failed %d\n",
|
|
|
|
args->buffer_count, ret);
|
|
|
|
drm_free_large(exec_list);
|
|
|
|
drm_free_large(exec2_list);
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
|
|
|
exec2_list[i].handle = exec_list[i].handle;
|
|
|
|
exec2_list[i].relocation_count = exec_list[i].relocation_count;
|
|
|
|
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
|
|
|
|
exec2_list[i].alignment = exec_list[i].alignment;
|
|
|
|
exec2_list[i].offset = exec_list[i].offset;
|
2010-09-16 23:32:17 +00:00
|
|
|
if (INTEL_INFO(dev)->gen < 4)
|
2009-12-18 03:05:42 +00:00
|
|
|
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
|
|
|
|
else
|
|
|
|
exec2_list[i].flags = 0;
|
|
|
|
}
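
	/*
	 * Editor's note (a reading of the conversion above, not
	 * authoritative): the legacy exec_object ABI carries no flags field,
	 * so on pre-gen4 hardware every buffer is conservatively marked
	 * EXEC_OBJECT_NEEDS_FENCE, preserving the old behaviour where tiled
	 * buffers relied on fence registers for GPU access.
	 */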
|
|
|
|
|
|
|
|
exec2.buffers_ptr = args->buffers_ptr;
|
|
|
|
exec2.buffer_count = args->buffer_count;
|
|
|
|
exec2.batch_start_offset = args->batch_start_offset;
|
|
|
|
exec2.batch_len = args->batch_len;
|
|
|
|
exec2.DR1 = args->DR1;
|
|
|
|
exec2.DR4 = args->DR4;
|
|
|
|
exec2.num_cliprects = args->num_cliprects;
|
|
|
|
exec2.cliprects_ptr = args->cliprects_ptr;
|
2010-05-21 01:08:56 +00:00
|
|
|
exec2.flags = I915_EXEC_RENDER;
|
2009-12-18 03:05:42 +00:00
|
|
|
|
|
|
|
ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
|
|
|
|
if (!ret) {
|
|
|
|
/* Copy the new buffer offsets back to the user's exec list. */
|
|
|
|
for (i = 0; i < args->buffer_count; i++)
|
|
|
|
exec_list[i].offset = exec2_list[i].offset;
|
|
|
|
/* ... and back out to userspace */
|
|
|
|
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
|
|
|
|
(uintptr_t) args->buffers_ptr,
|
|
|
|
exec_list,
|
|
|
|
sizeof(*exec_list) * args->buffer_count);
|
|
|
|
if (ret) {
|
|
|
|
ret = -EFAULT;
|
|
|
|
DRM_ERROR("failed to copy %d exec entries "
|
|
|
|
"back to user (%d)\n",
|
|
|
|
args->buffer_count, ret);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
drm_free_large(exec_list);
|
|
|
|
drm_free_large(exec2_list);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
i915_gem_execbuffer2(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_execbuffer2 *args = data;
|
|
|
|
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
#if WATCH_EXEC
|
|
|
|
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
|
|
|
|
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (args->buffer_count < 1) {
|
|
|
|
DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
|
|
|
|
if (exec2_list == NULL) {
|
|
|
|
DRM_ERROR("Failed to allocate exec list for %d buffers\n",
|
|
|
|
args->buffer_count);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
ret = copy_from_user(exec2_list,
|
|
|
|
(struct drm_i915_relocation_entry __user *)
|
|
|
|
(uintptr_t) args->buffers_ptr,
|
|
|
|
sizeof(*exec2_list) * args->buffer_count);
|
|
|
|
if (ret != 0) {
|
|
|
|
DRM_ERROR("copy %d exec entries failed %d\n",
|
|
|
|
args->buffer_count, ret);
|
|
|
|
drm_free_large(exec2_list);
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
|
|
|
|
if (!ret) {
|
|
|
|
/* Copy the new buffer offsets back to the user's exec list. */
|
|
|
|
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
|
|
|
|
(uintptr_t) args->buffers_ptr,
|
|
|
|
exec2_list,
|
|
|
|
sizeof(*exec2_list) * args->buffer_count);
|
|
|
|
if (ret) {
|
|
|
|
ret = -EFAULT;
|
|
|
|
DRM_ERROR("failed to copy %d exec entries "
|
|
|
|
"back to user (%d)\n",
|
|
|
|
args->buffer_count, ret);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
drm_free_large(exec2_list);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
int
|
|
|
|
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = obj->dev;
|
2010-09-20 16:36:15 +00:00
|
|
|
struct drm_i915_private *dev_priv = dev->dev_private;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
int ret;
|
|
|
|
|
2010-05-13 09:49:44 +00:00
|
|
|
BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
|
2010-09-29 15:10:57 +00:00
|
|
|
WARN_ON(i915_verify_lists(dev));
|
2010-05-27 12:18:18 +00:00
|
|
|
|
|
|
|
if (obj_priv->gtt_space != NULL) {
|
|
|
|
if (alignment == 0)
|
|
|
|
alignment = i915_gem_get_gtt_alignment(obj);
|
|
|
|
if (obj_priv->gtt_offset & (alignment - 1)) {
|
2010-08-04 11:37:41 +00:00
|
|
|
WARN(obj_priv->pin_count,
|
|
|
|
"bo is already pinned with incorrect alignment:"
|
|
|
|
" offset=%x, req.alignment=%x\n",
|
|
|
|
obj_priv->gtt_offset, alignment);
|
2010-05-27 12:18:18 +00:00
|
|
|
ret = i915_gem_object_unbind(obj);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
if (obj_priv->gtt_space == NULL) {
|
|
|
|
ret = i915_gem_object_bind_to_gtt(obj, alignment);
|
2009-09-20 23:22:34 +00:00
|
|
|
if (ret)
|
2008-07-30 19:06:12 +00:00
|
|
|
return ret;
|
2009-02-11 14:26:45 +00:00
|
|
|
}
|
2009-12-18 03:05:42 +00:00
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
obj_priv->pin_count++;
|
|
|
|
|
|
|
|
/* If the object is not active and not pending a flush,
|
|
|
|
* remove it from the inactive list
|
|
|
|
*/
|
|
|
|
if (obj_priv->pin_count == 1) {
|
2010-09-30 10:46:12 +00:00
|
|
|
i915_gem_info_add_pin(dev_priv, obj->size);
|
2010-09-20 16:36:15 +00:00
|
|
|
if (!obj_priv->active)
|
|
|
|
list_move_tail(&obj_priv->list,
|
|
|
|
&dev_priv->mm.pinned_list);
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2010-09-29 15:10:57 +00:00
|
|
|
WARN_ON(i915_verify_lists(dev));
|
2008-07-30 19:06:12 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
i915_gem_object_unpin(struct drm_gem_object *obj)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = obj->dev;
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-09-29 15:10:57 +00:00
|
|
|
WARN_ON(i915_verify_lists(dev));
|
2008-07-30 19:06:12 +00:00
|
|
|
obj_priv->pin_count--;
|
|
|
|
BUG_ON(obj_priv->pin_count < 0);
|
|
|
|
BUG_ON(obj_priv->gtt_space == NULL);
|
|
|
|
|
|
|
|
/* If the object is no longer pinned, and is
|
|
|
|
* neither active nor being flushed, then stick it on
|
|
|
|
* the inactive list
|
|
|
|
*/
|
|
|
|
if (obj_priv->pin_count == 0) {
|
2010-09-20 16:36:15 +00:00
|
|
|
if (!obj_priv->active)
|
2008-07-30 19:06:12 +00:00
|
|
|
list_move_tail(&obj_priv->list,
|
|
|
|
&dev_priv->mm.inactive_list);
|
2010-09-30 10:46:12 +00:00
|
|
|
i915_gem_info_remove_pin(dev_priv, obj->size);
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
2010-09-29 15:10:57 +00:00
|
|
|
WARN_ON(i915_verify_lists(dev));
|
2008-07-30 19:06:12 +00:00
|
|
|
}
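
/*
 * Editor's sketch (hypothetical usage, not part of the driver): pin_count
 * behaves like a simple refcount.  The first pin accounts the object as
 * pinned and moves an inactive object onto mm.pinned_list; the last unpin
 * moves it back to mm.inactive_list and drops the accounting.  Callers hold
 * dev->struct_mutex around the pair:
 */
#if 0
static int example_pin_use_unpin(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_pin(obj, 4096);	/* pin_count 0 -> 1 */
	if (ret)
		return ret;

	/* ... use to_intel_bo(obj)->gtt_offset while it stays pinned ... */

	i915_gem_object_unpin(obj);		/* pin_count 1 -> 0 */
	return 0;
}
#endif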
|
|
|
|
|
|
|
|
int
|
|
|
|
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_pin *args = data;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
|
|
|
if (obj == NULL) {
|
|
|
|
DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
|
|
|
|
args->handle);
|
2010-08-04 13:19:46 +00:00
|
|
|
return -ENOENT;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
2010-03-08 12:35:02 +00:00
|
|
|
obj_priv = to_intel_bo(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-09-25 10:22:51 +00:00
|
|
|
ret = i915_mutex_lock_interruptible(dev);
|
|
|
|
if (ret) {
|
|
|
|
drm_gem_object_unreference_unlocked(obj);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-09-22 13:24:13 +00:00
|
|
|
if (obj_priv->madv != I915_MADV_WILLNEED) {
|
|
|
|
DRM_ERROR("Attempting to pin a purgeable buffer\n");
|
2009-09-14 15:50:29 +00:00
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
|
|
|
|
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
|
|
|
|
args->handle);
|
2009-02-08 19:08:04 +00:00
|
|
|
drm_gem_object_unreference(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
obj_priv->user_pin_count++;
|
|
|
|
obj_priv->pin_filp = file_priv;
|
|
|
|
if (obj_priv->user_pin_count == 1) {
|
|
|
|
ret = i915_gem_object_pin(obj, args->alignment);
|
|
|
|
if (ret != 0) {
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return ret;
|
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* XXX - flush the CPU caches for pinned objects
|
|
|
|
* as the X server doesn't manage domains yet
|
|
|
|
*/
|
2008-11-14 21:35:19 +00:00
|
|
|
i915_gem_object_flush_cpu_write_domain(obj);
|
2008-07-30 19:06:12 +00:00
|
|
|
args->offset = obj_priv->gtt_offset;
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_pin *args = data;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
2010-09-25 10:22:51 +00:00
|
|
|
int ret;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
|
|
|
if (obj == NULL) {
|
|
|
|
DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
|
|
|
|
args->handle);
|
2010-08-04 13:19:46 +00:00
|
|
|
return -ENOENT;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2010-03-08 12:35:02 +00:00
|
|
|
obj_priv = to_intel_bo(obj);
|
2010-09-25 10:22:51 +00:00
|
|
|
|
|
|
|
ret = i915_mutex_lock_interruptible(dev);
|
|
|
|
if (ret) {
|
|
|
|
drm_gem_object_unreference_unlocked(obj);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (obj_priv->pin_filp != file_priv) {
|
|
|
|
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
|
|
|
|
args->handle);
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
obj_priv->user_pin_count--;
|
|
|
|
if (obj_priv->user_pin_count == 0) {
|
|
|
|
obj_priv->pin_filp = NULL;
|
|
|
|
i915_gem_object_unpin(obj);
|
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_busy *args = data;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
2010-09-25 09:19:17 +00:00
|
|
|
int ret;
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
|
|
|
if (obj == NULL) {
|
|
|
|
DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
|
|
|
|
args->handle);
|
2010-08-04 13:19:46 +00:00
|
|
|
return -ENOENT;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2010-09-25 10:22:51 +00:00
|
|
|
ret = i915_mutex_lock_interruptible(dev);
|
|
|
|
if (ret) {
|
|
|
|
drm_gem_object_unreference_unlocked(obj);
|
|
|
|
return ret;
|
2010-09-25 09:19:17 +00:00
|
|
|
}
|
|
|
|
|
2010-08-04 14:36:30 +00:00
|
|
|
/* Count all active objects as busy, even if they are currently not used
|
|
|
|
* by the gpu. Users of this interface expect objects to eventually
|
|
|
|
* become non-busy without any further actions, therefore emit any
|
|
|
|
* necessary flushes here.
|
2008-12-15 03:05:04 +00:00
|
|
|
*/
|
2010-08-04 14:36:30 +00:00
|
|
|
obj_priv = to_intel_bo(obj);
|
|
|
|
args->busy = obj_priv->active;
|
|
|
|
if (args->busy) {
|
|
|
|
/* Unconditionally flush objects, even when the gpu still uses this
|
|
|
|
* object. Userspace calling this function indicates that it wants to
|
|
|
|
* use this buffer rather sooner than later, so issuing the required
|
|
|
|
* flush earlier is beneficial.
|
|
|
|
*/
|
2010-09-20 11:50:23 +00:00
|
|
|
if (obj->write_domain & I915_GEM_GPU_DOMAINS)
|
|
|
|
i915_gem_flush_ring(dev, file_priv,
|
2010-09-18 10:02:01 +00:00
|
|
|
obj_priv->ring,
|
|
|
|
0, obj->write_domain);
|
2010-08-04 14:36:30 +00:00
|
|
|
|
|
|
|
/* Update the active list for the hardware's current position.
|
|
|
|
* Otherwise this only updates on a delayed timer or when irqs
|
|
|
|
* are actually unmasked, and our working set ends up being
|
|
|
|
* larger than required.
|
|
|
|
*/
|
|
|
|
i915_gem_retire_requests_ring(dev, obj_priv->ring);
|
|
|
|
|
|
|
|
args->busy = obj_priv->active;
|
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
2010-09-25 10:22:51 +00:00
|
|
|
return 0;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
return i915_gem_ring_throttle(dev, file_priv);
|
|
|
|
}
|
|
|
|
|
2009-09-14 15:50:29 +00:00
|
|
|
int
|
|
|
|
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_madvise *args = data;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
2010-09-25 10:22:51 +00:00
|
|
|
int ret;
|
2009-09-14 15:50:29 +00:00
|
|
|
|
|
|
|
switch (args->madv) {
|
|
|
|
case I915_MADV_DONTNEED:
|
|
|
|
case I915_MADV_WILLNEED:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
|
|
|
if (obj == NULL) {
|
|
|
|
DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
|
|
|
|
args->handle);
|
2010-08-04 13:19:46 +00:00
|
|
|
return -ENOENT;
|
2009-09-14 15:50:29 +00:00
|
|
|
}
|
2010-03-08 12:35:02 +00:00
|
|
|
obj_priv = to_intel_bo(obj);
|
2009-09-14 15:50:29 +00:00
|
|
|
|
2010-09-25 10:22:51 +00:00
|
|
|
ret = i915_mutex_lock_interruptible(dev);
|
|
|
|
if (ret) {
|
|
|
|
drm_gem_object_unreference_unlocked(obj);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-09-14 15:50:29 +00:00
|
|
|
if (obj_priv->pin_count) {
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
|
|
|
|
DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2009-09-22 13:24:13 +00:00
|
|
|
if (obj_priv->madv != __I915_MADV_PURGED)
|
|
|
|
obj_priv->madv = args->madv;
|
2009-09-14 15:50:29 +00:00
|
|
|
|
2009-09-20 22:13:10 +00:00
|
|
|
/* if the object is no longer bound, discard its backing storage */
|
|
|
|
if (i915_gem_object_is_purgeable(obj_priv) &&
|
|
|
|
obj_priv->gtt_space == NULL)
|
|
|
|
i915_gem_object_truncate(obj);
|
|
|
|
|
2009-09-22 13:24:13 +00:00
|
|
|
args->retained = obj_priv->madv != __I915_MADV_PURGED;
|
|
|
|
|
2009-09-14 15:50:29 +00:00
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
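
/*
 * Editor's note (illustrative): I915_MADV_DONTNEED only marks the backing
 * storage as purgeable; the pages are truncated immediately only when the
 * object is already unbound, otherwise they can be reclaimed later under
 * memory pressure.  args->retained tells the caller whether the contents
 * still exist, so a buffer re-marked WILLNEED can either be reused or must
 * be repainted accordingly.
 */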
|
|
|
|
|
2010-04-09 19:05:06 +00:00
|
|
|
struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
|
|
|
|
size_t size)
|
|
|
|
{
|
2010-09-30 10:46:12 +00:00
|
|
|
struct drm_i915_private *dev_priv = dev->dev_private;
|
2010-04-09 19:05:07 +00:00
|
|
|
struct drm_i915_gem_object *obj;
|
2010-04-09 19:05:06 +00:00
|
|
|
|
2010-04-09 19:05:07 +00:00
|
|
|
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
|
|
|
|
if (obj == NULL)
|
|
|
|
return NULL;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-04-09 19:05:07 +00:00
|
|
|
if (drm_gem_object_init(dev, &obj->base, size) != 0) {
|
|
|
|
kfree(obj);
|
|
|
|
return NULL;
|
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-09-30 10:46:12 +00:00
|
|
|
i915_gem_info_add_obj(dev_priv, size);
|
|
|
|
|
2010-04-09 19:05:07 +00:00
|
|
|
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
|
|
|
|
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-04-09 19:05:07 +00:00
|
|
|
obj->agp_type = AGP_USER_MEMORY;
|
2010-04-09 19:05:08 +00:00
|
|
|
obj->base.driver_private = NULL;
|
2010-04-09 19:05:07 +00:00
|
|
|
obj->fence_reg = I915_FENCE_REG_NONE;
|
|
|
|
INIT_LIST_HEAD(&obj->list);
|
|
|
|
INIT_LIST_HEAD(&obj->gpu_write_list);
|
|
|
|
obj->madv = I915_MADV_WILLNEED;
|
2008-11-12 18:03:55 +00:00
|
|
|
|
2010-04-09 19:05:07 +00:00
|
|
|
return &obj->base;
|
|
|
|
}
|
|
|
|
|
|
|
|
int i915_gem_init_object(struct drm_gem_object *obj)
|
|
|
|
{
|
|
|
|
BUG();
|
2008-11-12 18:03:55 +00:00
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-07-23 22:18:50 +00:00
|
|
|
static void i915_gem_free_object_tail(struct drm_gem_object *obj)
|
2008-07-30 19:06:12 +00:00
|
|
|
{
|
2008-11-12 18:03:55 +00:00
|
|
|
struct drm_device *dev = obj->dev;
|
2010-07-23 22:18:50 +00:00
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
2010-03-08 12:35:02 +00:00
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
2010-07-23 22:18:50 +00:00
|
|
|
int ret;
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-07-23 22:18:50 +00:00
|
|
|
ret = i915_gem_object_unbind(obj);
|
|
|
|
if (ret == -ERESTARTSYS) {
|
|
|
|
list_move(&obj_priv->list,
|
|
|
|
&dev_priv->mm.deferred_free_list);
|
|
|
|
return;
|
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2009-09-10 07:53:04 +00:00
|
|
|
if (obj_priv->mmap_offset)
|
|
|
|
i915_gem_free_mmap_offset(obj);
|
2008-11-12 18:03:55 +00:00
|
|
|
|
2010-04-09 19:05:07 +00:00
|
|
|
drm_gem_object_release(obj);
|
2010-09-30 10:46:12 +00:00
|
|
|
i915_gem_info_remove_obj(dev_priv, obj->size);
|
2010-04-09 19:05:07 +00:00
|
|
|
|
2009-03-24 19:23:04 +00:00
|
|
|
kfree(obj_priv->page_cpu_valid);
|
2009-03-12 23:56:27 +00:00
|
|
|
kfree(obj_priv->bit_17);
|
2010-04-09 19:05:07 +00:00
|
|
|
kfree(obj_priv);
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2010-07-23 22:18:50 +00:00
|
|
|
void i915_gem_free_object(struct drm_gem_object *obj)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = obj->dev;
|
|
|
|
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
|
|
|
|
|
|
|
|
trace_i915_gem_object_destroy(obj);
|
|
|
|
|
|
|
|
while (obj_priv->pin_count > 0)
|
|
|
|
i915_gem_object_unpin(obj);
|
|
|
|
|
|
|
|
if (obj_priv->phys_obj)
|
|
|
|
i915_gem_detach_phys_object(dev, obj);
|
|
|
|
|
|
|
|
i915_gem_free_object_tail(obj);
|
|
|
|
}
|
|
|
|
|
2010-01-07 10:39:13 +00:00
|
|
|
int
|
|
|
|
i915_gem_idle(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
int ret;
|
2008-11-13 23:00:55 +00:00
|
|
|
|
2010-01-07 10:39:13 +00:00
|
|
|
mutex_lock(&dev->struct_mutex);
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2010-05-21 01:08:55 +00:00
|
|
|
if (dev_priv->mm.suspended ||
|
2010-05-21 01:08:57 +00:00
|
|
|
(dev_priv->render_ring.gem_object == NULL) ||
|
|
|
|
(HAS_BSD(dev) &&
|
|
|
|
dev_priv->bsd_ring.gem_object == NULL)) {
|
2010-01-07 10:39:13 +00:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return 0;
|
2008-11-13 23:00:55 +00:00
|
|
|
}
|
|
|
|
|
2010-01-07 10:39:13 +00:00
|
|
|
ret = i915_gpu_idle(dev);
|
2008-10-15 04:41:13 +00:00
|
|
|
if (ret) {
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
2008-07-30 19:06:12 +00:00
|
|
|
return ret;
|
2008-10-15 04:41:13 +00:00
|
|
|
}
|
2008-07-30 19:06:12 +00:00
|
|
|
|
2010-01-07 10:39:13 +00:00
|
|
|
/* Under UMS, be paranoid and evict. */
|
|
|
|
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
|
2010-08-07 10:01:23 +00:00
|
|
|
ret = i915_gem_evict_inactive(dev);
|
2010-01-07 10:39:13 +00:00
|
|
|
if (ret) {
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Hack! Don't let anybody do execbuf while we don't control the chip.
|
|
|
|
* We need to replace this with a semaphore, or something.
|
|
|
|
* And not confound mm.suspended!
|
|
|
|
*/
|
|
|
|
dev_priv->mm.suspended = 1;
|
2010-08-20 16:18:48 +00:00
|
|
|
del_timer_sync(&dev_priv->hangcheck_timer);
|
2010-01-07 10:39:13 +00:00
|
|
|
|
|
|
|
i915_kernel_lost_context(dev);
|
2008-10-15 04:41:13 +00:00
|
|
|
i915_gem_cleanup_ringbuffer(dev);
|
2010-01-07 10:39:13 +00:00
|
|
|
|
2008-10-15 04:41:13 +00:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
|
2010-01-07 10:39:13 +00:00
|
|
|
/* Cancel the retire work handler, which should be idle now. */
|
|
|
|
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-04-21 18:39:23 +00:00
|
|
|
/*
|
|
|
|
* 965+ support PIPE_CONTROL commands, which provide finer grained control
|
|
|
|
* over cache flushing.
|
|
|
|
*/
|
2010-05-21 01:08:55 +00:00
|
|
|
static int
|
2010-04-21 18:39:23 +00:00
|
|
|
i915_gem_init_pipe_control(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
|
|
|
int ret;
|
|
|
|
|
2010-05-07 21:30:03 +00:00
|
|
|
obj = i915_gem_alloc_object(dev, 4096);
|
2010-04-21 18:39:23 +00:00
|
|
|
if (obj == NULL) {
|
|
|
|
DRM_ERROR("Failed to allocate seqno page\n");
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
obj_priv = to_intel_bo(obj);
|
|
|
|
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
|
|
|
|
|
|
|
|
ret = i915_gem_object_pin(obj, 4096);
|
|
|
|
if (ret)
|
|
|
|
goto err_unref;
|
|
|
|
|
|
|
|
dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
|
|
|
|
dev_priv->seqno_page = kmap(obj_priv->pages[0]);
|
|
|
|
if (dev_priv->seqno_page == NULL)
|
|
|
|
goto err_unpin;
|
|
|
|
|
|
|
|
dev_priv->seqno_obj = obj;
|
|
|
|
memset(dev_priv->seqno_page, 0, PAGE_SIZE);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_unpin:
|
|
|
|
i915_gem_object_unpin(obj);
|
|
|
|
err_unref:
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
err:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-05-21 01:08:55 +00:00
|
|
|
|
|
|
|
static void
|
2010-04-21 18:39:23 +00:00
|
|
|
i915_gem_cleanup_pipe_control(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
|
|
|
|
|
|
|
obj = dev_priv->seqno_obj;
|
|
|
|
obj_priv = to_intel_bo(obj);
|
|
|
|
kunmap(obj_priv->pages[0]);
|
|
|
|
i915_gem_object_unpin(obj);
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
dev_priv->seqno_obj = NULL;
|
|
|
|
|
|
|
|
dev_priv->seqno_page = NULL;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
2010-05-21 01:08:55 +00:00
|
|
|
int
|
|
|
|
i915_gem_init_ringbuffer(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
int ret;
|
2010-05-27 12:18:22 +00:00
|
|
|
|
2010-05-21 01:08:55 +00:00
|
|
|
if (HAS_PIPE_CONTROL(dev)) {
|
|
|
|
ret = i915_gem_init_pipe_control(dev);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
2010-05-27 12:18:22 +00:00
|
|
|
|
2010-09-16 02:43:11 +00:00
|
|
|
ret = intel_init_render_ring_buffer(dev);
|
2010-05-27 12:18:22 +00:00
|
|
|
if (ret)
|
|
|
|
goto cleanup_pipe_control;
|
|
|
|
|
|
|
|
if (HAS_BSD(dev)) {
|
2010-09-16 02:43:11 +00:00
|
|
|
ret = intel_init_bsd_ring_buffer(dev);
|
2010-05-27 12:18:22 +00:00
|
|
|
if (ret)
|
|
|
|
goto cleanup_render_ring;
|
2010-05-21 01:08:57 +00:00
|
|
|
}
|
2010-05-27 12:18:22 +00:00
|
|
|
|
2010-08-07 10:01:22 +00:00
|
|
|
dev_priv->next_seqno = 1;
|
|
|
|
|
2010-05-27 12:18:22 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
cleanup_render_ring:
|
|
|
|
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
|
|
|
|
cleanup_pipe_control:
|
|
|
|
if (HAS_PIPE_CONTROL(dev))
|
|
|
|
i915_gem_cleanup_pipe_control(dev);
|
2010-05-21 01:08:55 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
|
|
|
|
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
|
2010-05-21 01:08:57 +00:00
|
|
|
if (HAS_BSD(dev))
|
|
|
|
intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
|
2010-05-21 01:08:55 +00:00
|
|
|
if (HAS_PIPE_CONTROL(dev))
|
|
|
|
i915_gem_cleanup_pipe_control(dev);
|
|
|
|
}
|
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
int
|
|
|
|
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (drm_core_check_feature(dev, DRIVER_MODESET))
|
|
|
|
return 0;
|
|
|
|
|
2009-09-14 21:48:47 +00:00
|
|
|
if (atomic_read(&dev_priv->mm.wedged)) {
|
2008-07-30 19:06:12 +00:00
|
|
|
DRM_ERROR("Reenabling wedged hardware, good luck\n");
|
2009-09-14 21:48:47 +00:00
|
|
|
atomic_set(&dev_priv->mm.wedged, 0);
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
mutex_lock(&dev->struct_mutex);
|
2008-12-24 02:42:32 +00:00
|
|
|
dev_priv->mm.suspended = 0;
|
|
|
|
|
|
|
|
ret = i915_gem_init_ringbuffer(dev);
|
2009-04-18 02:43:32 +00:00
|
|
|
if (ret != 0) {
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
2008-12-24 02:42:32 +00:00
|
|
|
return ret;
|
2009-04-18 02:43:32 +00:00
|
|
|
}
|
2008-12-24 02:42:32 +00:00
|
|
|
|
2010-05-21 01:08:56 +00:00
|
|
|
BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
|
2010-05-21 01:08:57 +00:00
|
|
|
BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
|
2008-07-30 19:06:12 +00:00
|
|
|
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
|
|
|
|
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
|
2010-05-21 01:08:56 +00:00
|
|
|
BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
|
2010-05-21 01:08:57 +00:00
|
|
|
BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
|
2008-07-30 19:06:12 +00:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
2008-08-20 15:04:27 +00:00
|
|
|
|
2010-06-07 13:03:03 +00:00
|
|
|
ret = drm_irq_install(dev);
|
|
|
|
if (ret)
|
|
|
|
goto cleanup_ringbuffer;
|
2008-08-20 15:04:27 +00:00
|
|
|
|
2008-07-30 19:06:12 +00:00
|
|
|
return 0;
|
2010-06-07 13:03:03 +00:00
|
|
|
|
|
|
|
cleanup_ringbuffer:
|
|
|
|
mutex_lock(&dev->struct_mutex);
|
|
|
|
i915_gem_cleanup_ringbuffer(dev);
|
|
|
|
dev_priv->mm.suspended = 1;
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
|
|
|
|
return ret;
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
if (drm_core_check_feature(dev, DRIVER_MODESET))
|
|
|
|
return 0;
|
|
|
|
|
2008-08-20 15:04:27 +00:00
|
|
|
drm_irq_uninstall(dev);
|
2009-09-09 00:09:24 +00:00
|
|
|
return i915_gem_idle(dev);
|
2008-07-30 19:06:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
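
/*
 * One-time GEM setup at driver load: initialise the memory-manager lists
 * and the retire work handler, hook this device into the global shrinker
 * list, apply the GEN3 arbiter workaround, size and clear the fence
 * registers, detect bit-6 swizzling and set up the page-flip wait queue.
 */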
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
	if (HAS_BSD(dev)) {
		INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
		INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
	}
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_completion(&dev_priv->error_completion);
	spin_lock(&shrink_list_lock);
	list_add(&dev_priv->mm.shrink_list, &shrink_list);
	spin_unlock(&shrink_list_lock);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		u32 tmp = I915_READ(MI_ARB_STATE);
		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
			/* arb state is a masked write, so set bit + bit in mask */
			tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
			I915_WRITE(MI_ARB_STATE, tmp);
		}
	}

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) ||
	    IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
	case 2:
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		break;
	}
	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}
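
/*
 * Release a single phys object: detach any GEM object still bound to it,
 * restore the mapping to write-back, free the contiguous DMA allocation
 * and clear the slot.
 */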
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
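
/*
 * Detach a GEM object from its phys object: copy the contents of the
 * contiguous buffer back into the object's shmem pages, flush the CPU
 * caches and the chipset, then drop the binding.
 */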
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = to_intel_bo(obj);
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);

	i915_gem_object_put_pages(obj);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}
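
/*
 * Attach a GEM object to a phys object slot: allocate the contiguous
 * backing store on first use, then copy the object's current shmem
 * contents into it so the hardware sees the data at a fixed physical
 * address.
 */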
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj,
			    int id,
			    int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = to_intel_bo(obj);

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	i915_gem_object_put_pages(obj);

	return 0;
out:
	return ret;
}
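
/*
 * pwrite fast path for phys objects: the backing store is a kernel
 * mapping of contiguous memory, so user data can be copied straight in
 * and flushed through the chipset without going via shmem pages.
 */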
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
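
/*
 * Report whether any ring still has objects on its active list; used by
 * the shrinker below to decide whether waiting for the GPU could free
 * more memory.
 */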
static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->render_ring.active_list);
	if (HAS_BSD(dev))
		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);

	return !lists_empty;
}
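
/*
 * Memory-pressure callback registered with the VM. A nr_to_scan of zero
 * is only a request for the object count; otherwise purgeable and then
 * idle inactive buffers are unbound, falling back to idling the GPU when
 * the system is desperate. Returns -1 if the locks could not be taken
 * without risking deadlock.
 */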
static int
i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	drm_i915_private_t *dev_priv, *next_dev;
	struct drm_i915_gem_object *obj_priv, *next_obj;
	int cnt = 0;
	int would_deadlock = 1;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		spin_lock(&shrink_list_lock);
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (mutex_trylock(&dev->struct_mutex)) {
				list_for_each_entry(obj_priv,
						    &dev_priv->mm.inactive_list,
						    list)
					cnt++;
				mutex_unlock(&dev->struct_mutex);
			}
		}
		spin_unlock(&shrink_list_lock);

		return (cnt / 100) * sysctl_vfs_cache_pressure;
	}

	spin_lock(&shrink_list_lock);

rescan:
	/* first scan for clean buffers */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);
		i915_gem_retire_requests(dev);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (i915_gem_object_is_purgeable(obj_priv)) {
				i915_gem_object_unbind(&obj_priv->base);
				if (--nr_to_scan <= 0)
					break;
			}
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;

		if (nr_to_scan <= 0)
			break;
	}

	/* second pass, evict/count anything still on the inactive list */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (nr_to_scan > 0) {
				i915_gem_object_unbind(&obj_priv->base);
				nr_to_scan--;
			} else
				cnt++;
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;
	}

	if (nr_to_scan) {
		int active = 0;

		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This has a dramatic impact to reduce the number of
		 * OOM-killer events whilst running the GPU aggressively.
		 */
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (!mutex_trylock(&dev->struct_mutex))
				continue;

			spin_unlock(&shrink_list_lock);

			if (i915_gpu_is_active(dev)) {
				i915_gpu_idle(dev);
				active++;
			}

			spin_lock(&shrink_list_lock);
			mutex_unlock(&dev->struct_mutex);
		}

		if (active)
			goto rescan;
	}

	spin_unlock(&shrink_list_lock);

	if (would_deadlock)
		return -1;
	else if (cnt > 0)
		return (cnt / 100) * sysctl_vfs_cache_pressure;
	else
		return 0;
}
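
/*
 * Module-level hook-up: a single shrinker instance is registered at init
 * and removed at exit; the core VM invokes it whenever it needs pages
 * back.
 */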
static struct shrinker shrinker = {
	.shrink = i915_gem_shrink,
	.seeks = DEFAULT_SEEKS,
};

__init void
i915_gem_shrinker_init(void)
{
	register_shrinker(&shrinker);
}

__exit void
i915_gem_shrinker_exit(void)
{
	unregister_shrinker(&shrinker);
}