| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-03-25 19:47:42 +0200 |
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-03-27 13:40:57 +0200 |
| commit | 23c18c71da801fb7ce11acc3041e4f10a1bb5cb0 (patch) | |
| tree | a0e4a48c5736670aef88dceb18fec82036a518e6 | |
| parent | f56f821feb7b36223f309e0ec05986bb137ce418 (diff) | |
drm/i915: fixup in-line clflushing on bit17 swizzled bos
The issue is that with inline clflushing, the clflushing isn't properly
swizzled. Fix this by
- always clflushing entire 128 byte chunks, and
- unconditionally flushing before writes when swizzling a given page
(a user-space sketch of the chunk rounding follows below).
We could be clever and check whether we pwrite a partial 128 byte
chunk instead of a partial cacheline, but I figured that's not
worth it.
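For illustration only, here is a minimal user-space sketch of the 128 byte
chunk rounding the fix relies on. This is not the kernel code itself:
`round_down`/`round_up` are redefined here as power-of-two equivalents of
the kernel macros, and `flush_range` is a hypothetical stand-in for
`drm_clflush_virt_range`.

```c
#include <stdint.h>
#include <stdio.h>

/* User-space equivalents of the kernel's round_down()/round_up()
 * macros (valid for power-of-two alignments only). */
#define round_down(x, a) ((x) & ~((uintptr_t)(a) - 1))
#define round_up(x, a)   round_down((x) + (a) - 1, a)

/* Hypothetical stand-in for drm_clflush_virt_range(). */
static void flush_range(uintptr_t start, size_t len)
{
	printf("flush [%#lx, %#lx)\n", (unsigned long)start,
	       (unsigned long)(start + len));
}

/* Mirrors the shape of shmem_clflush_swizzled_range(): when the page
 * is bit17 swizzled, widen the flush to whole 128 byte chunks so both
 * 64 byte cachelines of every swizzle pair get flushed. */
static void clflush_swizzled_range(uintptr_t addr, size_t length, int swizzled)
{
	if (swizzled) {
		uintptr_t start = round_down(addr, 128);
		uintptr_t end = round_up(addr + length, 128);

		flush_range(start, end - start);
	} else {
		flush_range(addr, length);
	}
}

int main(void)
{
	/* A 32 byte access at 0x1050 widens to the full 128 byte
	 * chunk [0x1000, 0x1080) when swizzled. */
	clflush_swizzled_range(0x1050, 32, 1);
	clflush_swizzled_range(0x1050, 32, 0);
	return 0;
}
```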
Now the usual approach would be to fold this into the original patch series,
but I've opted against that because
- this fixes a corner case that only very old userspace relies on, and
- I'd rather not invalidate all the testing the pwrite rewrite has gotten.
This fixes the regression noticed by tests/gem_tiled_partial_pwrite_pread
from i-g-t. Unfortunately it doesn't fix the issues with partial pwrites to
tiled buffers on bit17 swizzling machines. But those are also broken without
the pwrite patches, so this is likely a different issue (or a problem with the
testcase).
v2: Simplify the patch by dropping the overly clever partial write
logic for swizzled pages.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 39 |
1 file changed, 32 insertions(+), 7 deletions(-)
```diff
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6dc832902f53..c964dfbdb577 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -313,6 +313,28 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
 	return ret;
 }
 
+static void
+shmem_clflush_swizzled_range(char *addr, unsigned long length,
+			     bool swizzled)
+{
+	if (swizzled) {
+		unsigned long start = (unsigned long) addr;
+		unsigned long end = (unsigned long) addr + length;
+
+		/* For swizzling simply ensure that we always flush both
+		 * channels. Lame, but simple and it works. Swizzled
+		 * pwrite/pread is far from a hotpath - current userspace
+		 * doesn't use it at all. */
+		start = round_down(start, 128);
+		end = round_up(end, 128);
+
+		drm_clflush_virt_range((void *)start, end - start);
+	} else {
+		drm_clflush_virt_range(addr, length);
+	}
+
+}
+
 /* Only difference to the fast-path function is that this can handle bit17
  * and uses non-atomic copy and kmap functions. */
 static int
@@ -325,8 +347,9 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
 
 	vaddr = kmap(page);
 	if (needs_clflush)
-		drm_clflush_virt_range(vaddr + shmem_page_offset,
-				       page_length);
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
 
 	if (page_do_bit17_swizzling)
 		ret = __copy_to_user_swizzled(user_data,
@@ -637,9 +660,10 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
 	int ret;
 
 	vaddr = kmap(page);
-	if (needs_clflush_before)
-		drm_clflush_virt_range(vaddr + shmem_page_offset,
-				       page_length);
+	if (needs_clflush_before || page_do_bit17_swizzling)
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
 	if (page_do_bit17_swizzling)
 		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
 						user_data,
@@ -649,8 +673,9 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
 					user_data,
 					page_length);
 	if (needs_clflush_after)
-		drm_clflush_virt_range(vaddr + shmem_page_offset,
-				       page_length);
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
 
 	kunmap(page);
 	return ret;
```
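Why whole 128 byte chunks are sufficient: the swizzled copy helpers in the
same file (`__copy_to_user_swizzled`/`__copy_from_user_swizzled`) XOR the
cacheline offset with 64 on bit17 machines, so swizzling only ever swaps the
two 64 byte cachelines within a 128 byte pair. A standalone sketch of that
invariant (illustration only, not kernel code):

```c
#include <assert.h>
#include <stdio.h>

/* Bit17 swizzling maps CPU offset `off` to `off ^ 64`, i.e. it swaps
 * the two 64 byte cachelines inside each 128 byte chunk.  Flushing
 * the whole chunk containing `off` therefore covers both locations. */
int main(void)
{
	unsigned off;

	for (off = 0; off < 512; off += 32) {
		unsigned swizzled = off ^ 64;

		/* Both offsets fall inside the same 128 byte chunk... */
		assert(off / 128 == swizzled / 128);
		/* ...but in opposite 64 byte halves of it. */
		assert((off / 64) % 2 != (swizzled / 64) % 2);
	}
	printf("every offset pairs with its swizzled twin in-chunk\n");
	return 0;
}
```

Rounding the flush range out to 128 byte boundaries, as
shmem_clflush_swizzled_range does above, thus guarantees that both the
linear and the swizzled cacheline of every touched pair get flushed.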