Source-Changes-HG archive
[src/trunk]: src/sys/external/bsd/drm2/dist/drm/i915 Dedup the NetBSD portion...
details: https://anonhg.NetBSD.org/src/rev/914b2fc58607
branches: trunk
changeset: 336578:914b2fc58607
user: christos <christos@NetBSD.org>
date: Fri Mar 06 22:24:05 2015 +0000
description:
Dedup the NetBSD portion of the code (ok Riastradh), no functional change.
diffstat:
sys/external/bsd/drm2/dist/drm/i915/i915_gem.c | 106 +++++-------------------
1 files changed, 23 insertions(+), 83 deletions(-)
diffs (176 lines):
diff -r 9cb96c531630 -r 914b2fc58607 sys/external/bsd/drm2/dist/drm/i915/i915_gem.c
--- a/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c Fri Mar 06 22:03:06 2015 +0000
+++ b/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c Fri Mar 06 22:24:05 2015 +0000
@@ -705,51 +705,21 @@
offset = args->offset;
#ifdef __NetBSD__
- /*
- * XXX This is a big #ifdef with a lot of duplicated code, but
- * factoring out the loop head -- which is all that
- * substantially differs -- is probably more trouble than it's
- * worth at the moment.
- */
- while (0 < remain) {
- /* Get the next page. */
- shmem_page_offset = offset_in_page(offset);
- KASSERT(shmem_page_offset < PAGE_SIZE);
- page_length = MIN(remain, (PAGE_SIZE - shmem_page_offset));
+ while (0 < remain)
+#else
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+ offset >> PAGE_SHIFT)
+#endif
+ {
+#ifdef __NetBSD__
struct page *const page = i915_gem_object_get_page(obj,
atop(offset));
-
- /* Decide whether to swizzle bit 17. */
- page_do_bit17_swizzling = obj_do_bit17_swizzling &&
- (page_to_phys(page) & (1 << 17)) != 0;
-
- /* Try the fast path. */
- ret = shmem_pread_fast(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling, needs_clflush);
- if (ret == 0)
- goto next_page;
-
- /* Fast path failed. Try the slow path. */
- mutex_unlock(&dev->struct_mutex);
- /* XXX prefault */
- ret = shmem_pread_slow(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling, needs_clflush);
- mutex_lock(&dev->struct_mutex);
- if (ret)
- goto out;
-
-next_page: KASSERT(page_length <= remain);
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
#else
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
- offset >> PAGE_SHIFT) {
struct page *page = sg_page_iter_page(&sg_iter);
if (remain <= 0)
break;
+#endif
/* Operation in this page
*
@@ -771,7 +741,7 @@
goto next_page;
mutex_unlock(&dev->struct_mutex);
-
+#ifndef __NetBSD__
if (likely(!i915.prefault_disable) && !prefaulted) {
ret = fault_in_multipages_writeable(user_data, remain);
/* Userspace is tricking us, but we've already clobbered
@@ -781,7 +751,7 @@
(void)ret;
prefaulted = 1;
}
-
+#endif
ret = shmem_pread_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
needs_clflush);
@@ -796,7 +766,6 @@
user_data += page_length;
offset += page_length;
}
-#endif
out:
i915_gem_object_unpin_pages(obj);
@@ -1050,6 +1019,9 @@
int needs_clflush_before = 0;
#ifndef __NetBSD__
struct sg_page_iter sg_iter;
+ int flush_mask = boot_cpu_data.x86_clflush_size - 1;
+#else
+ int flush_mask = cpu_info_primary.ci_cflush_lsize - 1;
#endif
user_data = to_user_ptr(args->data_ptr);
@@ -1083,48 +1055,18 @@
obj->dirty = 1;
#ifdef __NetBSD__
- while (0 < remain) {
- /* Get the next page. */
- shmem_page_offset = offset_in_page(offset);
- KASSERT(shmem_page_offset < PAGE_SIZE);
- page_length = MIN(remain, (PAGE_SIZE - shmem_page_offset));
+ while (0 < remain)
+#else
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+ offset >> PAGE_SHIFT)
+#endif
+ {
+#ifdef __NetBSD__
struct page *const page = i915_gem_object_get_page(obj,
atop(offset));
-
- /* Decide whether to flush the cache or swizzle bit 17. */
- const bool partial_cacheline_write = needs_clflush_before &&
- ((shmem_page_offset | page_length)
- & (cpu_info_primary.ci_cflush_lsize - 1));
- page_do_bit17_swizzling = obj_do_bit17_swizzling &&
- (page_to_phys(page) & (1 << 17)) != 0;
-
- /* Try the fast path. */
- ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- partial_cacheline_write, needs_clflush_after);
- if (ret == 0)
- goto next_page;
-
- /* Fast path failed. Try the slow path. */
- hit_slowpath = 1;
- mutex_unlock(&dev->struct_mutex);
- ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- partial_cacheline_write, needs_clflush_after);
- mutex_lock(&dev->struct_mutex);
- if (ret)
- goto out;
-
-next_page: KASSERT(page_length <= remain);
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
#else
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
- offset >> PAGE_SHIFT) {
struct page *page = sg_page_iter_page(&sg_iter);
- int partial_cacheline_write;
+#endif
if (remain <= 0)
break;
@@ -1143,9 +1085,8 @@
/* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't
* overcomplicate things and flush the entire patch. */
- partial_cacheline_write = needs_clflush_before &&
- ((shmem_page_offset | page_length)
- & (boot_cpu_data.x86_clflush_size - 1));
+ const int partial_cacheline_write = needs_clflush_before &&
+ ((shmem_page_offset | page_length) & flush_mask);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -1174,7 +1115,6 @@
user_data += page_length;
offset += page_length;
}
-#endif
out:
i915_gem_object_unpin_pages(obj);
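
The gist of the change, for readers skimming the diff: instead of carrying a full NetBSD copy of the pread/pwrite loops, only the loop head is now chosen with #ifdef __NetBSD__ (while (0 < remain) versus Linux's for_each_sg_page()), and the brace-enclosed body is shared. Likewise the cache-line mask is computed once as flush_mask, from cpu_info_primary.ci_cflush_lsize on NetBSD or boot_cpu_data.x86_clflush_size on Linux, so the partial_cacheline_write test no longer needs its own #ifdef. The fragment below is a minimal, hypothetical C sketch of that pattern and is not code from the tree: CHUNK and USE_FOR_HEAD are made-up stand-ins for PAGE_SIZE and the platform conditional.

/*
 * Sketch only: two different loop heads share one loop body.
 * USE_FOR_HEAD stands in for the platform #ifdef; CHUNK stands in
 * for PAGE_SIZE.
 */
#include <stdio.h>
#include <stddef.h>

#define CHUNK	16		/* hypothetical chunk size */

int
main(void)
{
	size_t remain = 40;	/* bytes left to copy */
	size_t offset = 3;	/* starting byte offset */

#ifdef USE_FOR_HEAD		/* one platform's loop head ... */
	for (; 0 < remain; /* advance happens in the shared body */)
#else				/* ... the other platform's loop head */
	while (0 < remain)
#endif
	{
		/*
		 * Shared body: clamp the transfer to the current chunk,
		 * analogous to shmem_page_offset/page_length above.
		 */
		const size_t chunk_off = offset % CHUNK;
		size_t len = CHUNK - chunk_off;

		if (remain < len)
			len = remain;
		printf("process %zu bytes at offset %zu\n", len, offset);

		remain -= len;
		offset += len;
	}
	return 0;
}

Keeping the divergence confined to the loop head (and to the one-line flush_mask initializer) means a future fix to the loop body only has to be made once, which is the point of the dedup.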