Source-Changes-HG archive
[src/trunk]: src/sys/external/bsd/drm2/dist Rename drm_gem_object::gemo_shm_uao -> drm_gem_object::filp
details: https://anonhg.NetBSD.org/src/rev/cbcb84e6730e
branches: trunk
changeset: 992631:cbcb84e6730e
user: riastradh <riastradh@NetBSD.org>
date: Mon Aug 27 07:19:01 2018 +0000
description:
Rename drm_gem_object::gemo_shm_uao -> drm_gem_object::filp.
The Linux member that is roughly analogous to this one is named filp,
so adopting that name lets us eliminate a few diffs against the upstream sources.
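For orientation, here is a minimal sketch of what the renamed member looks like in
sys/external/bsd/drm2/dist/include/drm/drm_gem.h after this change. The header hunk
falls outside the truncated diff below, so the surrounding declaration is an
approximation inferred from the call sites (uao_create/uao_detach on the NetBSD side,
shmem_file_setup on the Linux side), not a literal copy of the header:

    struct drm_gem_object {
            /* ... other members unchanged ... */
    #ifdef __NetBSD__
            /* Backing store: a UVM anonymous object from uao_create().
             * Formerly named gemo_shm_uao. */
            struct uvm_object *filp;
    #else
            /* Backing store: a shmem file from shmem_file_setup(). */
            struct file *filp;
    #endif
    };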
diffstat:
sys/external/bsd/drm2/dist/drm/drm_gem.c | 20 +-
sys/external/bsd/drm2/dist/drm/i915/i915_drv.h | 8 +-
sys/external/bsd/drm2/dist/drm/i915/i915_gem.c | 75 +++---------
sys/external/bsd/drm2/dist/drm/i915/i915_gem_render_state.c | 8 +-
sys/external/bsd/drm2/dist/drm/i915/i915_gem_shrinker.c | 12 +-
sys/external/bsd/drm2/dist/drm/nouveau/nouveau_bo.c | 8 +-
sys/external/bsd/drm2/dist/drm/nouveau/nouveau_gem.c | 9 +-
sys/external/bsd/drm2/dist/include/drm/drm_gem.h | 4 +-
8 files changed, 45 insertions(+), 99 deletions(-)
diffs (truncated from 480 to 300 lines):
diff -r 6db94bec4752 -r cbcb84e6730e sys/external/bsd/drm2/dist/drm/drm_gem.c
--- a/sys/external/bsd/drm2/dist/drm/drm_gem.c Mon Aug 27 07:18:47 2018 +0000
+++ b/sys/external/bsd/drm2/dist/drm/drm_gem.c Mon Aug 27 07:19:01 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: drm_gem.c,v 1.7 2018/08/27 04:58:19 riastradh Exp $ */
+/* $NetBSD: drm_gem.c,v 1.8 2018/08/27 07:19:01 riastradh Exp $ */
/*
* Copyright © 2008 Intel Corporation
@@ -28,7 +28,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: drm_gem.c,v 1.7 2018/08/27 04:58:19 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: drm_gem.c,v 1.8 2018/08/27 07:19:01 riastradh Exp $");
#include <linux/types.h>
#include <linux/slab.h>
@@ -165,14 +165,14 @@
* A uao may not have size 0, but a gem object may. Allocate a
* spurious page so we needn't teach uao how to have size 0.
*/
- obj->gemo_shm_uao = uao_create(MAX(size, PAGE_SIZE), 0);
+ obj->filp = uao_create(MAX(size, PAGE_SIZE), 0);
/*
* XXX This is gross. We ought to do it the other way around:
* set the uao to have the main uvm object's lock. However,
* uvm_obj_setlock is not safe on uvm_aobjs.
*/
- mutex_obj_hold(obj->gemo_shm_uao->vmobjlock);
- uvm_obj_setlock(&obj->gemo_uvmobj, obj->gemo_shm_uao->vmobjlock);
+ mutex_obj_hold(obj->filp->vmobjlock);
+ uvm_obj_setlock(&obj->gemo_uvmobj, obj->filp->vmobjlock);
#else
filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(filp))
@@ -202,7 +202,7 @@
obj->dev = dev;
#ifdef __NetBSD__
- obj->gemo_shm_uao = NULL;
+ obj->filp = NULL;
KASSERT(drm_core_check_feature(dev, DRIVER_GEM));
KASSERT(dev->driver->gem_uvm_ops != NULL);
uvm_obj_init(&obj->gemo_uvmobj, dev->driver->gem_uvm_ops, true, 1);
@@ -537,7 +537,7 @@
TAILQ_INIT(&pglist);
/* XXX errno NetBSD->Linux */
- ret = -uvm_obj_wirepages(obj->gemo_shm_uao, 0, obj->size, &pglist);
+ ret = -uvm_obj_wirepages(obj->filp, 0, obj->size, &pglist);
if (ret)
goto fail1;
@@ -618,7 +618,7 @@
pages[i]->p_vmp.flags &= ~PG_CLEAN;
}
- uvm_obj_unwirepages(obj->gemo_shm_uao, 0, obj->size);
+ uvm_obj_unwirepages(obj->filp, 0, obj->size);
}
#else
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
@@ -862,8 +862,8 @@
#ifdef __NetBSD__
drm_vma_node_destroy(&obj->vma_node);
- if (obj->gemo_shm_uao)
- uao_detach(obj->gemo_shm_uao);
+ if (obj->filp)
+ uao_detach(obj->filp);
uvm_obj_destroy(&obj->gemo_uvmobj, true);
#else
if (obj->filp)
diff -r 6db94bec4752 -r cbcb84e6730e sys/external/bsd/drm2/dist/drm/i915/i915_drv.h
--- a/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h Mon Aug 27 07:18:47 2018 +0000
+++ b/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h Mon Aug 27 07:19:01 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_drv.h,v 1.18 2018/08/27 07:17:35 riastradh Exp $ */
+/* $NetBSD: i915_drv.h,v 1.19 2018/08/27 07:19:01 riastradh Exp $ */
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
*/
@@ -2956,9 +2956,9 @@
* lock to prevent them from disappearing.
*/
KASSERT(obj->pages != NULL);
- mutex_enter(obj->base.gemo_shm_uao->vmobjlock);
- page = uvm_pagelookup(obj->base.gemo_shm_uao, ptoa(n));
- mutex_exit(obj->base.gemo_shm_uao->vmobjlock);
+ mutex_enter(obj->base.filp->vmobjlock);
+ page = uvm_pagelookup(obj->base.filp, ptoa(n));
+ mutex_exit(obj->base.filp->vmobjlock);
}
KASSERT(page != NULL);
return container_of(page, struct page, p_vmp);
diff -r 6db94bec4752 -r cbcb84e6730e sys/external/bsd/drm2/dist/drm/i915/i915_gem.c
--- a/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c Mon Aug 27 07:18:47 2018 +0000
+++ b/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c Mon Aug 27 07:19:01 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem.c,v 1.42 2018/08/27 07:17:58 riastradh Exp $ */
+/* $NetBSD: i915_gem.c,v 1.43 2018/08/27 07:19:01 riastradh Exp $ */
/*
* Copyright © 2008-2015 Intel Corporation
@@ -28,7 +28,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem.c,v 1.42 2018/08/27 07:17:58 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem.c,v 1.43 2018/08/27 07:19:01 riastradh Exp $");
#ifdef __NetBSD__
#if 0 /* XXX uvmhist option? */
@@ -213,8 +213,8 @@
struct pglist pages = TAILQ_HEAD_INITIALIZER(pages);
int ret;
/* XXX errno NetBSD->Linux */
- ret = -uvm_obj_wirepages(obj->base.gemo_shm_uao,
- i*PAGE_SIZE, (i + 1)*PAGE_SIZE, &pages);
+ ret = -uvm_obj_wirepages(obj->base.filp, i*PAGE_SIZE,
+ (i + 1)*PAGE_SIZE, &pages);
if (ret)
return ret;
page = container_of(TAILQ_FIRST(&pages), struct page, p_vmp);
@@ -230,8 +230,8 @@
kunmap_atomic(src);
#ifdef __NetBSD__
- uvm_obj_unwirepages(obj->base.gemo_shm_uao,
- i*PAGE_SIZE, (i + 1)*PAGE_SIZE);
+ uvm_obj_unwirepages(obj->base.filp, i*PAGE_SIZE,
+ (i + 1)*PAGE_SIZE);
#else
page_cache_release(page);
#endif
@@ -297,7 +297,7 @@
#ifdef __NetBSD__
struct pglist pages = TAILQ_HEAD_INITIALIZER(pages);
/* XXX errno NetBSD->Linux */
- ret = -uvm_obj_wirepages(obj->base.gemo_shm_uao,
+ ret = -uvm_obj_wirepages(obj->base.filp,
i*PAGE_SIZE, (i + 1)*PAGE_SIZE, &pages);
if (ret)
continue;
@@ -313,8 +313,8 @@
#ifdef __NetBSD__
page->p_vmp.flags &= ~PG_CLEAN;
/* XXX mark page accessed */
- uvm_obj_unwirepages(obj->base.gemo_shm_uao,
- i*PAGE_SIZE, (i+1)*PAGE_SIZE);
+ uvm_obj_unwirepages(obj->base.filp, i*PAGE_SIZE,
+ (i+1)*PAGE_SIZE);
#else
set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
@@ -380,13 +380,8 @@
if (obj->madv != I915_MADV_WILLNEED)
return -EFAULT;
-#ifdef __NetBSD__
- if (obj->base.gemo_shm_uao == NULL)
- return -EINVAL;
-#else
if (obj->base.filp == NULL)
return -EINVAL;
-#endif
ret = drop_pages(obj);
if (ret)
@@ -587,13 +582,8 @@
*needs_clflush = 0;
-#ifdef __NetBSD__
- if (obj->base.gemo_shm_uao == NULL)
- return -EINVAL;
-#else
if (!obj->base.filp)
return -EINVAL;
-#endif
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
/* If we're not in the cpu read domain, set ourself into the gtt
@@ -839,18 +829,10 @@
/* prime objects have no backing filp to GEM pread/pwrite
* pages from.
*/
-#ifdef __NetBSD__
- /* Also stolen objects. */
- if (obj->base.gemo_shm_uao == NULL) {
- ret = -EINVAL;
- goto out;
- }
-#else
if (!obj->base.filp) {
ret = -EINVAL;
goto out;
}
-#endif
trace_i915_gem_object_pread(obj, args->offset, args->size);
@@ -1225,18 +1207,10 @@
/* prime objects have no backing filp to GEM pread/pwrite
* pages from.
*/
-#ifdef __NetBSD__
- /* Also stolen objects. */
- if (obj->base.gemo_shm_uao == NULL) {
- ret = -EINVAL;
- goto out;
- }
-#else
if (!obj->base.filp) {
ret = -EINVAL;
goto out;
}
-#endif
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
@@ -1975,18 +1949,10 @@
/* prime objects have no backing filp to GEM mmap
* pages from.
*/
-#ifdef __NetBSD__
- /* Also stolen objects (XXX can we get them here?) */
- if (obj->gemo_shm_uao == NULL) {
- drm_gem_object_unreference_unlocked(obj);
- return -EINVAL;
- }
-#else
if (!obj->filp) {
drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
-#endif
#ifdef __NetBSD__
addr = (*curproc->p_emul->e_vm_default_addr)(curproc,
@@ -1994,7 +1960,7 @@
curproc->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
/* XXX errno NetBSD->Linux */
ret = -uvm_map(&curproc->p_vmspace->vm_map, &addr, args->size,
- obj->gemo_shm_uao, args->offset, 0,
+ obj->filp, args->offset, 0,
UVM_MAPFLAG((VM_PROT_READ | VM_PROT_WRITE),
(VM_PROT_READ | VM_PROT_WRITE), UVM_INH_COPY, UVM_ADV_NORMAL,
0));
@@ -2002,7 +1968,7 @@
drm_gem_object_unreference_unlocked(obj);
return ret;
}
- uao_reference(obj->gemo_shm_uao);
+ uao_reference(obj->filp);
drm_gem_object_unreference_unlocked(obj);
#else
addr = vm_mmap(obj->filp, 0, args->size,
@@ -2590,12 +2556,12 @@
{
i915_gem_object_free_mmap_offset(obj);
-#ifdef __NetBSD__
- if (obj->base.gemo_shm_uao == NULL)
+ if (obj->base.filp == NULL)
return;
+#ifdef __NetBSD__
{
- struct uvm_object *const uobj = obj->base.gemo_shm_uao;
+ struct uvm_object *const uobj = obj->base.filp;
if (uobj != NULL) {
/* XXX Calling pgo_put like this is bogus. */
@@ -2605,9 +2571,6 @@
}
}
#else
- if (obj->base.filp == NULL)
- return;
-
/* Our goal here is to return as much of the memory as
* is possible back to the system as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
@@ -2677,7 +2640,7 @@
}
obj->dirty = 0;
- uvm_obj_unwirepages(obj->base.gemo_shm_uao, 0, obj->base.size);
+ uvm_obj_unwirepages(obj->base.filp, 0, obj->base.size);
bus_dmamap_destroy(dev->dmat, obj->pages);
#else
struct sg_page_iter sg_iter;
@@ -2770,7 +2733,7 @@
goto fail0;
/* XXX errno NetBSD->Linux */
- ret = -uvm_obj_wirepages(obj->base.gemo_shm_uao, 0, obj->base.size,
+ ret = -uvm_obj_wirepages(obj->base.filp, 0, obj->base.size,
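Most of the deletions in i915_gem.c above come from pairs of checks like the
following collapsing into one, now that the NetBSD uvm object and the Linux shmem
file are reached through the same member name. A condensed before/after sketch of
that pattern (illustrative, not a literal hunk from this commit):

    /* Before: the same NULL check, duplicated per platform. */
    #ifdef __NetBSD__
            if (obj->base.gemo_shm_uao == NULL)
                    return -EINVAL;
    #else
            if (obj->base.filp == NULL)
                    return -EINVAL;
    #endif

    /* After: one shared check serves both ports. */
            if (obj->base.filp == NULL)
                    return -EINVAL;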