Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[xsrc/trunk]: xsrc/external/mit/libdrm/dist merge libdrm 2.4.99
details: https://anonhg.NetBSD.org/xsrc/rev/e738e9c6db9e
branches: trunk
changeset: 10349:e738e9c6db9e
user: mrg <mrg@NetBSD.org>
date: Mon Jul 15 05:42:34 2019 +0000
description:
merge libdrm 2.4.99
diffstat:
external/mit/libdrm/dist/amdgpu/amdgpu_bo.c | 67 ++---
external/mit/libdrm/dist/include/drm/drm.h | 38 +++
external/mit/libdrm/dist/include/drm/i915_drm.h | 241 ++++++++++++++++--
external/mit/libdrm/dist/tests/modetest/modetest.c | 111 +++++++-
external/mit/libdrm/dist/xf86atomic.h | 2 -
external/mit/libdrm/dist/xf86drm.c | 260 ++++++++++++--------
external/mit/libdrm/dist/xf86drm.h | 13 +
external/mit/libdrm/dist/xf86drmMode.c | 9 +-
external/mit/libdrm/dist/xf86drmMode.h | 10 +-
9 files changed, 544 insertions(+), 207 deletions(-)
diffs (truncated from 1383 to 300 lines):
diff -r f6f90c8f7a18 -r e738e9c6db9e external/mit/libdrm/dist/amdgpu/amdgpu_bo.c
--- a/external/mit/libdrm/dist/amdgpu/amdgpu_bo.c Mon Jul 15 05:42:02 2019 +0000
+++ b/external/mit/libdrm/dist/amdgpu/amdgpu_bo.c Mon Jul 15 05:42:34 2019 +0000
@@ -39,13 +39,12 @@
#include "amdgpu_internal.h"
#include "util_math.h"
-static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
- uint32_t handle)
+static int amdgpu_close_kms_handle(int fd, uint32_t handle)
{
struct drm_gem_close args = {};
args.handle = handle;
- drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
+ return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
}
static int amdgpu_bo_create(amdgpu_device_handle dev,
@@ -54,11 +53,18 @@
amdgpu_bo_handle *buf_handle)
{
struct amdgpu_bo *bo;
+ int r;
bo = calloc(1, sizeof(struct amdgpu_bo));
if (!bo)
return -ENOMEM;
+ r = handle_table_insert(&dev->bo_handles, handle, bo);
+ if (r) {
+ free(bo);
+ return r;
+ }
+
atomic_set(&bo->refcount, 1);
bo->dev = dev;
bo->alloc_size = size;
@@ -90,19 +96,14 @@
if (r)
goto out;
+ pthread_mutex_lock(&dev->bo_table_mutex);
r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
buf_handle);
+ pthread_mutex_unlock(&dev->bo_table_mutex);
if (r) {
- amdgpu_close_kms_handle(dev, args.out.handle);
- goto out;
+ amdgpu_close_kms_handle(dev->fd, args.out.handle);
}
- pthread_mutex_lock(&dev->bo_table_mutex);
- r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
- *buf_handle);
- pthread_mutex_unlock(&dev->bo_table_mutex);
- if (r)
- amdgpu_bo_free(*buf_handle);
out:
return r;
}
@@ -214,11 +215,8 @@
bo->flink_name = flink.name;
- if (bo->dev->flink_fd != bo->dev->fd) {
- struct drm_gem_close args = {};
- args.handle = handle;
- drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
- }
+ if (bo->dev->flink_fd != bo->dev->fd)
+ amdgpu_close_kms_handle(bo->dev->flink_fd, handle);
pthread_mutex_lock(&bo->dev->bo_table_mutex);
r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
@@ -261,7 +259,6 @@
struct amdgpu_bo_import_result *output)
{
struct drm_gem_open open_arg = {};
- struct drm_gem_close close_arg = {};
struct amdgpu_bo *bo = NULL;
uint32_t handle = 0, flink_name = 0;
uint64_t alloc_size = 0;
@@ -345,12 +342,12 @@
close(dma_fd);
if (r)
goto free_bo_handle;
- close_arg.handle = open_arg.handle;
- r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE,
- &close_arg);
+ r = amdgpu_close_kms_handle(dev->flink_fd,
+ open_arg.handle);
if (r)
goto free_bo_handle;
}
+ open_arg.handle = 0;
break;
case amdgpu_bo_handle_type_dma_buf_fd:
@@ -368,15 +365,12 @@
if (r)
goto free_bo_handle;
- r = handle_table_insert(&dev->bo_handles, bo->handle, bo);
- if (r)
- goto free_bo_handle;
if (flink_name) {
bo->flink_name = flink_name;
r = handle_table_insert(&dev->bo_flink_names, flink_name,
bo);
if (r)
- goto remove_handle;
+ goto free_bo_handle;
}
@@ -385,17 +379,14 @@
pthread_mutex_unlock(&dev->bo_table_mutex);
return 0;
-remove_handle:
- handle_table_remove(&dev->bo_handles, bo->handle);
free_bo_handle:
- if (flink_name && !close_arg.handle && open_arg.handle) {
- close_arg.handle = open_arg.handle;
- drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
- }
+ if (flink_name && open_arg.handle)
+ amdgpu_close_kms_handle(dev->flink_fd, open_arg.handle);
+
if (bo)
amdgpu_bo_free(bo);
else
- amdgpu_close_kms_handle(dev, handle);
+ amdgpu_close_kms_handle(dev->fd, handle);
unlock:
pthread_mutex_unlock(&dev->bo_table_mutex);
return r;
@@ -424,12 +415,13 @@
amdgpu_bo_cpu_unmap(bo);
}
- amdgpu_close_kms_handle(dev, bo->handle);
+ amdgpu_close_kms_handle(dev->fd, bo->handle);
pthread_mutex_destroy(&bo->cpu_access_mutex);
free(bo);
}
pthread_mutex_unlock(&dev->bo_table_mutex);
+
return 0;
}
@@ -602,18 +594,13 @@
if (r)
goto out;
+ pthread_mutex_lock(&dev->bo_table_mutex);
r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
+ pthread_mutex_unlock(&dev->bo_table_mutex);
if (r) {
- amdgpu_close_kms_handle(dev, args.handle);
- goto out;
+ amdgpu_close_kms_handle(dev->fd, args.handle);
}
- pthread_mutex_lock(&dev->bo_table_mutex);
- r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
- *buf_handle);
- pthread_mutex_unlock(&dev->bo_table_mutex);
- if (r)
- amdgpu_bo_free(*buf_handle);
out:
return r;
}
diff -r f6f90c8f7a18 -r e738e9c6db9e external/mit/libdrm/dist/include/drm/drm.h
--- a/external/mit/libdrm/dist/include/drm/drm.h Mon Jul 15 05:42:02 2019 +0000
+++ b/external/mit/libdrm/dist/include/drm/drm.h Mon Jul 15 05:42:34 2019 +0000
@@ -44,6 +44,7 @@
#else /* One of the BSDs */
+#include <stdint.h>
#include <sys/ioccom.h>
#include <sys/types.h>
#ifndef __linux_sized_types__
@@ -646,6 +647,7 @@
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
#define DRM_CAP_SYNCOBJ 0x13
+#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@@ -732,8 +734,18 @@
__u32 pad;
};
+struct drm_syncobj_transfer {
+ __u32 src_handle;
+ __u32 dst_handle;
+ __u64 src_point;
+ __u64 dst_point;
+ __u32 flags;
+ __u32 pad;
+};
+
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@@ -744,12 +756,33 @@
__u32 pad;
};
+struct drm_syncobj_timeline_wait {
+ __u64 handles;
+ /* wait on specific timeline point for every handles*/
+ __u64 points;
+ /* absolute timeout */
+ __s64 timeout_nsec;
+ __u32 count_handles;
+ __u32 flags;
+ __u32 first_signaled; /* only valid when not waiting all */
+ __u32 pad;
+};
+
+
struct drm_syncobj_array {
__u64 handles;
__u32 count_handles;
__u32 pad;
};
+struct drm_syncobj_timeline_array {
+ __u64 handles;
+ __u64 points;
+ __u32 count_handles;
+ __u32 pad;
+};
+
+
/* Query current scanout sequence number */
struct drm_crtc_get_sequence {
__u32 crtc_id; /* requested crtc_id */
@@ -906,6 +939,11 @@
#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
+#define DRM_IOCTL_SYNCOBJ_QUERY DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
+#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
+
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
diff -r f6f90c8f7a18 -r e738e9c6db9e external/mit/libdrm/dist/include/drm/i915_drm.h
--- a/external/mit/libdrm/dist/include/drm/i915_drm.h Mon Jul 15 05:42:02 2019 +0000
+++ b/external/mit/libdrm/dist/include/drm/i915_drm.h Mon Jul 15 05:42:34 2019 +0000
@@ -63,6 +63,28 @@
#define I915_RESET_UEVENT "RESET"
/*
+ * i915_user_extension: Base class for defining a chain of extensions
+ *
+ * Many interfaces need to grow over time. In most cases we can simply
+ * extend the struct and have userspace pass in more data. Another option,
+ * as demonstrated by Vulkan's approach to providing extensions for forward
+ * and backward compatibility, is to use a list of optional structs to
+ * provide those extra details.
+ *
+ * The key advantage to using an extension chain is that it allows us to
+ * redefine the interface more easily than an ever growing struct of
+ * increasing complexity, and for large parts of that interface to be
+ * entirely optional. The downside is more pointer chasing; chasing across
+ * the boundary with pointers encapsulated inside u64.
+ */
+struct i915_user_extension {
+ __u64 next_extension;
+ __u32 name;
+ __u32 flags; /* All undefined bits must be zero. */
+ __u32 rsvd[4]; /* Reserved for future use; must be zero. */
+};
+
+/*
* MOCS indexes used for GPU surfaces, defining the cacheability of the
* surface data and the coherency for this data wrt. CPU vs. GPU accesses.
*/
@@ -99,6 +121,8 @@
I915_ENGINE_CLASS_VIDEO = 2,
I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
+ /* should be kept compact */
+
I915_ENGINE_CLASS_INVALID = -1
};
Home |
Main Index |
Thread Index |
Old Index