wok-current rev 6303
Added a patch to libdrm to fix some Intel driver issues.
| author | Christopher Rogers <slaxemulator@gmail.com> |
| --- | --- |
| date | Thu Sep 16 18:48:28 2010 +0000 (2010-09-16) |
| parents | aa8b383fcb2b |
| children | d62539c455d6 |
| files | libdrm/receipt libdrm/stuff/libdrm-2.4.21-b803918f3f.patch |
--- a/libdrm/receipt	Thu Sep 16 15:40:55 2010 +0000
+++ b/libdrm/receipt	Thu Sep 16 18:48:28 2010 +0000
@@ -15,6 +15,8 @@
 compile_rules()
 {
 	cd $src
+	# fixes some intel issues
+	patch -Np1 -i ../stuff/libdrm-2.4.21-b803918f3f.patch
 	./configure \
 		--prefix=/usr \
 		--enable-intel \
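The patch is applied with `patch -Np1` from inside `$src` before `configure` runs. When libdrm is bumped later, a dry run is a quick way to confirm the file still applies; the snippet below is only an illustration and assumes the same `$src`/`stuff` layout the receipt itself relies on.

```sh
# Illustrative check, not part of the receipt: verify the patch still
# applies cleanly against a freshly unpacked source tree.
cd $src
patch -Np1 --dry-run -i ../stuff/libdrm-2.4.21-b803918f3f.patch \
	&& echo "patch applies cleanly" \
	|| echo "patch needs rebasing"
```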
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libdrm/stuff/libdrm-2.4.21-b803918f3f.patch	Thu Sep 16 18:48:28 2010 +0000
@@ -0,0 +1,506 @@
+diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
+index a8e072d..3446390 100644
+--- a/intel/intel_bufmgr_gem.c
++++ b/intel/intel_bufmgr_gem.c
+@@ -93,6 +93,7 @@ typedef struct _drm_intel_bufmgr_gem {
+ /** Array of lists of cached gem objects of power-of-two sizes */
+ struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
+ int num_buckets;
++ time_t time;
+
+ uint64_t gtt_size;
+ int available_fences;
+@@ -132,6 +133,7 @@ struct _drm_intel_bo_gem {
+ */
+ uint32_t tiling_mode;
+ uint32_t swizzle_mode;
++ unsigned long stride;
+
+ time_t free_time;
+
+@@ -200,8 +202,9 @@ drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode);
+
+ static int
+-drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+- uint32_t stride);
++drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
++ uint32_t tiling_mode,
++ uint32_t stride);
+
+ static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
+ time_t time);
+@@ -251,7 +254,7 @@ drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
+ */
+ static unsigned long
+ drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
+- unsigned long pitch, uint32_t tiling_mode)
++ unsigned long pitch, uint32_t *tiling_mode)
+ {
+ unsigned long tile_width;
+ unsigned long i;
+
+ /* If untiled, then just align it so that we can do rendering
+ * to it with the 3D engine.
+ */
+- if (tiling_mode == I915_TILING_NONE)
++ if (*tiling_mode == I915_TILING_NONE)
+ return ALIGN(pitch, 64);
+
+- if (tiling_mode == I915_TILING_X)
++ if (*tiling_mode == I915_TILING_X)
+ tile_width = 512;
+ else
+ tile_width = 128;
+@@ -271,6 +274,14 @@ drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
+ if (bufmgr_gem->gen >= 4)
+ return ROUND_UP_TO(pitch, tile_width);
+
++ /* The older hardware has a maximum pitch of 8192 with tiled
++ * surfaces, so fallback to untiled if it's too large.
++ */
++ if (pitch > 8192) {
++ *tiling_mode = I915_TILING_NONE;
++ return ALIGN(pitch, 64);
++ }
++
+ /* Pre-965 needs power of two tile width */
+ for (i = tile_width; i < pitch; i <<= 1)
+ ;
+@@ -549,7 +560,9 @@ static drm_intel_bo *
+ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+- unsigned long flags)
++ unsigned long flags,
++ uint32_t tiling_mode,
++ unsigned long stride)
+ {
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ drm_intel_bo_gem *bo_gem;
+@@ -615,6 +628,13 @@ retry:
+ bucket);
+ goto retry;
+ }
++
++ if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
++ tiling_mode,
++ stride)) {
++ drm_intel_gem_bo_free(&bo_gem->bo);
++ goto retry;
++ }
+ }
+ }
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+@@ -642,6 +662,17 @@ retry:
+ return NULL;
+ }
+ bo_gem->bo.bufmgr = bufmgr;
++
++ bo_gem->tiling_mode = I915_TILING_NONE;
++ bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
++ bo_gem->stride = 0;
++
++ if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
++ tiling_mode,
++ stride)) {
++ drm_intel_gem_bo_free(&bo_gem->bo);
++ return NULL;
++ }
+ }
+
+ bo_gem->name = name;
+@@ -650,8 +681,6 @@ retry:
+ bo_gem->reloc_tree_fences = 0;
+ bo_gem->used_as_reloc_target = 0;
+ bo_gem->has_error = 0;
+- bo_gem->tiling_mode = I915_TILING_NONE;
+- bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ bo_gem->reusable = 1;
+
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+@@ -669,7 +698,8 @@ drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
+ unsigned int alignment)
+ {
+ return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
+- BO_ALLOC_FOR_RENDER);
++ BO_ALLOC_FOR_RENDER,
++ I915_TILING_NONE, 0);
+ }
+
+ static drm_intel_bo *
+@@ -678,7 +708,8 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment)
+ {
+- return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0);
++ return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
++ I915_TILING_NONE, 0);
+ }
+
+ static drm_intel_bo *
+@@ -687,10 +718,8 @@ drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long *pitch, unsigned long flags)
+ {
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+- drm_intel_bo *bo;
+ unsigned long size, stride;
+ uint32_t tiling;
+- int ret;
+
+ do {
+ unsigned long aligned_y;
+@@ -717,24 +746,17 @@ drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
+ aligned_y = ALIGN(y, 32);
+
+ stride = x * cpp;
+- stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling);
++ stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
+ size = stride * aligned_y;
+ size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
+ } while (*tiling_mode != tiling);
+-
+- bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
+- if (!bo)
+- return NULL;
+-
+- ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
+- if (ret != 0) {
+- drm_intel_gem_bo_unreference(bo);
+- return NULL;
+- }
+-
+ *pitch = stride;
+
+- return bo;
++ if (tiling == I915_TILING_NONE)
++ stride = 0;
++
++ return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
++ tiling, stride);
+ }
+
+ /**
+@@ -791,6 +813,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
+ }
+ bo_gem->tiling_mode = get_tiling.tiling_mode;
+ bo_gem->swizzle_mode = get_tiling.swizzle_mode;
++ /* XXX stride is unknown */
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+
+ DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
+@@ -829,6 +852,9 @@ drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
+ {
+ int i;
+
++ if (bufmgr_gem->time == time)
++ return;
++
+ for (i = 0; i < bufmgr_gem->num_buckets; i++) {
+ struct drm_intel_gem_bo_bucket *bucket =
+ &bufmgr_gem->cache_bucket[i];
+@@ -846,6 +872,8 @@ drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ }
+ }
++
++ bufmgr_gem->time = time;
+ }
+
+ static void
+@@ -854,7 +882,6 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_intel_gem_bo_bucket *bucket;
+- uint32_t tiling_mode;
+ int i;
+
+ /* Unreference all the target buffers */
+@@ -883,9 +910,7 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
+
+ bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
+ /* Put the buffer into our internal cache for reuse if we can. */
+- tiling_mode = I915_TILING_NONE;
+ if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
+- drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0 &&
+ drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
+ I915_MADV_DONTNEED)) {
+ bo_gem->free_time = time;
+@@ -894,8 +919,6 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
+ bo_gem->validate_index = -1;
+
+ DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
+-
+- drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);
+ } else {
+ drm_intel_gem_bo_free(bo);
+ }
+@@ -925,6 +948,7 @@ static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
++ drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ }
+ }
+@@ -982,12 +1006,9 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
+ &set_domain);
+ } while (ret == -1 && errno == EINTR);
+ if (ret != 0) {
+- ret = -errno;
+ fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ strerror(errno));
+- pthread_mutex_unlock(&bufmgr_gem->lock);
+- return ret;
+ }
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+@@ -1062,9 +1083,7 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+ DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ &set_domain);
+ } while (ret == -1 && errno == EINTR);
+-
+ if (ret != 0) {
+- ret = -errno;
+ fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ strerror(errno));
+@@ -1072,7 +1091,7 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+- return ret;
++ return 0;
+ }
+
+ int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+@@ -1587,7 +1606,7 @@ drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
+
+ if (ret != 0) {
+ ret = -errno;
+- if (ret == -ENOMEM) {
++ if (ret == -ENOSPC) {
+ fprintf(stderr,
+ "Execbuffer fails to pin. "
+ "Estimate: %u. Actual: %u. Available: %u\n",
+@@ -1671,34 +1690,56 @@ drm_intel_gem_bo_unpin(drm_intel_bo *bo)
+ }
+
+ static int
+-drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+- uint32_t stride)
++drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
++ uint32_t tiling_mode,
++ uint32_t stride)
+ {
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_set_tiling set_tiling;
+ int ret;
+
+- if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
++ if (bo_gem->global_name == 0 &&
++ tiling_mode == bo_gem->tiling_mode &&
++ stride == bo_gem->stride)
+ return 0;
+
+ memset(&set_tiling, 0, sizeof(set_tiling));
+- set_tiling.handle = bo_gem->gem_handle;
+-
+ do {
+- set_tiling.tiling_mode = *tiling_mode;
++ set_tiling.handle = bo_gem->gem_handle;
++ set_tiling.tiling_mode = tiling_mode;
+ set_tiling.stride = stride;
+
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SET_TILING,
+ &set_tiling);
+ } while (ret == -1 && errno == EINTR);
+- if (ret == 0) {
+- bo_gem->tiling_mode = set_tiling.tiling_mode;
+- bo_gem->swizzle_mode = set_tiling.swizzle_mode;
++ if (ret == -1)
++ return -errno;
++
++ bo_gem->tiling_mode = set_tiling.tiling_mode;
++ bo_gem->swizzle_mode = set_tiling.swizzle_mode;
++ bo_gem->stride = set_tiling.stride;
++ return 0;
++}
++
++static int
++drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
++ uint32_t stride)
++{
++ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
++ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
++ int ret;
++
++ /* Linear buffers have no stride. By ensuring that we only ever use
++ * stride 0 with linear buffers, we simplify our code.
++ */
++ if (*tiling_mode == I915_TILING_NONE)
++ stride = 0;
++
++ ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
++ if (ret == 0)
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+- } else
+- ret = -errno;
+
+ *tiling_mode = bo_gem->tiling_mode;
+ return ret;
+diff --git a/xf86drmMode.c b/xf86drmMode.c
+index f330e6f..ecb1fd5 100644
+--- a/xf86drmMode.c
++++ b/xf86drmMode.c
+@@ -52,6 +52,12 @@
+ #define U642VOID(x) ((void *)(unsigned long)(x))
+ #define VOID2U64(x) ((uint64_t)(unsigned long)(x))
+
++static inline DRM_IOCTL(int fd, int cmd, void *arg)
++{
++ int ret = drmIoctl(fd, cmd, arg);
++ return ret < 0 ? -errno : ret;
++}
++
+ /*
+ * Util functions
+ */
+@@ -242,7 +248,7 @@ int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
+ f.depth = depth;
+ f.handle = bo_handle;
+
+- if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_ADDFB, &f)))
++ if ((ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_ADDFB, &f)))
+ return ret;
+
+ *buf_id = f.fb_id;
+@@ -251,7 +257,7 @@ int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
+
+ int drmModeRmFB(int fd, uint32_t bufferId)
+ {
+- return drmIoctl(fd, DRM_IOCTL_MODE_RMFB, &bufferId);
++ return DRM_IOCTL(fd, DRM_IOCTL_MODE_RMFB, &bufferId);
+
+
+ }
+@@ -289,7 +295,7 @@ int drmModeDirtyFB(int fd, uint32_t bufferId,
+ dirty.clips_ptr = VOID2U64(clips);
+ dirty.num_clips = num_clips;
+
+- return drmIoctl(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty);
++ return DRM_IOCTL(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty);
+ }
+
+
+@@ -344,7 +350,7 @@ int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
+ } else
+ crtc.mode_valid = 0;
+
+- return drmIoctl(fd, DRM_IOCTL_MODE_SETCRTC, &crtc);
++ return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETCRTC, &crtc);
+ }
+
+ /*
+@@ -361,7 +367,7 @@ int drmModeSetCursor(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width
+ arg.height = height;
+ arg.handle = bo_handle;
+
+- return drmIoctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
++ return DRM_IOCTL(fd, DRM_IOCTL_MODE_CURSOR, &arg);
+ }
+
+ int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y)
+@@ -373,7 +379,7 @@ int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y)
+ arg.x = x;
+ arg.y = y;
+
+- return drmIoctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
++ return DRM_IOCTL(fd, DRM_IOCTL_MODE_CURSOR, &arg);
+ }
+
+ /*
+@@ -510,7 +516,7 @@ int drmModeAttachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_inf
+ memcpy(&res.mode, mode_info, sizeof(struct drm_mode_modeinfo));
+ res.connector_id = connector_id;
+
+- return drmIoctl(fd, DRM_IOCTL_MODE_ATTACHMODE, &res);
++ return DRM_IOCTL(fd, DRM_IOCTL_MODE_ATTACHMODE, &res);
+ }
+
+ int drmModeDetachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_info)
+@@ -520,7 +526,7 @@ int drmModeDetachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_inf
+ memcpy(&res.mode, mode_info, sizeof(struct drm_mode_modeinfo));
+ res.connector_id = connector_id;
+
+- return drmIoctl(fd, DRM_IOCTL_MODE_DETACHMODE, &res);
++ return DRM_IOCTL(fd, DRM_IOCTL_MODE_DETACHMODE, &res);
+ }
+
+
+@@ -637,16 +643,12 @@ int drmModeConnectorSetProperty(int fd, uint32_t connector_id, uint32_t property
+ uint64_t value)
+ {
+ struct drm_mode_connector_set_property osp;
+- int ret;
+
+ osp.connector_id = connector_id;
+ osp.prop_id = property_id;
+ osp.value = value;
+
+- if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_SETPROPERTY, &osp)))
+- return ret;
+-
+- return 0;
++ return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETPROPERTY, &osp);
+ }
+
+ /*
+@@ -715,7 +717,6 @@ int drmCheckModesettingSupported(const char *busid)
+ int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
+ uint16_t *red, uint16_t *green, uint16_t *blue)
+ {
+- int ret;
+ struct drm_mode_crtc_lut l;
+
+ l.crtc_id = crtc_id;
+@@ -724,16 +725,12 @@ int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
+ l.green = VOID2U64(green);
+ l.blue = VOID2U64(blue);
+
+- if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_GETGAMMA, &l)))
+- return ret;
+-
+- return 0;
++ return DRM_IOCTL(fd, DRM_IOCTL_MODE_GETGAMMA, &l);
+ }
+
+ int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
+ uint16_t *red, uint16_t *green, uint16_t *blue)
+ {
+- int ret;
+ struct drm_mode_crtc_lut l;
+
+ l.crtc_id = crtc_id;
+@@ -742,10 +739,7 @@ int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
+ l.green = VOID2U64(green);
+ l.blue = VOID2U64(blue);
+
+- if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_SETGAMMA, &l)))
+- return ret;
+-
+- return 0;
++ return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETGAMMA, &l);
+ }
+
+ int drmHandleEvent(int fd, drmEventContextPtr evctx)
+@@ -810,5 +804,5 @@ int drmModePageFlip(int fd, uint32_t crtc_id, uint32_t fb_id,
+ flip.flags = flags;
+ flip.reserved = 0;
+
+- return drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
++ return DRM_IOCTL(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
+ }
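After a change like this lands in the wok, the package has to be recooked so the patched sources are actually built. A minimal sketch of that follow-up step, assuming the usual SliTaz tazwok tooling and the default /home/slitaz/wok layout (both are assumptions about the local setup, not part of this changeset):

```sh
# Sketch only: rebuild libdrm and confirm the Intel fix was applied.
# tazwok and the wok path below are assumed, not taken from this commit.
tazwok cook libdrm

# The reworked helper only exists in a patched tree, so finding it shows
# the patch was applied before compilation.
grep -n "drm_intel_gem_bo_set_tiling_internal" \
	/home/slitaz/wok/libdrm/*/intel/intel_bufmgr_gem.c
```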