wok view libdrm/stuff/libdrm-2.4.21-b803918f3f.patch @ rev 17238

author Pascal Bellard <pascal.bellard@slitaz.org>
date Sat Oct 18 14:31:43 2014 +0200
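
This is a backport of upstream libdrm buffer-manager fixes to 2.4.21. In
intel_bufmgr_gem.c, buffer objects now record their stride, tiling is applied
at allocation time through a new drm_intel_gem_bo_set_tiling_internal()
helper (falling back to untiled when a pre-965 pitch would exceed 8192), the
BO cache sweep is throttled to at most one pass per second, and the
execbuffer pin-failure check tests -ENOSPC instead of -ENOMEM. In
xf86drmMode.c, a DRM_IOCTL() wrapper makes the converted mode-setting entry
points return -errno on failure. A minimal caller sketch of the patched
behaviour follows; it is illustrative only (the demo() function and its
arguments are hypothetical, not part of the patch):

/* Hypothetical caller sketch -- not part of the patch. It uses the public
 * libdrm API (intel_bufmgr.h, xf86drmMode.h) and shows the two behaviours
 * the patch establishes: tiling is applied inside the allocation call, and
 * drmMode*() failures come back as -errno.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <i915_drm.h>
#include <xf86drmMode.h>
#include "intel_bufmgr.h"

static int demo(drm_intel_bufmgr *bufmgr, int fd, uint32_t crtc_id)
{
	uint32_t tiling = I915_TILING_X;	/* may be downgraded to NONE */
	unsigned long pitch;
	drm_intel_bo *bo;
	int ret;

	/* One call now allocates *and* tiles; no separate set_tiling step. */
	bo = drm_intel_bo_alloc_tiled(bufmgr, "scanout", 1024, 768, 4,
				      &tiling, &pitch, 0);
	if (bo == NULL)
		return -ENOMEM;

	/* After the xf86drmMode.c hunks, a nonzero return is -errno. */
	ret = drmModeSetCrtc(fd, crtc_id, 0, 0, 0, NULL, 0, NULL);
	if (ret < 0)
		fprintf(stderr, "setcrtc: %s\n", strerror(-ret));

	drm_intel_bo_unreference(bo);
	return ret;
}
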
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index a8e072d..3446390 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -93,6 +93,7 @@ typedef struct _drm_intel_bufmgr_gem {
 	/** Array of lists of cached gem objects of power-of-two sizes */
 	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
 	int num_buckets;
+	time_t time;

 	uint64_t gtt_size;
 	int available_fences;
@@ -132,6 +133,7 @@ struct _drm_intel_bo_gem {
 	 */
 	uint32_t tiling_mode;
 	uint32_t swizzle_mode;
+	unsigned long stride;

 	time_t free_time;

@@ -200,8 +202,9 @@ drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
 			    uint32_t * swizzle_mode);

 static int
-drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
-			    uint32_t stride);
+drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
+				     uint32_t tiling_mode,
+				     uint32_t stride);

 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
 						      time_t time);
@@ -251,7 +254,7 @@ drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
  */
 static unsigned long
 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
-			    unsigned long pitch, uint32_t tiling_mode)
+			    unsigned long pitch, uint32_t *tiling_mode)
 {
 	unsigned long tile_width;
 	unsigned long i;
@@ -259,10 +262,10 @@ drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
 	/* If untiled, then just align it so that we can do rendering
 	 * to it with the 3D engine.
 	 */
-	if (tiling_mode == I915_TILING_NONE)
+	if (*tiling_mode == I915_TILING_NONE)
 		return ALIGN(pitch, 64);

-	if (tiling_mode == I915_TILING_X)
+	if (*tiling_mode == I915_TILING_X)
 		tile_width = 512;
 	else
 		tile_width = 128;
@@ -271,6 +274,14 @@ drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
 	if (bufmgr_gem->gen >= 4)
 		return ROUND_UP_TO(pitch, tile_width);

+	/* The older hardware has a maximum pitch of 8192 with tiled
+	 * surfaces, so fallback to untiled if it's too large.
+	 */
+	if (pitch > 8192) {
+		*tiling_mode = I915_TILING_NONE;
+		return ALIGN(pitch, 64);
+	}
+
 	/* Pre-965 needs power of two tile width */
 	for (i = tile_width; i < pitch; i <<= 1)
 		;
@@ -549,7 +560,9 @@ static drm_intel_bo *
 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
 				const char *name,
 				unsigned long size,
-				unsigned long flags)
+				unsigned long flags,
+				uint32_t tiling_mode,
+				unsigned long stride)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
 	drm_intel_bo_gem *bo_gem;
@@ -615,6 +628,13 @@ retry:
 								    bucket);
 				goto retry;
 			}
+
+			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
+								 tiling_mode,
+								 stride)) {
+				drm_intel_gem_bo_free(&bo_gem->bo);
+				goto retry;
+			}
 		}
 	}
 	pthread_mutex_unlock(&bufmgr_gem->lock);
@@ -642,6 +662,17 @@ retry:
 			return NULL;
 		}
 		bo_gem->bo.bufmgr = bufmgr;
+
+		bo_gem->tiling_mode = I915_TILING_NONE;
+		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+		bo_gem->stride = 0;
+
+		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
+							 tiling_mode,
+							 stride)) {
+			drm_intel_gem_bo_free(&bo_gem->bo);
+			return NULL;
+		}
 	}

 	bo_gem->name = name;
@@ -650,8 +681,6 @@ retry:
 	bo_gem->reloc_tree_fences = 0;
 	bo_gem->used_as_reloc_target = 0;
 	bo_gem->has_error = 0;
-	bo_gem->tiling_mode = I915_TILING_NONE;
-	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
 	bo_gem->reusable = 1;

 	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
@@ -669,7 +698,8 @@ drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
 				  unsigned int alignment)
 {
 	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
-					       BO_ALLOC_FOR_RENDER);
+					       BO_ALLOC_FOR_RENDER,
+					       I915_TILING_NONE, 0);
 }

 static drm_intel_bo *
@@ -678,7 +708,8 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
 		       unsigned long size,
 		       unsigned int alignment)
 {
-	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0);
+	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
+					       I915_TILING_NONE, 0);
 }

 static drm_intel_bo *
@@ -687,10 +718,8 @@ drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
 			     unsigned long *pitch, unsigned long flags)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
-	drm_intel_bo *bo;
 	unsigned long size, stride;
 	uint32_t tiling;
-	int ret;

 	do {
 		unsigned long aligned_y;
@@ -717,24 +746,17 @@ drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
 		aligned_y = ALIGN(y, 32);

 		stride = x * cpp;
-		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling);
+		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
 		size = stride * aligned_y;
 		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
 	} while (*tiling_mode != tiling);
-
-	bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
-	if (!bo)
-		return NULL;
-
-	ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
-	if (ret != 0) {
-		drm_intel_gem_bo_unreference(bo);
-		return NULL;
-	}
-
 	*pitch = stride;

-	return bo;
+	if (tiling == I915_TILING_NONE)
+		stride = 0;
+
+	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
+					       tiling, stride);
 }

 /**
@@ -791,6 +813,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 	}
 	bo_gem->tiling_mode = get_tiling.tiling_mode;
 	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
+	/* XXX stride is unknown */
 	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

 	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
@@ -829,6 +852,9 @@ drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
 {
 	int i;

+	if (bufmgr_gem->time == time)
+		return;
+
 	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
 		struct drm_intel_gem_bo_bucket *bucket =
 		    &bufmgr_gem->cache_bucket[i];
@@ -846,6 +872,8 @@ drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
 			drm_intel_gem_bo_free(&bo_gem->bo);
 		}
 	}
+
+	bufmgr_gem->time = time;
 }

 static void
@@ -854,7 +882,6 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 	struct drm_intel_gem_bo_bucket *bucket;
-	uint32_t tiling_mode;
 	int i;

 	/* Unreference all the target buffers */
@@ -883,9 +910,7 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)

 	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
 	/* Put the buffer into our internal cache for reuse if we can. */
-	tiling_mode = I915_TILING_NONE;
 	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
-	    drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0 &&
 	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
 					      I915_MADV_DONTNEED)) {
 		bo_gem->free_time = time;
@@ -894,8 +919,6 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
 		bo_gem->validate_index = -1;

 		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
-
-		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);
 	} else {
 		drm_intel_gem_bo_free(bo);
 	}
@@ -925,6 +948,7 @@ static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)

 		pthread_mutex_lock(&bufmgr_gem->lock);
 		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
+		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
 		pthread_mutex_unlock(&bufmgr_gem->lock);
 	}
 }
@@ -982,12 +1006,9 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 			    &set_domain);
 	} while (ret == -1 && errno == EINTR);
 	if (ret != 0) {
-		ret = -errno;
 		fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
 			__FILE__, __LINE__, bo_gem->gem_handle,
 			strerror(errno));
-		pthread_mutex_unlock(&bufmgr_gem->lock);
-		return ret;
 	}

 	pthread_mutex_unlock(&bufmgr_gem->lock);
@@ -1062,9 +1083,7 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
 			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
 			    &set_domain);
 	} while (ret == -1 && errno == EINTR);
-
 	if (ret != 0) {
-		ret = -errno;
 		fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
 			__FILE__, __LINE__, bo_gem->gem_handle,
 			strerror(errno));
@@ -1072,7 +1091,7 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)

 	pthread_mutex_unlock(&bufmgr_gem->lock);

-	return ret;
+	return 0;
 }

 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
@@ -1587,7 +1606,7 @@ drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,

 	if (ret != 0) {
 		ret = -errno;
-		if (ret == -ENOMEM) {
+		if (ret == -ENOSPC) {
 			fprintf(stderr,
 				"Execbuffer fails to pin. "
 				"Estimate: %u. Actual: %u. Available: %u\n",
@@ -1671,34 +1690,56 @@ drm_intel_gem_bo_unpin(drm_intel_bo *bo)
 }

 static int
-drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
-			    uint32_t stride)
+drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
+				     uint32_t tiling_mode,
+				     uint32_t stride)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 	struct drm_i915_gem_set_tiling set_tiling;
 	int ret;

-	if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
+	if (bo_gem->global_name == 0 &&
+	    tiling_mode == bo_gem->tiling_mode &&
+	    stride == bo_gem->stride)
 		return 0;

 	memset(&set_tiling, 0, sizeof(set_tiling));
-	set_tiling.handle = bo_gem->gem_handle;
-
 	do {
-		set_tiling.tiling_mode = *tiling_mode;
+		set_tiling.handle = bo_gem->gem_handle;
+		set_tiling.tiling_mode = tiling_mode;
 		set_tiling.stride = stride;

 		ret = ioctl(bufmgr_gem->fd,
 			    DRM_IOCTL_I915_GEM_SET_TILING,
 			    &set_tiling);
 	} while (ret == -1 && errno == EINTR);
-	if (ret == 0) {
-		bo_gem->tiling_mode = set_tiling.tiling_mode;
-		bo_gem->swizzle_mode = set_tiling.swizzle_mode;
+	if (ret == -1)
+		return -errno;
+
+	bo_gem->tiling_mode = set_tiling.tiling_mode;
+	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
+	bo_gem->stride = set_tiling.stride;
+	return 0;
+}
+
+static int
+drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+			    uint32_t stride)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+	int ret;
+
+	/* Linear buffers have no stride. By ensuring that we only ever use
+	 * stride 0 with linear buffers, we simplify our code.
+	 */
+	if (*tiling_mode == I915_TILING_NONE)
+		stride = 0;
+
+	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
+	if (ret == 0)
 		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
-	} else
-		ret = -errno;

 	*tiling_mode = bo_gem->tiling_mode;
 	return ret;
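
The unreference path above now defers cache expiry to
drm_intel_gem_bo_unreference() and gates it on the new bufmgr_gem->time
field, so the bucket sweep runs at most once per second however many buffers
are released. A condensed sketch of that pattern, kept outside the diff so
the patch still applies (simplified, hypothetical names; not literal patch
text):

#include <time.h>

struct bufmgr_sketch { time_t time; };	/* stands in for drm_intel_bufmgr_gem */

/* Throttled sweep: at most one pass per second. */
static void cleanup_cache_sketch(struct bufmgr_sketch *b, time_t now)
{
	if (b->time == now)
		return;		/* this second's sweep already ran */
	/* ... free cached buffers whose free_time is over a second old ... */
	b->time = now;
}
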
diff --git a/xf86drmMode.c b/xf86drmMode.c
index f330e6f..ecb1fd5 100644
--- a/xf86drmMode.c
+++ b/xf86drmMode.c
@@ -52,6 +52,12 @@
 #define U642VOID(x) ((void *)(unsigned long)(x))
 #define VOID2U64(x) ((uint64_t)(unsigned long)(x))

+static inline int DRM_IOCTL(int fd, int cmd, void *arg)
+{
+	int ret = drmIoctl(fd, cmd, arg);
+	return ret < 0 ? -errno : ret;
+}
+
 /*
  * Util functions
  */
@@ -242,7 +248,7 @@ int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
 	f.depth = depth;
 	f.handle = bo_handle;

-	if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_ADDFB, &f)))
+	if ((ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_ADDFB, &f)))
 		return ret;

 	*buf_id = f.fb_id;
@@ -251,7 +257,7 @@ int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,

 int drmModeRmFB(int fd, uint32_t bufferId)
 {
-	return drmIoctl(fd, DRM_IOCTL_MODE_RMFB, &bufferId);
+	return DRM_IOCTL(fd, DRM_IOCTL_MODE_RMFB, &bufferId);


 }
@@ -289,7 +295,7 @@ int drmModeDirtyFB(int fd, uint32_t bufferId,
 	dirty.clips_ptr = VOID2U64(clips);
 	dirty.num_clips = num_clips;

-	return drmIoctl(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty);
+	return DRM_IOCTL(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty);
 }


@@ -344,7 +350,7 @@ int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
 	} else
 		crtc.mode_valid = 0;

-	return drmIoctl(fd, DRM_IOCTL_MODE_SETCRTC, &crtc);
+	return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETCRTC, &crtc);
 }

 /*
@@ -361,7 +367,7 @@ int drmModeSetCursor(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width
 	arg.height = height;
 	arg.handle = bo_handle;

-	return drmIoctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
+	return DRM_IOCTL(fd, DRM_IOCTL_MODE_CURSOR, &arg);
 }

 int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y)
@@ -373,7 +379,7 @@ int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y)
 	arg.x = x;
 	arg.y = y;

-	return drmIoctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
+	return DRM_IOCTL(fd, DRM_IOCTL_MODE_CURSOR, &arg);
 }

 /*
@@ -510,7 +516,7 @@ int drmModeAttachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_inf
 	memcpy(&res.mode, mode_info, sizeof(struct drm_mode_modeinfo));
 	res.connector_id = connector_id;

-	return drmIoctl(fd, DRM_IOCTL_MODE_ATTACHMODE, &res);
+	return DRM_IOCTL(fd, DRM_IOCTL_MODE_ATTACHMODE, &res);
 }

 int drmModeDetachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_info)
@@ -520,7 +526,7 @@ int drmModeDetachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_inf
 	memcpy(&res.mode, mode_info, sizeof(struct drm_mode_modeinfo));
 	res.connector_id = connector_id;

-	return drmIoctl(fd, DRM_IOCTL_MODE_DETACHMODE, &res);
+	return DRM_IOCTL(fd, DRM_IOCTL_MODE_DETACHMODE, &res);
 }


@@ -637,16 +643,12 @@ int drmModeConnectorSetProperty(int fd, uint32_t connector_id, uint32_t property
 			     uint64_t value)
 {
 	struct drm_mode_connector_set_property osp;
-	int ret;

 	osp.connector_id = connector_id;
 	osp.prop_id = property_id;
 	osp.value = value;

-	if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_SETPROPERTY, &osp)))
-		return ret;
-
-	return 0;
+	return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETPROPERTY, &osp);
 }

 /*
@@ -715,7 +717,6 @@ int drmCheckModesettingSupported(const char *busid)
 int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
 			uint16_t *red, uint16_t *green, uint16_t *blue)
 {
-	int ret;
 	struct drm_mode_crtc_lut l;

 	l.crtc_id = crtc_id;
@@ -724,16 +725,12 @@ int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
 	l.green = VOID2U64(green);
 	l.blue = VOID2U64(blue);

-	if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_GETGAMMA, &l)))
-		return ret;
-
-	return 0;
+	return DRM_IOCTL(fd, DRM_IOCTL_MODE_GETGAMMA, &l);
 }

 int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
 			uint16_t *red, uint16_t *green, uint16_t *blue)
 {
-	int ret;
 	struct drm_mode_crtc_lut l;

 	l.crtc_id = crtc_id;
@@ -742,10 +739,7 @@ int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
 	l.green = VOID2U64(green);
 	l.blue = VOID2U64(blue);

-	if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_SETGAMMA, &l)))
-		return ret;
-
-	return 0;
+	return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETGAMMA, &l);
 }

 int drmHandleEvent(int fd, drmEventContextPtr evctx)
@@ -810,5 +804,5 @@ int drmModePageFlip(int fd, uint32_t crtc_id, uint32_t fb_id,
 	flip.flags = flags;
 	flip.reserved = 0;

-	return drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
+	return DRM_IOCTL(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
 }
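
With the DRM_IOCTL() wrapper in place, every converted drmMode*() entry point
reports failure as a negative errno value rather than the raw -1 from
ioctl(2). A hedged usage sketch of that convention (remove_fb() is a
hypothetical helper; fd and fb_id are assumed to come from earlier setup):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <xf86drmMode.h>

/* Remove a framebuffer and report -errno failures. */
static int remove_fb(int fd, uint32_t fb_id)
{
	int ret = drmModeRmFB(fd, fb_id);	/* 0 on success, -errno on error */
	if (ret < 0)
		fprintf(stderr, "rmfb %u: %s\n", fb_id, strerror(-ret));
	return ret;
}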