drivers/gpu/drm/i915/i915_gem.c (muen/linux.git at git.codelabs.ch, blob as of commit "drm/i915: Suspend submission tasklets around wedging")
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_gem_clflush.h"
33 #include "i915_vgpu.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36 #include "intel_frontbuffer.h"
37 #include "intel_mocs.h"
38 #include "i915_gemfs.h"
39 #include <linux/dma-fence-array.h>
40 #include <linux/kthread.h>
41 #include <linux/reservation.h>
42 #include <linux/shmem_fs.h>
43 #include <linux/slab.h>
44 #include <linux/stop_machine.h>
45 #include <linux/swap.h>
46 #include <linux/pci.h>
47 #include <linux/dma-buf.h>
48
49 static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
50
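/*
 * Decide whether a CPU write to this object must be followed by a clflush:
 * skip objects already marked cache_dirty (the flush is deferred), always
 * flush objects that are not cache coherent for CPU writes, and keep
 * globally pinned objects flushed while the hardware may be using them.
 */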
51 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
52 {
53         if (obj->cache_dirty)
54                 return false;
55
56         if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
57                 return true;
58
59         return obj->pin_global; /* currently in use by HW, keep flushed */
60 }
61
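/*
 * Reserve a node of the requested size within the CPU-mappable portion of
 * the global GTT (below ggtt->mappable_end), preferring low addresses.
 */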
62 static int
63 insert_mappable_node(struct i915_ggtt *ggtt,
64                      struct drm_mm_node *node, u32 size)
65 {
66         memset(node, 0, sizeof(*node));
67         return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
68                                            size, 0, I915_COLOR_UNEVICTABLE,
69                                            0, ggtt->mappable_end,
70                                            DRM_MM_INSERT_LOW);
71 }
72
73 static void
74 remove_mappable_node(struct drm_mm_node *node)
75 {
76         drm_mm_remove_node(node);
77 }
78
79 /* some bookkeeping */
80 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
81                                   u64 size)
82 {
83         spin_lock(&dev_priv->mm.object_stat_lock);
84         dev_priv->mm.object_count++;
85         dev_priv->mm.object_memory += size;
86         spin_unlock(&dev_priv->mm.object_stat_lock);
87 }
88
89 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
90                                      u64 size)
91 {
92         spin_lock(&dev_priv->mm.object_stat_lock);
93         dev_priv->mm.object_count--;
94         dev_priv->mm.object_memory -= size;
95         spin_unlock(&dev_priv->mm.object_stat_lock);
96 }
97
98 static int
99 i915_gem_wait_for_error(struct i915_gpu_error *error)
100 {
101         int ret;
102
103         might_sleep();
104
105         /*
106          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
107          * userspace. If it takes that long something really bad is going on and
108          * we should simply try to bail out and fail as gracefully as possible.
109          */
110         ret = wait_event_interruptible_timeout(error->reset_queue,
111                                                !i915_reset_backoff(error),
112                                                I915_RESET_TIMEOUT);
113         if (ret == 0) {
114                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
115                 return -EIO;
116         } else if (ret < 0) {
117                 return ret;
118         } else {
119                 return 0;
120         }
121 }
122
123 int i915_mutex_lock_interruptible(struct drm_device *dev)
124 {
125         struct drm_i915_private *dev_priv = to_i915(dev);
126         int ret;
127
128         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
129         if (ret)
130                 return ret;
131
132         ret = mutex_lock_interruptible(&dev->struct_mutex);
133         if (ret)
134                 return ret;
135
136         return 0;
137 }
138
139 int
140 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
141                             struct drm_file *file)
142 {
143         struct drm_i915_private *dev_priv = to_i915(dev);
144         struct i915_ggtt *ggtt = &dev_priv->ggtt;
145         struct drm_i915_gem_get_aperture *args = data;
146         struct i915_vma *vma;
147         u64 pinned;
148
149         pinned = ggtt->base.reserved;
150         mutex_lock(&dev->struct_mutex);
151         list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
152                 if (i915_vma_is_pinned(vma))
153                         pinned += vma->node.size;
154         list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
155                 if (i915_vma_is_pinned(vma))
156                         pinned += vma->node.size;
157         mutex_unlock(&dev->struct_mutex);
158
159         args->aper_size = ggtt->base.total;
160         args->aper_available_size = args->aper_size - pinned;
161
162         return 0;
163 }
164
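/*
 * Copy the object's shmem backing pages into a single contiguous DMA
 * allocation and publish it as a one-entry sg_table, so that the object is
 * reachable at the fixed bus address recorded in obj->phys_handle.
 */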
165 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
166 {
167         struct address_space *mapping = obj->base.filp->f_mapping;
168         drm_dma_handle_t *phys;
169         struct sg_table *st;
170         struct scatterlist *sg;
171         char *vaddr;
172         int i;
173         int err;
174
175         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
176                 return -EINVAL;
177
178         /* Always aligning to the object size allows a single allocation
179          * to handle all possible callers, and given typical object sizes,
180          * the alignment of the buddy allocation will naturally match.
181          */
182         phys = drm_pci_alloc(obj->base.dev,
183                              roundup_pow_of_two(obj->base.size),
184                              roundup_pow_of_two(obj->base.size));
185         if (!phys)
186                 return -ENOMEM;
187
188         vaddr = phys->vaddr;
189         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
190                 struct page *page;
191                 char *src;
192
193                 page = shmem_read_mapping_page(mapping, i);
194                 if (IS_ERR(page)) {
195                         err = PTR_ERR(page);
196                         goto err_phys;
197                 }
198
199                 src = kmap_atomic(page);
200                 memcpy(vaddr, src, PAGE_SIZE);
201                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
202                 kunmap_atomic(src);
203
204                 put_page(page);
205                 vaddr += PAGE_SIZE;
206         }
207
208         i915_gem_chipset_flush(to_i915(obj->base.dev));
209
210         st = kmalloc(sizeof(*st), GFP_KERNEL);
211         if (!st) {
212                 err = -ENOMEM;
213                 goto err_phys;
214         }
215
216         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
217                 kfree(st);
218                 err = -ENOMEM;
219                 goto err_phys;
220         }
221
222         sg = st->sgl;
223         sg->offset = 0;
224         sg->length = obj->base.size;
225
226         sg_dma_address(sg) = phys->busaddr;
227         sg_dma_len(sg) = obj->base.size;
228
229         obj->phys_handle = phys;
230
231         __i915_gem_object_set_pages(obj, st, sg->length);
232
233         return 0;
234
235 err_phys:
236         drm_pci_free(obj->base.dev, phys);
237
238         return err;
239 }
240
241 static void __start_cpu_write(struct drm_i915_gem_object *obj)
242 {
243         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
244         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
245         if (cpu_write_needs_clflush(obj))
246                 obj->cache_dirty = true;
247 }
248
249 static void
250 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
251                                 struct sg_table *pages,
252                                 bool needs_clflush)
253 {
254         GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
255
256         if (obj->mm.madv == I915_MADV_DONTNEED)
257                 obj->mm.dirty = false;
258
259         if (needs_clflush &&
260             (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
261             !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
262                 drm_clflush_sg(pages);
263
264         __start_cpu_write(obj);
265 }
266
267 static void
268 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
269                                struct sg_table *pages)
270 {
271         __i915_gem_object_release_shmem(obj, pages, false);
272
273         if (obj->mm.dirty) {
274                 struct address_space *mapping = obj->base.filp->f_mapping;
275                 char *vaddr = obj->phys_handle->vaddr;
276                 int i;
277
278                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
279                         struct page *page;
280                         char *dst;
281
282                         page = shmem_read_mapping_page(mapping, i);
283                         if (IS_ERR(page))
284                                 continue;
285
286                         dst = kmap_atomic(page);
287                         drm_clflush_virt_range(vaddr, PAGE_SIZE);
288                         memcpy(dst, vaddr, PAGE_SIZE);
289                         kunmap_atomic(dst);
290
291                         set_page_dirty(page);
292                         if (obj->mm.madv == I915_MADV_WILLNEED)
293                                 mark_page_accessed(page);
294                         put_page(page);
295                         vaddr += PAGE_SIZE;
296                 }
297                 obj->mm.dirty = false;
298         }
299
300         sg_free_table(pages);
301         kfree(pages);
302
303         drm_pci_free(obj->base.dev, obj->phys_handle);
304 }
305
306 static void
307 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
308 {
309         i915_gem_object_unpin_pages(obj);
310 }
311
312 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
313         .get_pages = i915_gem_object_get_pages_phys,
314         .put_pages = i915_gem_object_put_pages_phys,
315         .release = i915_gem_object_release_phys,
316 };
317
318 static const struct drm_i915_gem_object_ops i915_gem_object_ops;
319
320 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
321 {
322         struct i915_vma *vma;
323         LIST_HEAD(still_in_list);
324         int ret;
325
326         lockdep_assert_held(&obj->base.dev->struct_mutex);
327
328         /* Closed VMAs are removed from the obj->vma_list - but they may
329          * still have an active binding on the object. To remove those we
330          * must wait for all rendering to the object to complete (as unbinding
331          * must anyway), and retire the requests.
332          */
333         ret = i915_gem_object_set_to_cpu_domain(obj, false);
334         if (ret)
335                 return ret;
336
337         while ((vma = list_first_entry_or_null(&obj->vma_list,
338                                                struct i915_vma,
339                                                obj_link))) {
340                 list_move_tail(&vma->obj_link, &still_in_list);
341                 ret = i915_vma_unbind(vma);
342                 if (ret)
343                         break;
344         }
345         list_splice(&still_in_list, &obj->vma_list);
346
347         return ret;
348 }
349
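/*
 * Wait on a single dma_fence. Fences from other drivers are handed to
 * dma_fence_wait_timeout(); i915 requests may additionally be waitboosted
 * (see the comment below) and, when waiting under the struct_mutex, are
 * retired once they complete.
 */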
350 static long
351 i915_gem_object_wait_fence(struct dma_fence *fence,
352                            unsigned int flags,
353                            long timeout,
354                            struct intel_rps_client *rps_client)
355 {
356         struct drm_i915_gem_request *rq;
357
358         BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
359
360         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
361                 return timeout;
362
363         if (!dma_fence_is_i915(fence))
364                 return dma_fence_wait_timeout(fence,
365                                               flags & I915_WAIT_INTERRUPTIBLE,
366                                               timeout);
367
368         rq = to_request(fence);
369         if (i915_gem_request_completed(rq))
370                 goto out;
371
372         /* This client is about to stall waiting for the GPU. In many cases
373          * this is undesirable and limits the throughput of the system, as
374          * many clients cannot continue processing user input/output whilst
375          * blocked. RPS autotuning may take tens of milliseconds to respond
376          * to the GPU load and thus incurs additional latency for the client.
377          * We can circumvent that by promoting the GPU frequency to maximum
378          * before we wait. This makes the GPU throttle up much more quickly
379          * (good for benchmarks and user experience, e.g. window animations),
380          * but at a cost of spending more power processing the workload
381          * (bad for battery). Not all clients even want their results
382          * immediately and for them we should just let the GPU select its own
383          * frequency to maximise efficiency. To prevent a single client from
384          * forcing the clocks too high for the whole system, we only allow
385          * each client to waitboost once in a busy period.
386          */
387         if (rps_client) {
388                 if (INTEL_GEN(rq->i915) >= 6)
389                         gen6_rps_boost(rq, rps_client);
390                 else
391                         rps_client = NULL;
392         }
393
394         timeout = i915_wait_request(rq, flags, timeout);
395
396 out:
397         if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
398                 i915_gem_request_retire_upto(rq);
399
400         return timeout;
401 }
402
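/*
 * Wait on the fences tracked by a reservation object: all shared fences
 * plus the exclusive fence when I915_WAIT_ALL is given, otherwise only the
 * exclusive fence. The seqcount sampled on entry tells us whether new
 * fences were added while we waited, before we prune the signaled ones.
 */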
403 static long
404 i915_gem_object_wait_reservation(struct reservation_object *resv,
405                                  unsigned int flags,
406                                  long timeout,
407                                  struct intel_rps_client *rps_client)
408 {
409         unsigned int seq = __read_seqcount_begin(&resv->seq);
410         struct dma_fence *excl;
411         bool prune_fences = false;
412
413         if (flags & I915_WAIT_ALL) {
414                 struct dma_fence **shared;
415                 unsigned int count, i;
416                 int ret;
417
418                 ret = reservation_object_get_fences_rcu(resv,
419                                                         &excl, &count, &shared);
420                 if (ret)
421                         return ret;
422
423                 for (i = 0; i < count; i++) {
424                         timeout = i915_gem_object_wait_fence(shared[i],
425                                                              flags, timeout,
426                                                              rps_client);
427                         if (timeout < 0)
428                                 break;
429
430                         dma_fence_put(shared[i]);
431                 }
432
433                 for (; i < count; i++)
434                         dma_fence_put(shared[i]);
435                 kfree(shared);
436
437                 prune_fences = count && timeout >= 0;
438         } else {
439                 excl = reservation_object_get_excl_rcu(resv);
440         }
441
442         if (excl && timeout >= 0) {
443                 timeout = i915_gem_object_wait_fence(excl, flags, timeout,
444                                                      rps_client);
445                 prune_fences = timeout >= 0;
446         }
447
448         dma_fence_put(excl);
449
450         /* Opportunistically prune the fences iff we know they have *all* been
451          * signaled and that the reservation object has not been changed (i.e.
452          * no new fences have been added).
453          */
454         if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
455                 if (reservation_object_trylock(resv)) {
456                         if (!__read_seqcount_retry(&resv->seq, seq))
457                                 reservation_object_add_excl_fence(resv, NULL);
458                         reservation_object_unlock(resv);
459                 }
460         }
461
462         return timeout;
463 }
464
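/*
 * Propagate a priority hint to the scheduler for the i915 request backing a
 * fence; already-signaled and foreign fences are ignored, and fence arrays
 * are unwrapped one level deep by fence_set_priority().
 */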
465 static void __fence_set_priority(struct dma_fence *fence, int prio)
466 {
467         struct drm_i915_gem_request *rq;
468         struct intel_engine_cs *engine;
469
470         if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
471                 return;
472
473         rq = to_request(fence);
474         engine = rq->engine;
475         if (!engine->schedule)
476                 return;
477
478         engine->schedule(rq, prio);
479 }
480
481 static void fence_set_priority(struct dma_fence *fence, int prio)
482 {
483         /* Recurse once into a fence-array */
484         if (dma_fence_is_array(fence)) {
485                 struct dma_fence_array *array = to_dma_fence_array(fence);
486                 int i;
487
488                 for (i = 0; i < array->num_fences; i++)
489                         __fence_set_priority(array->fences[i], prio);
490         } else {
491                 __fence_set_priority(fence, prio);
492         }
493 }
494
495 int
496 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
497                               unsigned int flags,
498                               int prio)
499 {
500         struct dma_fence *excl;
501
502         if (flags & I915_WAIT_ALL) {
503                 struct dma_fence **shared;
504                 unsigned int count, i;
505                 int ret;
506
507                 ret = reservation_object_get_fences_rcu(obj->resv,
508                                                         &excl, &count, &shared);
509                 if (ret)
510                         return ret;
511
512                 for (i = 0; i < count; i++) {
513                         fence_set_priority(shared[i], prio);
514                         dma_fence_put(shared[i]);
515                 }
516
517                 kfree(shared);
518         } else {
519                 excl = reservation_object_get_excl_rcu(obj->resv);
520         }
521
522         if (excl) {
523                 fence_set_priority(excl, prio);
524                 dma_fence_put(excl);
525         }
526         return 0;
527 }
528
529 /**
530  * Waits for rendering to the object to complete
531  * @obj: i915 gem object
532  * @flags: how to wait (under a lock, for all rendering or just for writes etc)
533  * @timeout: how long to wait
534  * @rps_client: client (user process) to charge for any waitboosting
535  */
536 int
537 i915_gem_object_wait(struct drm_i915_gem_object *obj,
538                      unsigned int flags,
539                      long timeout,
540                      struct intel_rps_client *rps_client)
541 {
542         might_sleep();
543 #if IS_ENABLED(CONFIG_LOCKDEP)
544         GEM_BUG_ON(debug_locks &&
545                    !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
546                    !!(flags & I915_WAIT_LOCKED));
547 #endif
548         GEM_BUG_ON(timeout < 0);
549
550         timeout = i915_gem_object_wait_reservation(obj->resv,
551                                                    flags, timeout,
552                                                    rps_client);
553         return timeout < 0 ? timeout : 0;
554 }
555
556 static struct intel_rps_client *to_rps_client(struct drm_file *file)
557 {
558         struct drm_i915_file_private *fpriv = file->driver_priv;
559
560         return &fpriv->rps_client;
561 }
562
563 static int
564 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
565                      struct drm_i915_gem_pwrite *args,
566                      struct drm_file *file)
567 {
568         void *vaddr = obj->phys_handle->vaddr + args->offset;
569         char __user *user_data = u64_to_user_ptr(args->data_ptr);
570
571         /* We manually control the domain here and pretend that it
572          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
573          */
574         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
575         if (copy_from_user(vaddr, user_data, args->size))
576                 return -EFAULT;
577
578         drm_clflush_virt_range(vaddr, args->size);
579         i915_gem_chipset_flush(to_i915(obj->base.dev));
580
581         intel_fb_obj_flush(obj, ORIGIN_CPU);
582         return 0;
583 }
584
585 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
586 {
587         return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
588 }
589
590 void i915_gem_object_free(struct drm_i915_gem_object *obj)
591 {
592         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
593         kmem_cache_free(dev_priv->objects, obj);
594 }
595
596 static int
597 i915_gem_create(struct drm_file *file,
598                 struct drm_i915_private *dev_priv,
599                 uint64_t size,
600                 uint32_t *handle_p)
601 {
602         struct drm_i915_gem_object *obj;
603         int ret;
604         u32 handle;
605
606         size = roundup(size, PAGE_SIZE);
607         if (size == 0)
608                 return -EINVAL;
609
610         /* Allocate the new object */
611         obj = i915_gem_object_create(dev_priv, size);
612         if (IS_ERR(obj))
613                 return PTR_ERR(obj);
614
615         ret = drm_gem_handle_create(file, &obj->base, &handle);
616         /* drop reference from allocate - handle holds it now */
617         i915_gem_object_put(obj);
618         if (ret)
619                 return ret;
620
621         *handle_p = handle;
622         return 0;
623 }
624
625 int
626 i915_gem_dumb_create(struct drm_file *file,
627                      struct drm_device *dev,
628                      struct drm_mode_create_dumb *args)
629 {
630         /* have to work out size/pitch and return them */
631         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
632         args->size = args->pitch * args->height;
633         return i915_gem_create(file, to_i915(dev),
634                                args->size, &args->handle);
635 }
636
637 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
638 {
639         return !(obj->cache_level == I915_CACHE_NONE ||
640                  obj->cache_level == I915_CACHE_WT);
641 }
642
643 /**
644  * Creates a new mm object and returns a handle to it.
645  * @dev: drm device pointer
646  * @data: ioctl data blob
647  * @file: drm file pointer
648  */
649 int
650 i915_gem_create_ioctl(struct drm_device *dev, void *data,
651                       struct drm_file *file)
652 {
653         struct drm_i915_private *dev_priv = to_i915(dev);
654         struct drm_i915_gem_create *args = data;
655
656         i915_gem_flush_free_objects(dev_priv);
657
658         return i915_gem_create(file, dev_priv,
659                                args->size, &args->handle);
660 }
661
662 static inline enum fb_op_origin
663 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
664 {
665         return (domain == I915_GEM_DOMAIN_GTT ?
666                 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
667 }
668
669 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
670 {
671         /*
672          * No actual flushing is required for the GTT write domain for reads
673          * from the GTT domain. Writes to it "immediately" go to main memory
674          * as far as we know, so there's no chipset flush. It also doesn't
675          * land in the GPU render cache.
676          *
677          * However, we do have to enforce the order so that all writes through
678          * the GTT land before any writes to the device, such as updates to
679          * the GATT itself.
680          *
681          * We also have to wait a bit for the writes to land from the GTT.
682          * An uncached read (i.e. mmio) seems to be ideal for the round-trip
683          * timing. This issue has only been observed when switching quickly
684          * between GTT writes and CPU reads from inside the kernel on recent hw,
685          * and it appears to only affect discrete GTT blocks (i.e. we could
686          * not reproduce this behaviour on LLC system agents - at least not
687          * until Cannonlake).
688          */
689
690         wmb();
691
692         intel_runtime_pm_get(dev_priv);
693         spin_lock_irq(&dev_priv->uncore.lock);
694
695         POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
696
697         spin_unlock_irq(&dev_priv->uncore.lock);
698         intel_runtime_pm_put(dev_priv);
699 }
700
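/*
 * Flush the object's pending writes if its current write domain is among
 * @flush_domains: GTT writes are pushed out to memory (plus a frontbuffer
 * flush), CPU writes are clflushed, and GPU render writes mark the cache
 * dirty so a flush happens later if required. The write domain is then
 * cleared.
 */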
701 static void
702 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
703 {
704         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
705         struct i915_vma *vma;
706
707         if (!(obj->base.write_domain & flush_domains))
708                 return;
709
710         switch (obj->base.write_domain) {
711         case I915_GEM_DOMAIN_GTT:
712                 i915_gem_flush_ggtt_writes(dev_priv);
713
714                 intel_fb_obj_flush(obj,
715                                    fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
716
717                 for_each_ggtt_vma(vma, obj) {
718                         if (vma->iomap)
719                                 continue;
720
721                         i915_vma_unset_ggtt_write(vma);
722                 }
723                 break;
724
725         case I915_GEM_DOMAIN_CPU:
726                 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
727                 break;
728
729         case I915_GEM_DOMAIN_RENDER:
730                 if (gpu_write_needs_clflush(obj))
731                         obj->cache_dirty = true;
732                 break;
733         }
734
735         obj->base.write_domain = 0;
736 }
737
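/*
 * Bit-17 swizzle helpers: on platforms where bit 17 of a page's physical
 * address takes part in the tiling swizzle, the CPU's view of that page has
 * bit 6 of the in-page offset toggled, i.e. adjacent 64-byte cachelines are
 * swapped pairwise. The loops below therefore copy one cacheline at a time,
 * reading or writing at gpu_offset ^ 64; callers pass
 * page_to_phys(page) & BIT(17) to select which pages need this treatment.
 */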
738 static inline int
739 __copy_to_user_swizzled(char __user *cpu_vaddr,
740                         const char *gpu_vaddr, int gpu_offset,
741                         int length)
742 {
743         int ret, cpu_offset = 0;
744
745         while (length > 0) {
746                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
747                 int this_length = min(cacheline_end - gpu_offset, length);
748                 int swizzled_gpu_offset = gpu_offset ^ 64;
749
750                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
751                                      gpu_vaddr + swizzled_gpu_offset,
752                                      this_length);
753                 if (ret)
754                         return ret + length;
755
756                 cpu_offset += this_length;
757                 gpu_offset += this_length;
758                 length -= this_length;
759         }
760
761         return 0;
762 }
763
764 static inline int
765 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
766                           const char __user *cpu_vaddr,
767                           int length)
768 {
769         int ret, cpu_offset = 0;
770
771         while (length > 0) {
772                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
773                 int this_length = min(cacheline_end - gpu_offset, length);
774                 int swizzled_gpu_offset = gpu_offset ^ 64;
775
776                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
777                                        cpu_vaddr + cpu_offset,
778                                        this_length);
779                 if (ret)
780                         return ret + length;
781
782                 cpu_offset += this_length;
783                 gpu_offset += this_length;
784                 length -= this_length;
785         }
786
787         return 0;
788 }
789
790 /*
791  * Pins the specified object's pages and synchronizes the object with
792  * GPU accesses. Sets needs_clflush to non-zero if the caller should
793  * flush the object from the CPU cache.
794  */
795 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
796                                     unsigned int *needs_clflush)
797 {
798         int ret;
799
800         lockdep_assert_held(&obj->base.dev->struct_mutex);
801
802         *needs_clflush = 0;
803         if (!i915_gem_object_has_struct_page(obj))
804                 return -ENODEV;
805
806         ret = i915_gem_object_wait(obj,
807                                    I915_WAIT_INTERRUPTIBLE |
808                                    I915_WAIT_LOCKED,
809                                    MAX_SCHEDULE_TIMEOUT,
810                                    NULL);
811         if (ret)
812                 return ret;
813
814         ret = i915_gem_object_pin_pages(obj);
815         if (ret)
816                 return ret;
817
818         if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
819             !static_cpu_has(X86_FEATURE_CLFLUSH)) {
820                 ret = i915_gem_object_set_to_cpu_domain(obj, false);
821                 if (ret)
822                         goto err_unpin;
823                 else
824                         goto out;
825         }
826
827         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
828
829         /* If we're not in the cpu read domain, set ourselves into the gtt
830          * read domain and manually flush cachelines (if required). This
831          * optimizes for the case when the gpu will dirty the data
832          * anyway again before the next pread happens.
833          */
834         if (!obj->cache_dirty &&
835             !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
836                 *needs_clflush = CLFLUSH_BEFORE;
837
838 out:
839         /* return with the pages pinned */
840         return 0;
841
842 err_unpin:
843         i915_gem_object_unpin_pages(obj);
844         return ret;
845 }
846
847 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
848                                      unsigned int *needs_clflush)
849 {
850         int ret;
851
852         lockdep_assert_held(&obj->base.dev->struct_mutex);
853
854         *needs_clflush = 0;
855         if (!i915_gem_object_has_struct_page(obj))
856                 return -ENODEV;
857
858         ret = i915_gem_object_wait(obj,
859                                    I915_WAIT_INTERRUPTIBLE |
860                                    I915_WAIT_LOCKED |
861                                    I915_WAIT_ALL,
862                                    MAX_SCHEDULE_TIMEOUT,
863                                    NULL);
864         if (ret)
865                 return ret;
866
867         ret = i915_gem_object_pin_pages(obj);
868         if (ret)
869                 return ret;
870
871         if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
872             !static_cpu_has(X86_FEATURE_CLFLUSH)) {
873                 ret = i915_gem_object_set_to_cpu_domain(obj, true);
874                 if (ret)
875                         goto err_unpin;
876                 else
877                         goto out;
878         }
879
880         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
881
882         /* If we're not in the cpu write domain, set ourselves into the
883          * gtt write domain and manually flush cachelines (as required).
884          * This optimizes for the case when the gpu will use the data
885          * right away and we therefore have to clflush anyway.
886          */
887         if (!obj->cache_dirty) {
888                 *needs_clflush |= CLFLUSH_AFTER;
889
890                 /*
891                  * Same trick applies to invalidate partially written
892                  * cachelines read before writing.
893                  */
894                 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
895                         *needs_clflush |= CLFLUSH_BEFORE;
896         }
897
898 out:
899         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
900         obj->mm.dirty = true;
901         /* return with the pages pinned */
902         return 0;
903
904 err_unpin:
905         i915_gem_object_unpin_pages(obj);
906         return ret;
907 }
908
909 static void
910 shmem_clflush_swizzled_range(char *addr, unsigned long length,
911                              bool swizzled)
912 {
913         if (unlikely(swizzled)) {
914                 unsigned long start = (unsigned long) addr;
915                 unsigned long end = (unsigned long) addr + length;
916
917                 /* For swizzling simply ensure that we always flush both
918                  * channels. Lame, but simple and it works. Swizzled
919                  * pwrite/pread is far from a hotpath - current userspace
920                  * doesn't use it at all. */
921                 start = round_down(start, 128);
922                 end = round_up(end, 128);
923
924                 drm_clflush_virt_range((void *)start, end - start);
925         } else {
926                 drm_clflush_virt_range(addr, length);
927         }
928
929 }
930
931 /* The only difference from the fast-path function is that this can handle
932  * bit17 swizzling and uses non-atomic copy and kmap functions. */
933 static int
934 shmem_pread_slow(struct page *page, int offset, int length,
935                  char __user *user_data,
936                  bool page_do_bit17_swizzling, bool needs_clflush)
937 {
938         char *vaddr;
939         int ret;
940
941         vaddr = kmap(page);
942         if (needs_clflush)
943                 shmem_clflush_swizzled_range(vaddr + offset, length,
944                                              page_do_bit17_swizzling);
945
946         if (page_do_bit17_swizzling)
947                 ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
948         else
949                 ret = __copy_to_user(user_data, vaddr + offset, length);
950         kunmap(page);
951
952         return ret ? - EFAULT : 0;
953 }
954
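/* Fast path for pread: atomic kmap plus a non-faulting copy, falling back
 * to the sleeping slow path if the copy faults or the page needs bit-17
 * swizzling.
 */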
955 static int
956 shmem_pread(struct page *page, int offset, int length, char __user *user_data,
957             bool page_do_bit17_swizzling, bool needs_clflush)
958 {
959         int ret;
960
961         ret = -ENODEV;
962         if (!page_do_bit17_swizzling) {
963                 char *vaddr = kmap_atomic(page);
964
965                 if (needs_clflush)
966                         drm_clflush_virt_range(vaddr + offset, length);
967                 ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
968                 kunmap_atomic(vaddr);
969         }
970         if (ret == 0)
971                 return 0;
972
973         return shmem_pread_slow(page, offset, length, user_data,
974                                 page_do_bit17_swizzling, needs_clflush);
975 }
976
977 static int
978 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
979                      struct drm_i915_gem_pread *args)
980 {
981         char __user *user_data;
982         u64 remain;
983         unsigned int obj_do_bit17_swizzling;
984         unsigned int needs_clflush;
985         unsigned int idx, offset;
986         int ret;
987
988         obj_do_bit17_swizzling = 0;
989         if (i915_gem_object_needs_bit17_swizzle(obj))
990                 obj_do_bit17_swizzling = BIT(17);
991
992         ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
993         if (ret)
994                 return ret;
995
996         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
997         mutex_unlock(&obj->base.dev->struct_mutex);
998         if (ret)
999                 return ret;
1000
1001         remain = args->size;
1002         user_data = u64_to_user_ptr(args->data_ptr);
1003         offset = offset_in_page(args->offset);
1004         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1005                 struct page *page = i915_gem_object_get_page(obj, idx);
1006                 int length;
1007
1008                 length = remain;
1009                 if (offset + length > PAGE_SIZE)
1010                         length = PAGE_SIZE - offset;
1011
1012                 ret = shmem_pread(page, offset, length, user_data,
1013                                   page_to_phys(page) & obj_do_bit17_swizzling,
1014                                   needs_clflush);
1015                 if (ret)
1016                         break;
1017
1018                 remain -= length;
1019                 user_data += length;
1020                 offset = 0;
1021         }
1022
1023         i915_gem_obj_finish_shmem_access(obj);
1024         return ret;
1025 }
1026
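/*
 * Read through the GGTT aperture: try an atomic WC mapping first and, if
 * that copy faults, retry with a full (sleeping) mapping of the page.
 * Returns true if any bytes could not be copied to userspace.
 */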
1027 static inline bool
1028 gtt_user_read(struct io_mapping *mapping,
1029               loff_t base, int offset,
1030               char __user *user_data, int length)
1031 {
1032         void __iomem *vaddr;
1033         unsigned long unwritten;
1034
1035         /* We can use the cpu mem copy function because this is X86. */
1036         vaddr = io_mapping_map_atomic_wc(mapping, base);
1037         unwritten = __copy_to_user_inatomic(user_data,
1038                                             (void __force *)vaddr + offset,
1039                                             length);
1040         io_mapping_unmap_atomic(vaddr);
1041         if (unwritten) {
1042                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1043                 unwritten = copy_to_user(user_data,
1044                                          (void __force *)vaddr + offset,
1045                                          length);
1046                 io_mapping_unmap(vaddr);
1047         }
1048         return unwritten;
1049 }
1050
1051 static int
1052 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
1053                    const struct drm_i915_gem_pread *args)
1054 {
1055         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1056         struct i915_ggtt *ggtt = &i915->ggtt;
1057         struct drm_mm_node node;
1058         struct i915_vma *vma;
1059         void __user *user_data;
1060         u64 remain, offset;
1061         int ret;
1062
1063         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1064         if (ret)
1065                 return ret;
1066
1067         intel_runtime_pm_get(i915);
1068         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1069                                        PIN_MAPPABLE |
1070                                        PIN_NONFAULT |
1071                                        PIN_NONBLOCK);
1072         if (!IS_ERR(vma)) {
1073                 node.start = i915_ggtt_offset(vma);
1074                 node.allocated = false;
1075                 ret = i915_vma_put_fence(vma);
1076                 if (ret) {
1077                         i915_vma_unpin(vma);
1078                         vma = ERR_PTR(ret);
1079                 }
1080         }
1081         if (IS_ERR(vma)) {
1082                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1083                 if (ret)
1084                         goto out_unlock;
1085                 GEM_BUG_ON(!node.allocated);
1086         }
1087
1088         ret = i915_gem_object_set_to_gtt_domain(obj, false);
1089         if (ret)
1090                 goto out_unpin;
1091
1092         mutex_unlock(&i915->drm.struct_mutex);
1093
1094         user_data = u64_to_user_ptr(args->data_ptr);
1095         remain = args->size;
1096         offset = args->offset;
1097
1098         while (remain > 0) {
1099                 /* Operation in this page
1100                  *
1101                  * page_base = page offset within aperture
1102                  * page_offset = offset within page
1103                  * page_length = bytes to copy for this page
1104                  */
1105                 u32 page_base = node.start;
1106                 unsigned page_offset = offset_in_page(offset);
1107                 unsigned page_length = PAGE_SIZE - page_offset;
1108                 page_length = remain < page_length ? remain : page_length;
1109                 if (node.allocated) {
1110                         wmb();
1111                         ggtt->base.insert_page(&ggtt->base,
1112                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1113                                                node.start, I915_CACHE_NONE, 0);
1114                         wmb();
1115                 } else {
1116                         page_base += offset & PAGE_MASK;
1117                 }
1118
1119                 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
1120                                   user_data, page_length)) {
1121                         ret = -EFAULT;
1122                         break;
1123                 }
1124
1125                 remain -= page_length;
1126                 user_data += page_length;
1127                 offset += page_length;
1128         }
1129
1130         mutex_lock(&i915->drm.struct_mutex);
1131 out_unpin:
1132         if (node.allocated) {
1133                 wmb();
1134                 ggtt->base.clear_range(&ggtt->base,
1135                                        node.start, node.size);
1136                 remove_mappable_node(&node);
1137         } else {
1138                 i915_vma_unpin(vma);
1139         }
1140 out_unlock:
1141         intel_runtime_pm_put(i915);
1142         mutex_unlock(&i915->drm.struct_mutex);
1143
1144         return ret;
1145 }
1146
1147 /**
1148  * Reads data from the object referenced by handle.
1149  * @dev: drm device pointer
1150  * @data: ioctl data blob
1151  * @file: drm file pointer
1152  *
1153  * On error, the contents of *data are undefined.
1154  */
1155 int
1156 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1157                      struct drm_file *file)
1158 {
1159         struct drm_i915_gem_pread *args = data;
1160         struct drm_i915_gem_object *obj;
1161         int ret;
1162
1163         if (args->size == 0)
1164                 return 0;
1165
1166         if (!access_ok(VERIFY_WRITE,
1167                        u64_to_user_ptr(args->data_ptr),
1168                        args->size))
1169                 return -EFAULT;
1170
1171         obj = i915_gem_object_lookup(file, args->handle);
1172         if (!obj)
1173                 return -ENOENT;
1174
1175         /* Bounds check source.  */
1176         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1177                 ret = -EINVAL;
1178                 goto out;
1179         }
1180
1181         trace_i915_gem_object_pread(obj, args->offset, args->size);
1182
1183         ret = i915_gem_object_wait(obj,
1184                                    I915_WAIT_INTERRUPTIBLE,
1185                                    MAX_SCHEDULE_TIMEOUT,
1186                                    to_rps_client(file));
1187         if (ret)
1188                 goto out;
1189
1190         ret = i915_gem_object_pin_pages(obj);
1191         if (ret)
1192                 goto out;
1193
1194         ret = i915_gem_shmem_pread(obj, args);
1195         if (ret == -EFAULT || ret == -ENODEV)
1196                 ret = i915_gem_gtt_pread(obj, args);
1197
1198         i915_gem_object_unpin_pages(obj);
1199 out:
1200         i915_gem_object_put(obj);
1201         return ret;
1202 }
1203
1204 /* This is the fast write path which cannot handle
1205  * page faults in the source data
1206  */
1207
1208 static inline bool
1209 ggtt_write(struct io_mapping *mapping,
1210            loff_t base, int offset,
1211            char __user *user_data, int length)
1212 {
1213         void __iomem *vaddr;
1214         unsigned long unwritten;
1215
1216         /* We can use the cpu mem copy function because this is X86. */
1217         vaddr = io_mapping_map_atomic_wc(mapping, base);
1218         unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
1219                                                       user_data, length);
1220         io_mapping_unmap_atomic(vaddr);
1221         if (unwritten) {
1222                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1223                 unwritten = copy_from_user((void __force *)vaddr + offset,
1224                                            user_data, length);
1225                 io_mapping_unmap(vaddr);
1226         }
1227
1228         return unwritten;
1229 }
1230
1231 /**
1232  * This is the fast pwrite path, where we copy the data directly from the
1233  * user into the GTT, uncached.
1234  * @obj: i915 GEM object
1235  * @args: pwrite arguments structure
1236  */
1237 static int
1238 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1239                          const struct drm_i915_gem_pwrite *args)
1240 {
1241         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1242         struct i915_ggtt *ggtt = &i915->ggtt;
1243         struct drm_mm_node node;
1244         struct i915_vma *vma;
1245         u64 remain, offset;
1246         void __user *user_data;
1247         int ret;
1248
1249         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1250         if (ret)
1251                 return ret;
1252
1253         if (i915_gem_object_has_struct_page(obj)) {
1254                 /*
1255                  * Avoid waking the device up if we can fall back, as
1256                  * waking/resuming is very slow (worst-case 10-100 ms
1257                  * depending on PCI sleeps and our own resume time).
1258                  * This easily dwarfs any performance advantage from
1259                  * using the cache bypass of indirect GGTT access.
1260                  */
1261                 if (!intel_runtime_pm_get_if_in_use(i915)) {
1262                         ret = -EFAULT;
1263                         goto out_unlock;
1264                 }
1265         } else {
1266                 /* No backing pages, no fallback, we must force GGTT access */
1267                 intel_runtime_pm_get(i915);
1268         }
1269
1270         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1271                                        PIN_MAPPABLE |
1272                                        PIN_NONFAULT |
1273                                        PIN_NONBLOCK);
1274         if (!IS_ERR(vma)) {
1275                 node.start = i915_ggtt_offset(vma);
1276                 node.allocated = false;
1277                 ret = i915_vma_put_fence(vma);
1278                 if (ret) {
1279                         i915_vma_unpin(vma);
1280                         vma = ERR_PTR(ret);
1281                 }
1282         }
1283         if (IS_ERR(vma)) {
1284                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1285                 if (ret)
1286                         goto out_rpm;
1287                 GEM_BUG_ON(!node.allocated);
1288         }
1289
1290         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1291         if (ret)
1292                 goto out_unpin;
1293
1294         mutex_unlock(&i915->drm.struct_mutex);
1295
1296         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1297
1298         user_data = u64_to_user_ptr(args->data_ptr);
1299         offset = args->offset;
1300         remain = args->size;
1301         while (remain) {
1302                 /* Operation in this page
1303                  *
1304                  * page_base = page offset within aperture
1305                  * page_offset = offset within page
1306                  * page_length = bytes to copy for this page
1307                  */
1308                 u32 page_base = node.start;
1309                 unsigned int page_offset = offset_in_page(offset);
1310                 unsigned int page_length = PAGE_SIZE - page_offset;
1311                 page_length = remain < page_length ? remain : page_length;
1312                 if (node.allocated) {
1313                         wmb(); /* flush the write before we modify the GGTT */
1314                         ggtt->base.insert_page(&ggtt->base,
1315                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1316                                                node.start, I915_CACHE_NONE, 0);
1317                         wmb(); /* flush modifications to the GGTT (insert_page) */
1318                 } else {
1319                         page_base += offset & PAGE_MASK;
1320                 }
1321                 /* If we get a fault while copying data, then (presumably) our
1322                  * source page isn't available.  Return the error and we'll
1323                  * retry in the slow path.
1324                  * If the object is non-shmem backed, we retry again with the
1325          * path that handles page faults.
1326                  */
1327                 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
1328                                user_data, page_length)) {
1329                         ret = -EFAULT;
1330                         break;
1331                 }
1332
1333                 remain -= page_length;
1334                 user_data += page_length;
1335                 offset += page_length;
1336         }
1337         intel_fb_obj_flush(obj, ORIGIN_CPU);
1338
1339         mutex_lock(&i915->drm.struct_mutex);
1340 out_unpin:
1341         if (node.allocated) {
1342                 wmb();
1343                 ggtt->base.clear_range(&ggtt->base,
1344                                        node.start, node.size);
1345                 remove_mappable_node(&node);
1346         } else {
1347                 i915_vma_unpin(vma);
1348         }
1349 out_rpm:
1350         intel_runtime_pm_put(i915);
1351 out_unlock:
1352         mutex_unlock(&i915->drm.struct_mutex);
1353         return ret;
1354 }
1355
1356 static int
1357 shmem_pwrite_slow(struct page *page, int offset, int length,
1358                   char __user *user_data,
1359                   bool page_do_bit17_swizzling,
1360                   bool needs_clflush_before,
1361                   bool needs_clflush_after)
1362 {
1363         char *vaddr;
1364         int ret;
1365
1366         vaddr = kmap(page);
1367         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1368                 shmem_clflush_swizzled_range(vaddr + offset, length,
1369                                              page_do_bit17_swizzling);
1370         if (page_do_bit17_swizzling)
1371                 ret = __copy_from_user_swizzled(vaddr, offset, user_data,
1372                                                 length);
1373         else
1374                 ret = __copy_from_user(vaddr + offset, user_data, length);
1375         if (needs_clflush_after)
1376                 shmem_clflush_swizzled_range(vaddr + offset, length,
1377                                              page_do_bit17_swizzling);
1378         kunmap(page);
1379
1380         return ret ? -EFAULT : 0;
1381 }
1382
1383 /* Per-page copy function for the shmem pwrite fastpath.
1384  * Flushes invalid cachelines before writing to the target if
1385  * needs_clflush_before is set and flushes out any written cachelines after
1386  * writing if needs_clflush_after is set.
1387  */
1388 static int
1389 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1390              bool page_do_bit17_swizzling,
1391              bool needs_clflush_before,
1392              bool needs_clflush_after)
1393 {
1394         int ret;
1395
1396         ret = -ENODEV;
1397         if (!page_do_bit17_swizzling) {
1398                 char *vaddr = kmap_atomic(page);
1399
1400                 if (needs_clflush_before)
1401                         drm_clflush_virt_range(vaddr + offset, len);
1402                 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
1403                 if (needs_clflush_after)
1404                         drm_clflush_virt_range(vaddr + offset, len);
1405
1406                 kunmap_atomic(vaddr);
1407         }
1408         if (ret == 0)
1409                 return ret;
1410
1411         return shmem_pwrite_slow(page, offset, len, user_data,
1412                                  page_do_bit17_swizzling,
1413                                  needs_clflush_before,
1414                                  needs_clflush_after);
1415 }
1416
1417 static int
1418 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1419                       const struct drm_i915_gem_pwrite *args)
1420 {
1421         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1422         void __user *user_data;
1423         u64 remain;
1424         unsigned int obj_do_bit17_swizzling;
1425         unsigned int partial_cacheline_write;
1426         unsigned int needs_clflush;
1427         unsigned int offset, idx;
1428         int ret;
1429
1430         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1431         if (ret)
1432                 return ret;
1433
1434         ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1435         mutex_unlock(&i915->drm.struct_mutex);
1436         if (ret)
1437                 return ret;
1438
1439         obj_do_bit17_swizzling = 0;
1440         if (i915_gem_object_needs_bit17_swizzle(obj))
1441                 obj_do_bit17_swizzling = BIT(17);
1442
1443         /* If we don't overwrite a cacheline completely we need to be
1444          * careful to have up-to-date data by first clflushing. Don't
1445          * overcomplicate things and flush the entire range being written.
1446          */
1447         partial_cacheline_write = 0;
1448         if (needs_clflush & CLFLUSH_BEFORE)
1449                 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1450
1451         user_data = u64_to_user_ptr(args->data_ptr);
1452         remain = args->size;
1453         offset = offset_in_page(args->offset);
1454         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1455                 struct page *page = i915_gem_object_get_page(obj, idx);
1456                 int length;
1457
1458                 length = remain;
1459                 if (offset + length > PAGE_SIZE)
1460                         length = PAGE_SIZE - offset;
1461
1462                 ret = shmem_pwrite(page, offset, length, user_data,
1463                                    page_to_phys(page) & obj_do_bit17_swizzling,
1464                                    (offset | length) & partial_cacheline_write,
1465                                    needs_clflush & CLFLUSH_AFTER);
1466                 if (ret)
1467                         break;
1468
1469                 remain -= length;
1470                 user_data += length;
1471                 offset = 0;
1472         }
1473
1474         intel_fb_obj_flush(obj, ORIGIN_CPU);
1475         i915_gem_obj_finish_shmem_access(obj);
1476         return ret;
1477 }
1478
1479 /**
1480  * Writes data to the object referenced by handle.
1481  * @dev: drm device
1482  * @data: ioctl data blob
1483  * @file: drm file
1484  *
1485  * On error, the contents of the buffer that were to be modified are undefined.
1486  */
1487 int
1488 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1489                       struct drm_file *file)
1490 {
1491         struct drm_i915_gem_pwrite *args = data;
1492         struct drm_i915_gem_object *obj;
1493         int ret;
1494
1495         if (args->size == 0)
1496                 return 0;
1497
1498         if (!access_ok(VERIFY_READ,
1499                        u64_to_user_ptr(args->data_ptr),
1500                        args->size))
1501                 return -EFAULT;
1502
1503         obj = i915_gem_object_lookup(file, args->handle);
1504         if (!obj)
1505                 return -ENOENT;
1506
1507         /* Bounds check destination. */
1508         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1509                 ret = -EINVAL;
1510                 goto err;
1511         }
1512
1513         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1514
1515         ret = -ENODEV;
1516         if (obj->ops->pwrite)
1517                 ret = obj->ops->pwrite(obj, args);
1518         if (ret != -ENODEV)
1519                 goto err;
1520
1521         ret = i915_gem_object_wait(obj,
1522                                    I915_WAIT_INTERRUPTIBLE |
1523                                    I915_WAIT_ALL,
1524                                    MAX_SCHEDULE_TIMEOUT,
1525                                    to_rps_client(file));
1526         if (ret)
1527                 goto err;
1528
1529         ret = i915_gem_object_pin_pages(obj);
1530         if (ret)
1531                 goto err;
1532
1533         ret = -EFAULT;
1534         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1535          * it would end up going through the fenced access, and we'll get
1536          * different detiling behavior between reading and writing.
1537          * pread/pwrite currently are reading and writing from the CPU
1538          * perspective, requiring manual detiling by the client.
1539          */
1540         if (!i915_gem_object_has_struct_page(obj) ||
1541             cpu_write_needs_clflush(obj))
1542                 /* Note that the gtt paths might fail with non-page-backed user
1543                  * pointers (e.g. gtt mappings when moving data between
1544                  * textures). Fall back to the shmem path in that case.
1545                  */
1546                 ret = i915_gem_gtt_pwrite_fast(obj, args);
1547
1548         if (ret == -EFAULT || ret == -ENOSPC) {
1549                 if (obj->phys_handle)
1550                         ret = i915_gem_phys_pwrite(obj, args, file);
1551                 else
1552                         ret = i915_gem_shmem_pwrite(obj, args);
1553         }
1554
1555         i915_gem_object_unpin_pages(obj);
1556 err:
1557         i915_gem_object_put(obj);
1558         return ret;
1559 }
1560
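/*
 * Illustrative userspace sketch (not part of this file; `fd`, `handle`
 * and `data` are assumed to exist): exercising the ioctl above looks
 * roughly like
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(data),
 *		.data_ptr = (uintptr_t)data,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 *
 * On error the destination contents are undefined, as documented above.
 */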
1561 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1562 {
1563         struct drm_i915_private *i915;
1564         struct list_head *list;
1565         struct i915_vma *vma;
1566
1567         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1568
1569         for_each_ggtt_vma(vma, obj) {
1570                 if (i915_vma_is_active(vma))
1571                         continue;
1572
1573                 if (!drm_mm_node_allocated(&vma->node))
1574                         continue;
1575
1576                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1577         }
1578
1579         i915 = to_i915(obj->base.dev);
1580         spin_lock(&i915->mm.obj_lock);
1581         list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1582         list_move_tail(&obj->mm.link, list);
1583         spin_unlock(&i915->mm.obj_lock);
1584 }
1585
1586 /**
1587  * Called when user space prepares to use an object with the CPU, either
1588  * through the mmap ioctl's mapping or a GTT mapping.
1589  * @dev: drm device
1590  * @data: ioctl data blob
1591  * @file: drm file
1592  */
1593 int
1594 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1595                           struct drm_file *file)
1596 {
1597         struct drm_i915_gem_set_domain *args = data;
1598         struct drm_i915_gem_object *obj;
1599         uint32_t read_domains = args->read_domains;
1600         uint32_t write_domain = args->write_domain;
1601         int err;
1602
1603         /* Only handle setting domains to types used by the CPU. */
1604         if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1605                 return -EINVAL;
1606
1607         /* Having something in the write domain implies it's in the read
1608          * domain, and only that read domain.  Enforce that in the request.
1609          */
1610         if (write_domain != 0 && read_domains != write_domain)
1611                 return -EINVAL;
1612
1613         obj = i915_gem_object_lookup(file, args->handle);
1614         if (!obj)
1615                 return -ENOENT;
1616
1617         /* Try to flush the object off the GPU without holding the lock.
1618          * We will repeat the flush holding the lock in the normal manner
1619          * to catch cases where we are gazumped.
1620          */
1621         err = i915_gem_object_wait(obj,
1622                                    I915_WAIT_INTERRUPTIBLE |
1623                                    (write_domain ? I915_WAIT_ALL : 0),
1624                                    MAX_SCHEDULE_TIMEOUT,
1625                                    to_rps_client(file));
1626         if (err)
1627                 goto out;
1628
1629         /*
1630          * Proxy objects do not control access to the backing storage, ergo
1631          * they cannot be used as a means to manipulate the cache domain
1632          * tracking for that backing storage. The proxy object is always
1633          * considered to be outside of any cache domain.
1634          */
1635         if (i915_gem_object_is_proxy(obj)) {
1636                 err = -ENXIO;
1637                 goto out;
1638         }
1639
1640         /*
1641          * Flush and acquire obj->pages so that we are coherent through
1642          * direct access in memory with previous cached writes through
1643          * shmemfs and that our cache domain tracking remains valid.
1644          * For example, if the obj->filp was moved to swap without us
1645          * being notified and releasing the pages, we would mistakenly
1646          * continue to assume that the obj remained out of the CPU cached
1647          * domain.
1648          */
1649         err = i915_gem_object_pin_pages(obj);
1650         if (err)
1651                 goto out;
1652
1653         err = i915_mutex_lock_interruptible(dev);
1654         if (err)
1655                 goto out_unpin;
1656
1657         if (read_domains & I915_GEM_DOMAIN_WC)
1658                 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
1659         else if (read_domains & I915_GEM_DOMAIN_GTT)
1660                 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
1661         else
1662                 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
1663
1664         /* And bump the LRU for this access */
1665         i915_gem_object_bump_inactive_ggtt(obj);
1666
1667         mutex_unlock(&dev->struct_mutex);
1668
1669         if (write_domain != 0)
1670                 intel_fb_obj_invalidate(obj,
1671                                         fb_write_origin(obj, write_domain));
1672
1673 out_unpin:
1674         i915_gem_object_unpin_pages(obj);
1675 out:
1676         i915_gem_object_put(obj);
1677         return err;
1678 }
1679
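/*
 * Illustrative userspace sketch (not part of this file): moving an
 * object into the GTT domain for CPU writes through a GTT mmap looks
 * roughly like
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *
 * Note that a non-zero write_domain must equal read_domains, per the
 * check at the top of the ioctl above.
 */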
1680 /**
1681  * Called when user space has done writes to this buffer
1682  * @dev: drm device
1683  * @data: ioctl data blob
1684  * @file: drm file
1685  */
1686 int
1687 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1688                          struct drm_file *file)
1689 {
1690         struct drm_i915_gem_sw_finish *args = data;
1691         struct drm_i915_gem_object *obj;
1692
1693         obj = i915_gem_object_lookup(file, args->handle);
1694         if (!obj)
1695                 return -ENOENT;
1696
1697         /*
1698          * Proxy objects are barred from CPU access, so there is no
1699          * need to ban sw_finish as it is a nop.
1700          */
1701
1702         /* Pinned buffers may be scanout, so flush the cache */
1703         i915_gem_object_flush_if_display(obj);
1704         i915_gem_object_put(obj);
1705
1706         return 0;
1707 }
1708
1709 /**
1710  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1711  *                       it is mapped to.
1712  * @dev: drm device
1713  * @data: ioctl data blob
1714  * @file: drm file
1715  *
1716  * While the mapping holds a reference on the contents of the object, it doesn't
1717  * imply a ref on the object itself.
1718  *
1719  * IMPORTANT:
1720  *
1721  * DRM driver writers who look at this function as an example of how to do GEM
1722  * mmap support: please don't implement mmap support like this. The modern way
1723  * to implement DRM mmap support is with an mmap offset ioctl (like
1724  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1725  * That way debug tooling like valgrind will understand what's going on; hiding
1726  * the mmap call in a driver-private ioctl will break that. The i915 driver only
1727  * does CPU mmaps this way because we didn't know better.
1728  */
1729 int
1730 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1731                     struct drm_file *file)
1732 {
1733         struct drm_i915_gem_mmap *args = data;
1734         struct drm_i915_gem_object *obj;
1735         unsigned long addr;
1736
1737         if (args->flags & ~(I915_MMAP_WC))
1738                 return -EINVAL;
1739
1740         if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1741                 return -ENODEV;
1742
1743         obj = i915_gem_object_lookup(file, args->handle);
1744         if (!obj)
1745                 return -ENOENT;
1746
1747         /* prime objects have no backing filp to GEM mmap
1748          * pages from.
1749          */
1750         if (!obj->base.filp) {
1751                 i915_gem_object_put(obj);
1752                 return -ENXIO;
1753         }
1754
1755         addr = vm_mmap(obj->base.filp, 0, args->size,
1756                        PROT_READ | PROT_WRITE, MAP_SHARED,
1757                        args->offset);
1758         if (args->flags & I915_MMAP_WC) {
1759                 struct mm_struct *mm = current->mm;
1760                 struct vm_area_struct *vma;
1761
1762                 if (down_write_killable(&mm->mmap_sem)) {
1763                         i915_gem_object_put(obj);
1764                         return -EINTR;
1765                 }
1766                 vma = find_vma(mm, addr);
1767                 if (vma)
1768                         vma->vm_page_prot =
1769                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1770                 else
1771                         addr = -ENOMEM;
1772                 up_write(&mm->mmap_sem);
1773
1774                 /* This may race, but that's ok, it only gets set */
1775                 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1776         }
1777         i915_gem_object_put(obj);
1778         if (IS_ERR((void *)addr))
1779                 return addr;
1780
1781         args->addr_ptr = (uint64_t) addr;
1782
1783         return 0;
1784 }
1785
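/*
 * Illustrative userspace sketch (not part of this file): a WC CPU mmap
 * through this legacy ioctl looks roughly like
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.size = obj_size,
 *		.flags = I915_MMAP_WC,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg);
 *	void *ptr = (void *)(uintptr_t)arg.addr_ptr;
 *
 * As the comment above stresses, new drivers should expose an mmap
 * offset and let userspace call mmap() on the DRM fd instead.
 */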
1786 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1787 {
1788         return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
1789 }
1790
1791 /**
1792  * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1793  *
1794  * A history of the GTT mmap interface:
1795  *
1796  * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1797  *     be aligned and suitable for fencing, and still fit into the available
1798  *     mappable space left by the pinned display objects. A classic problem
1799  *     we called the page-fault-of-doom where we would ping-pong between
1800  *     two objects that could not fit inside the GTT and so the memcpy
1801  *     would page one object in at the expense of the other between every
1802  *     single byte.
1803  *
1804  * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1805  *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1806  *     object is too large for the available space (or simply too large
1807  *     for the mappable aperture!), a view is created instead and faulted
1808  *     into userspace. (This view is aligned and sized appropriately for
1809  *     fenced access.)
1810  *
1811  * 2 - Recognise WC as a separate cache domain so that we can flush the
1812  *     delayed writes via GTT before performing direct access via WC.
1813  *
1814  * Restrictions:
1815  *
1816  *  * snoopable objects cannot be accessed via the GTT. Doing so can cause machine
1817  *    hangs on some architectures, corruption on others. An attempt to service
1818  *    a GTT page fault from a snoopable object will generate a SIGBUS.
1819  *
1820  *  * the object must be able to fit into RAM (physical memory, though not
1821  *    limited to the mappable aperture).
1822  *
1823  *
1824  * Caveats:
1825  *
1826  *  * a new GTT page fault will synchronize rendering from the GPU and flush
1827  *    all data to system memory. Subsequent access will not be synchronized.
1828  *
1829  *  * all mappings are revoked on runtime device suspend.
1830  *
1831  *  * there are only 8, 16 or 32 fence registers to share between all users
1832  *    (older machines require fence register for display and blitter access
1833  *    as well). Contention of the fence registers will cause the previous users
1834  *    to be unmapped and any new access will generate new page faults.
1835  *
1836  *  * running out of memory while servicing a fault may generate a SIGBUS,
1837  *    rather than the expected SIGSEGV.
1838  */
1839 int i915_gem_mmap_gtt_version(void)
1840 {
1841         return 2;
1842 }
1843
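/*
 * Illustrative userspace sketch (not part of this file): the version is
 * reported through GETPARAM, roughly
 *
 *	int value = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &value,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *
 * letting userspace decide whether partial views (version >= 1) and the
 * WC domain (version >= 2) are available.
 */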
1844 static inline struct i915_ggtt_view
1845 compute_partial_view(struct drm_i915_gem_object *obj,
1846                      pgoff_t page_offset,
1847                      unsigned int chunk)
1848 {
1849         struct i915_ggtt_view view;
1850
1851         if (i915_gem_object_is_tiled(obj))
1852                 chunk = roundup(chunk, tile_row_pages(obj));
1853
1854         view.type = I915_GGTT_VIEW_PARTIAL;
1855         view.partial.offset = rounddown(page_offset, chunk);
1856         view.partial.size =
1857                 min_t(unsigned int, chunk,
1858                       (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
1859
1860         /* If the partial covers the entire object, just create a normal VMA. */
1861         if (chunk >= obj->base.size >> PAGE_SHIFT)
1862                 view.type = I915_GGTT_VIEW_NORMAL;
1863
1864         return view;
1865 }
1866
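/*
 * Worked example (illustrative numbers only): with 4K pages,
 * MIN_CHUNK_PAGES is 256. An untiled 1024-page (4 MiB) object faulting
 * at page_offset 300 gets a partial view of offset 256 and size 256,
 * i.e. pages [256, 512); only objects of 256 pages or fewer fall back
 * to a normal view here.
 */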
1867 /**
1868  * i915_gem_fault - fault a page into the GTT
1869  * @vmf: fault info
1870  *
1871  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1872  * from userspace.  The fault handler takes care of binding the object to
1873  * the GTT (if needed), allocating and programming a fence register (again,
1874  * only if needed based on whether the old reg is still valid or the object
1875  * is tiled) and inserting a new PTE into the faulting process.
1876  *
1877  * Note that the faulting process may involve evicting existing objects
1878  * from the GTT and/or fence registers to make room.  So performance may
1879  * suffer if the GTT working set is large or there are few fence registers
1880  * left.
1881  *
1882  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1883  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1884  */
1885 int i915_gem_fault(struct vm_fault *vmf)
1886 {
1887 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
1888         struct vm_area_struct *area = vmf->vma;
1889         struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1890         struct drm_device *dev = obj->base.dev;
1891         struct drm_i915_private *dev_priv = to_i915(dev);
1892         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1893         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1894         struct i915_vma *vma;
1895         pgoff_t page_offset;
1896         unsigned int flags;
1897         int ret;
1898
1899         /* We don't use vmf->pgoff since that has the fake offset */
1900         page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
1901
1902         trace_i915_gem_object_fault(obj, page_offset, true, write);
1903
1904         /* Try to flush the object off the GPU first without holding the lock.
1905          * Upon acquiring the lock, we will perform our sanity checks and then
1906          * repeat the flush holding the lock in the normal manner to catch cases
1907          * where we are gazumped.
1908          */
1909         ret = i915_gem_object_wait(obj,
1910                                    I915_WAIT_INTERRUPTIBLE,
1911                                    MAX_SCHEDULE_TIMEOUT,
1912                                    NULL);
1913         if (ret)
1914                 goto err;
1915
1916         ret = i915_gem_object_pin_pages(obj);
1917         if (ret)
1918                 goto err;
1919
1920         intel_runtime_pm_get(dev_priv);
1921
1922         ret = i915_mutex_lock_interruptible(dev);
1923         if (ret)
1924                 goto err_rpm;
1925
1926         /* Access to snoopable pages through the GTT is incoherent. */
1927         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1928                 ret = -EFAULT;
1929                 goto err_unlock;
1930         }
1931
1932         /* If the object is smaller than a couple of partial vmas, it is
1933          * not worth creating only a single partial vma - we may as well
1934          * clear enough space for the full object.
1935          */
1936         flags = PIN_MAPPABLE;
1937         if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1938                 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1939
1940         /* Now pin it into the GTT as needed */
1941         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1942         if (IS_ERR(vma)) {
1943                 /* Use a partial view if the object is bigger than the available space */
1944                 struct i915_ggtt_view view =
1945                         compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
1946
1947                 /* Userspace is now writing through an untracked VMA, abandon
1948                  * all hope that the hardware is able to track future writes.
1949                  */
1950                 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1951
1952                 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1953         }
1954         if (IS_ERR(vma)) {
1955                 ret = PTR_ERR(vma);
1956                 goto err_unlock;
1957         }
1958
1959         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1960         if (ret)
1961                 goto err_unpin;
1962
1963         ret = i915_vma_pin_fence(vma);
1964         if (ret)
1965                 goto err_unpin;
1966
1967         /* Finally, remap it using the new GTT offset */
1968         ret = remap_io_mapping(area,
1969                                area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
1970                                (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
1971                                min_t(u64, vma->size, area->vm_end - area->vm_start),
1972                                &ggtt->iomap);
1973         if (ret)
1974                 goto err_fence;
1975
1976         /* Mark as being mmapped into userspace for later revocation */
1977         assert_rpm_wakelock_held(dev_priv);
1978         if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
1979                 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1980         GEM_BUG_ON(!obj->userfault_count);
1981
1982         i915_vma_set_ggtt_write(vma);
1983
1984 err_fence:
1985         i915_vma_unpin_fence(vma);
1986 err_unpin:
1987         __i915_vma_unpin(vma);
1988 err_unlock:
1989         mutex_unlock(&dev->struct_mutex);
1990 err_rpm:
1991         intel_runtime_pm_put(dev_priv);
1992         i915_gem_object_unpin_pages(obj);
1993 err:
1994         switch (ret) {
1995         case -EIO:
1996                 /*
1997                  * We eat errors when the gpu is terminally wedged to avoid
1998                  * userspace unduly crashing (gl has no provisions for mmaps to
1999                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
2000                  * and so needs to be reported.
2001                  */
2002                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
2003                         ret = VM_FAULT_SIGBUS;
2004                         break;
2005                 }
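                /* fall through */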
2006         case -EAGAIN:
2007                 /*
2008                  * EAGAIN means the gpu is hung and we'll wait for the error
2009                  * handler to reset everything when re-faulting in
2010                  * i915_mutex_lock_interruptible.
2011                  */
2012         case 0:
2013         case -ERESTARTSYS:
2014         case -EINTR:
2015         case -EBUSY:
2016                 /*
2017                  * EBUSY is ok: this just means that another thread
2018                  * already did the job.
2019                  */
2020                 ret = VM_FAULT_NOPAGE;
2021                 break;
2022         case -ENOMEM:
2023                 ret = VM_FAULT_OOM;
2024                 break;
2025         case -ENOSPC:
2026         case -EFAULT:
2027                 ret = VM_FAULT_SIGBUS;
2028                 break;
2029         default:
2030                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
2031                 ret = VM_FAULT_SIGBUS;
2032                 break;
2033         }
2034         return ret;
2035 }
2036
2037 static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
2038 {
2039         struct i915_vma *vma;
2040
2041         GEM_BUG_ON(!obj->userfault_count);
2042
2043         obj->userfault_count = 0;
2044         list_del(&obj->userfault_link);
2045         drm_vma_node_unmap(&obj->base.vma_node,
2046                            obj->base.dev->anon_inode->i_mapping);
2047
2048         for_each_ggtt_vma(vma, obj)
2049                 i915_vma_unset_userfault(vma);
2050 }
2051
2052 /**
2053  * i915_gem_release_mmap - remove physical page mappings
2054  * @obj: obj in question
2055  *
2056  * Preserve the reservation of the mmapping with the DRM core code, but
2057  * relinquish ownership of the pages back to the system.
2058  *
2059  * It is vital that we remove the page mapping if we have mapped a tiled
2060  * object through the GTT and then lose the fence register due to
2061  * resource pressure. Similarly if the object has been moved out of the
2062  * aperture, then pages mapped into userspace must be revoked. Removing the
2063  * mapping will then trigger a page fault on the next user access, allowing
2064  * fixup by i915_gem_fault().
2065  */
2066 void
2067 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
2068 {
2069         struct drm_i915_private *i915 = to_i915(obj->base.dev);
2070
2071         /* Serialisation between user GTT access and our code depends upon
2072          * revoking the CPU's PTE whilst the mutex is held. The next user
2073          * pagefault then has to wait until we release the mutex.
2074          *
2075          * Note that RPM complicates matters somewhat by adding an additional
2076          * requirement that operations to the GGTT be made holding the RPM
2077          * wakeref.
2078          */
2079         lockdep_assert_held(&i915->drm.struct_mutex);
2080         intel_runtime_pm_get(i915);
2081
2082         if (!obj->userfault_count)
2083                 goto out;
2084
2085         __i915_gem_object_release_mmap(obj);
2086
2087         /* Ensure that the CPU's PTEs are revoked and there are no outstanding
2088          * memory transactions from userspace before we return. The TLB
2089          * flushing implied by changing the PTEs above *should* be
2090          * sufficient, an extra barrier here just provides us with a bit
2091          * of paranoid documentation about our requirement to serialise
2092          * memory writes before touching registers / GSM.
2093          */
2094         wmb();
2095
2096 out:
2097         intel_runtime_pm_put(i915);
2098 }
2099
2100 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
2101 {
2102         struct drm_i915_gem_object *obj, *on;
2103         int i;
2104
2105         /*
2106          * Only called during RPM suspend. All users of the userfault_list
2107          * must be holding an RPM wakeref to ensure that this cannot
2108          * run concurrently with them (they use the struct_mutex for
2109          * protection against each other).
2110          */
2111
2112         list_for_each_entry_safe(obj, on,
2113                                  &dev_priv->mm.userfault_list, userfault_link)
2114                 __i915_gem_object_release_mmap(obj);
2115
2116         /* The fence registers will be lost when the device powers down. If any were
2117          * in use by hardware (i.e. they are pinned), we should not be powering
2118          * down! All other fences will be reacquired by the user upon waking.
2119          */
2120         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2121                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2122
2123                 /* Ideally we want to assert that the fence register is not
2124                  * live at this point (i.e. that no piece of code will be
2125                  * trying to write through fence + GTT, as that not only violates
2126                  * our tracking of activity and associated locking/barriers,
2127                  * but is also illegal given that the hw is powered down).
2128                  *
2129                  * Previously we used reg->pin_count as a "liveness" indicator.
2130                  * That is not sufficient, and we need a more fine-grained
2131                  * tool if we want to have a sanity check here.
2132                  */
2133
2134                 if (!reg->vma)
2135                         continue;
2136
2137                 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
2138                 reg->dirty = true;
2139         }
2140 }
2141
2142 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2143 {
2144         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2145         int err;
2146
2147         err = drm_gem_create_mmap_offset(&obj->base);
2148         if (likely(!err))
2149                 return 0;
2150
2151         /* Attempt to reap some mmap space from dead objects */
2152         do {
2153                 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2154                 if (err)
2155                         break;
2156
2157                 i915_gem_drain_freed_objects(dev_priv);
2158                 err = drm_gem_create_mmap_offset(&obj->base);
2159                 if (!err)
2160                         break;
2161
2162         } while (flush_delayed_work(&dev_priv->gt.retire_work));
2163
2164         return err;
2165 }
2166
2167 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2168 {
2169         drm_gem_free_mmap_offset(&obj->base);
2170 }
2171
2172 int
2173 i915_gem_mmap_gtt(struct drm_file *file,
2174                   struct drm_device *dev,
2175                   uint32_t handle,
2176                   uint64_t *offset)
2177 {
2178         struct drm_i915_gem_object *obj;
2179         int ret;
2180
2181         obj = i915_gem_object_lookup(file, handle);
2182         if (!obj)
2183                 return -ENOENT;
2184
2185         ret = i915_gem_object_create_mmap_offset(obj);
2186         if (ret == 0)
2187                 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2188
2189         i915_gem_object_put(obj);
2190         return ret;
2191 }
2192
2193 /**
2194  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2195  * @dev: DRM device
2196  * @data: GTT mapping ioctl data
2197  * @file: GEM object info
2198  *
2199  * Simply returns the fake offset to userspace so it can mmap it.
2200  * The mmap call will end up in drm_gem_mmap(), which will set things
2201  * up so we can get faults in the handler above.
2202  *
2203  * The fault handler will take care of binding the object into the GTT
2204  * (since it may have been evicted to make room for something), allocating
2205  * a fence register, and mapping the appropriate aperture address into
2206  * userspace.
2207  */
2208 int
2209 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2210                         struct drm_file *file)
2211 {
2212         struct drm_i915_gem_mmap_gtt *args = data;
2213
2214         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2215 }
2216
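/*
 * Illustrative userspace sketch (not part of this file): the two-step
 * flow described above looks roughly like
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	void *ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, arg.offset);
 *
 * after which accesses through ptr fault into i915_gem_fault() above.
 */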
2217 /* Immediately discard the backing storage */
2218 static void
2219 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2220 {
2221         i915_gem_object_free_mmap_offset(obj);
2222
2223         if (obj->base.filp == NULL)
2224                 return;
2225
2226         /* Our goal here is to return as much of the memory as
2227          * possible back to the system, as we are called from OOM.
2228          * To do this we must instruct the shmfs to drop all of its
2229          * backing pages, *now*.
2230          */
2231         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2232         obj->mm.madv = __I915_MADV_PURGED;
2233         obj->mm.pages = ERR_PTR(-EFAULT);
2234 }
2235
2236 /* Try to discard unwanted pages */
2237 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2238 {
2239         struct address_space *mapping;
2240
2241         lockdep_assert_held(&obj->mm.lock);
2242         GEM_BUG_ON(i915_gem_object_has_pages(obj));
2243
2244         switch (obj->mm.madv) {
2245         case I915_MADV_DONTNEED:
2246                 i915_gem_object_truncate(obj);
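                /* fall through */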
2247         case __I915_MADV_PURGED:
2248                 return;
2249         }
2250
2251         if (obj->base.filp == NULL)
2252                 return;
2253
2254         mapping = obj->base.filp->f_mapping;
2255         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2256 }
2257
2258 static void
2259 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2260                               struct sg_table *pages)
2261 {
2262         struct sgt_iter sgt_iter;
2263         struct page *page;
2264
2265         __i915_gem_object_release_shmem(obj, pages, true);
2266
2267         i915_gem_gtt_finish_pages(obj, pages);
2268
2269         if (i915_gem_object_needs_bit17_swizzle(obj))
2270                 i915_gem_object_save_bit_17_swizzle(obj, pages);
2271
2272         for_each_sgt_page(page, sgt_iter, pages) {
2273                 if (obj->mm.dirty)
2274                         set_page_dirty(page);
2275
2276                 if (obj->mm.madv == I915_MADV_WILLNEED)
2277                         mark_page_accessed(page);
2278
2279                 put_page(page);
2280         }
2281         obj->mm.dirty = false;
2282
2283         sg_free_table(pages);
2284         kfree(pages);
2285 }
2286
2287 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2288 {
2289         struct radix_tree_iter iter;
2290         void __rcu **slot;
2291
2292         rcu_read_lock();
2293         radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2294                 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2295         rcu_read_unlock();
2296 }
2297
2298 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2299                                  enum i915_mm_subclass subclass)
2300 {
2301         struct drm_i915_private *i915 = to_i915(obj->base.dev);
2302         struct sg_table *pages;
2303
2304         if (i915_gem_object_has_pinned_pages(obj))
2305                 return;
2306
2307         GEM_BUG_ON(obj->bind_count);
2308         if (!i915_gem_object_has_pages(obj))
2309                 return;
2310
2311         /* May be called by shrinker from within get_pages() (on another bo) */
2312         mutex_lock_nested(&obj->mm.lock, subclass);
2313         if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
2314                 goto unlock;
2315
2316         /* ->put_pages might need to allocate memory for the bit17 swizzle
2317          * array, hence protect them from being reaped by removing them from gtt
2318          * lists early. */
2319         pages = fetch_and_zero(&obj->mm.pages);
2320         GEM_BUG_ON(!pages);
2321
2322         spin_lock(&i915->mm.obj_lock);
2323         list_del(&obj->mm.link);
2324         spin_unlock(&i915->mm.obj_lock);
2325
2326         if (obj->mm.mapping) {
2327                 void *ptr;
2328
2329                 ptr = page_mask_bits(obj->mm.mapping);
2330                 if (is_vmalloc_addr(ptr))
2331                         vunmap(ptr);
2332                 else
2333                         kunmap(kmap_to_page(ptr));
2334
2335                 obj->mm.mapping = NULL;
2336         }
2337
2338         __i915_gem_object_reset_page_iter(obj);
2339
2340         if (!IS_ERR(pages))
2341                 obj->ops->put_pages(obj, pages);
2342
2343         obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
2344
2345 unlock:
2346         mutex_unlock(&obj->mm.lock);
2347 }
2348
2349 static bool i915_sg_trim(struct sg_table *orig_st)
2350 {
2351         struct sg_table new_st;
2352         struct scatterlist *sg, *new_sg;
2353         unsigned int i;
2354
2355         if (orig_st->nents == orig_st->orig_nents)
2356                 return false;
2357
2358         if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
2359                 return false;
2360
2361         new_sg = new_st.sgl;
2362         for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2363                 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2364                 /* called before being DMA mapped, no need to copy sg->dma_* */
2365                 new_sg = sg_next(new_sg);
2366         }
2367         GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
2368
2369         sg_free_table(orig_st);
2370
2371         *orig_st = new_st;
2372         return true;
2373 }
2374
2375 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2376 {
2377         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2378         const unsigned long page_count = obj->base.size / PAGE_SIZE;
2379         unsigned long i;
2380         struct address_space *mapping;
2381         struct sg_table *st;
2382         struct scatterlist *sg;
2383         struct sgt_iter sgt_iter;
2384         struct page *page;
2385         unsigned long last_pfn = 0;     /* suppress gcc warning */
2386         unsigned int max_segment = i915_sg_segment_size();
2387         unsigned int sg_page_sizes;
2388         gfp_t noreclaim;
2389         int ret;
2390
2391         /* Assert that the object is not currently in any GPU domain. As it
2392          * wasn't in the GTT, there shouldn't be any way it could have been in
2393          * a GPU cache
2394          */
2395         GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2396         GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2397
2398         st = kmalloc(sizeof(*st), GFP_KERNEL);
2399         if (st == NULL)
2400                 return -ENOMEM;
2401
2402 rebuild_st:
2403         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2404                 kfree(st);
2405                 return -ENOMEM;
2406         }
2407
2408         /* Get the list of pages out of our struct file.  They'll be pinned
2409          * at this point until we release them.
2410          *
2411          * Fail silently without starting the shrinker
2412          */
2413         mapping = obj->base.filp->f_mapping;
2414         noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
2415         noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2416
2417         sg = st->sgl;
2418         st->nents = 0;
2419         sg_page_sizes = 0;
2420         for (i = 0; i < page_count; i++) {
2421                 const unsigned int shrink[] = {
2422                         I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2423                         0,
2424                 }, *s = shrink;
2425                 gfp_t gfp = noreclaim;
2426
2427                 do {
2428                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2429                         if (likely(!IS_ERR(page)))
2430                                 break;
2431
2432                         if (!*s) {
2433                                 ret = PTR_ERR(page);
2434                                 goto err_sg;
2435                         }
2436
2437                         i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
2438                         cond_resched();
2439
2440                         /* We've tried hard to allocate the memory by reaping
2441                          * our own buffers; now let the real VM do its job and
2442                          * go down in flames if truly OOM.
2443                          *
2444                          * However, since graphics tend to be disposable,
2445                          * defer the oom here by reporting the ENOMEM back
2446                          * to userspace.
2447                          */
2448                         if (!*s) {
2449                                 /* reclaim and warn, but no oom */
2450                                 gfp = mapping_gfp_mask(mapping);
2451
2452                                  * Our BOs are always dirty and so we require
2453                                  * kswapd to reclaim our pages (direct reclaim
2454                                  * does not effectively begin pageout of our
2455                                  * buffers on its own). However, direct reclaim
2456                                  * only waits for kswapd when under allocation
2457                                  * congestion. So as a result __GFP_RECLAIM is
2458                                  * unreliable and fails to actually reclaim our
2459                                  * dirty pages -- unless you try over and over
2460                                  * again with !__GFP_NORETRY. However, we still
2461                                  * want to fail this allocation rather than
2462                                  * trigger the out-of-memory killer and for
2463                                  * this we want __GFP_RETRY_MAYFAIL.
2464                                  */
2465                                 gfp |= __GFP_RETRY_MAYFAIL;
2466                         }
2467                 } while (1);
2468
2469                 if (!i ||
2470                     sg->length >= max_segment ||
2471                     page_to_pfn(page) != last_pfn + 1) {
2472                         if (i) {
2473                                 sg_page_sizes |= sg->length;
2474                                 sg = sg_next(sg);
2475                         }
2476                         st->nents++;
2477                         sg_set_page(sg, page, PAGE_SIZE, 0);
2478                 } else {
2479                         sg->length += PAGE_SIZE;
2480                 }
2481                 last_pfn = page_to_pfn(page);
2482
2483                 /* Check that the i965g/gm workaround works. */
2484                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2485         }
2486         if (sg) { /* loop terminated early; short sg table */
2487                 sg_page_sizes |= sg->length;
2488                 sg_mark_end(sg);
2489         }
2490
2491         /* Trim unused sg entries to avoid wasting memory. */
2492         i915_sg_trim(st);
2493
2494         ret = i915_gem_gtt_prepare_pages(obj, st);
2495         if (ret) {
2496                 /* DMA remapping failed? One possible cause is that
2497                  * it could not reserve enough large entries; asking
2498                  * for PAGE_SIZE chunks instead may be helpful.
2499                  */
2500                 if (max_segment > PAGE_SIZE) {
2501                         for_each_sgt_page(page, sgt_iter, st)
2502                                 put_page(page);
2503                         sg_free_table(st);
2504
2505                         max_segment = PAGE_SIZE;
2506                         goto rebuild_st;
2507                 } else {
2508                         dev_warn(&dev_priv->drm.pdev->dev,
2509                                  "Failed to DMA remap %lu pages\n",
2510                                  page_count);
2511                         goto err_pages;
2512                 }
2513         }
2514
2515         if (i915_gem_object_needs_bit17_swizzle(obj))
2516                 i915_gem_object_do_bit_17_swizzle(obj, st);
2517
2518         __i915_gem_object_set_pages(obj, st, sg_page_sizes);
2519
2520         return 0;
2521
2522 err_sg:
2523         sg_mark_end(sg);
2524 err_pages:
2525         for_each_sgt_page(page, sgt_iter, st)
2526                 put_page(page);
2527         sg_free_table(st);
2528         kfree(st);
2529
2530         /* shmemfs first checks if there is enough memory to allocate the page
2531          * and reports ENOSPC should there be insufficient memory, along with the usual
2532          * ENOMEM for a genuine allocation failure.
2533          *
2534          * We use ENOSPC in our driver to mean that we have run out of aperture
2535          * space and so want to translate the error from shmemfs back to our
2536          * usual understanding of ENOMEM.
2537          */
2538         if (ret == -ENOSPC)
2539                 ret = -ENOMEM;
2540
2541         return ret;
2542 }
2543
2544 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2545                                  struct sg_table *pages,
2546                                  unsigned int sg_page_sizes)
2547 {
2548         struct drm_i915_private *i915 = to_i915(obj->base.dev);
2549         unsigned long supported = INTEL_INFO(i915)->page_sizes;
2550         int i;
2551
2552         lockdep_assert_held(&obj->mm.lock);
2553
2554         obj->mm.get_page.sg_pos = pages->sgl;
2555         obj->mm.get_page.sg_idx = 0;
2556
2557         obj->mm.pages = pages;
2558
2559         if (i915_gem_object_is_tiled(obj) &&
2560             i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2561                 GEM_BUG_ON(obj->mm.quirked);
2562                 __i915_gem_object_pin_pages(obj);
2563                 obj->mm.quirked = true;
2564         }
2565
2566         GEM_BUG_ON(!sg_page_sizes);
2567         obj->mm.page_sizes.phys = sg_page_sizes;
2568
2569         /*
2570          * Calculate the supported page-sizes which fit into the given
2571          * sg_page_sizes. This will give us the page-sizes which we may be able
2572          * to use opportunistically when later inserting into the GTT. For
2573          * example if phys=2G, then in theory we should be able to use 1G, 2M,
2574          * 64K or 4K pages, although in practice this will depend on a number of
2575          * other factors.
2576          */
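        /*
         * Illustrative numbers only: on a platform whose supported mask
         * includes 4K, 64K and 2M, a backing store whose largest chunk is
         * 2M (phys = 2M | 4K) yields sg = 4K | 64K | 2M below, while a
         * store made purely of 4K chunks yields sg = 4K.
         */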
2577         obj->mm.page_sizes.sg = 0;
2578         for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
2579                 if (obj->mm.page_sizes.phys & ~0u << i)
2580                         obj->mm.page_sizes.sg |= BIT(i);
2581         }
2582         GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
2583
2584         spin_lock(&i915->mm.obj_lock);
2585         list_add(&obj->mm.link, &i915->mm.unbound_list);
2586         spin_unlock(&i915->mm.obj_lock);
2587 }
2588
2589 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2590 {
2591         int err;
2592
2593         if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2594                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2595                 return -EFAULT;
2596         }
2597
2598         err = obj->ops->get_pages(obj);
2599         GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
2600
2601         return err;
2602 }
2603
2604 /* Ensure that the associated pages are gathered from the backing storage
2605  * and pinned into our object. i915_gem_object_pin_pages() may be called
2606  * multiple times before they are released by a single call to
2607  * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2608  * either as a result of memory pressure (reaping pages under the shrinker)
2609  * or as the object is itself released.
2610  */
2611 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2612 {
2613         int err;
2614
2615         err = mutex_lock_interruptible(&obj->mm.lock);
2616         if (err)
2617                 return err;
2618
2619         if (unlikely(!i915_gem_object_has_pages(obj))) {
2620                 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2621
2622                 err = ____i915_gem_object_get_pages(obj);
2623                 if (err)
2624                         goto unlock;
2625
2626                 smp_mb__before_atomic();
2627         }
2628         atomic_inc(&obj->mm.pages_pin_count);
2629
2630 unlock:
2631         mutex_unlock(&obj->mm.lock);
2632         return err;
2633 }
2634
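/*
 * Illustrative in-kernel sketch (not taken from this file): a typical
 * caller brackets CPU access to the backing store with
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	...
 *	i915_gem_object_unpin_pages(obj);
 *
 * and only the final unpin (dropping mm.pages_pin_count to zero) lets
 * the shrinker reap the pages again.
 */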
2635 /* The 'mapping' part of i915_gem_object_pin_map() below */
2636 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2637                                  enum i915_map_type type)
2638 {
2639         unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2640         struct sg_table *sgt = obj->mm.pages;
2641         struct sgt_iter sgt_iter;
2642         struct page *page;
2643         struct page *stack_pages[32];
2644         struct page **pages = stack_pages;
2645         unsigned long i = 0;
2646         pgprot_t pgprot;
2647         void *addr;
2648
2649         /* A single page can always be kmapped */
2650         if (n_pages == 1 && type == I915_MAP_WB)
2651                 return kmap(sg_page(sgt->sgl));
2652
2653         if (n_pages > ARRAY_SIZE(stack_pages)) {
2654                 /* Too big for stack -- allocate temporary array instead */
2655                 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
2656                 if (!pages)
2657                         return NULL;
2658         }
2659
2660         for_each_sgt_page(page, sgt_iter, sgt)
2661                 pages[i++] = page;
2662
2663         /* Check that we have the expected number of pages */
2664         GEM_BUG_ON(i != n_pages);
2665
2666         switch (type) {
2667         default:
2668                 MISSING_CASE(type);
2669                 /* fallthrough to use PAGE_KERNEL anyway */
2670         case I915_MAP_WB:
2671                 pgprot = PAGE_KERNEL;
2672                 break;
2673         case I915_MAP_WC:
2674                 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2675                 break;
2676         }
2677         addr = vmap(pages, n_pages, 0, pgprot);
2678
2679         if (pages != stack_pages)
2680                 kvfree(pages);
2681
2682         return addr;
2683 }
2684
2685 /* get, pin, and map the pages of the object into kernel space */
2686 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2687                               enum i915_map_type type)
2688 {
2689         enum i915_map_type has_type;
2690         bool pinned;
2691         void *ptr;
2692         int ret;
2693
2694         if (unlikely(!i915_gem_object_has_struct_page(obj)))
2695                 return ERR_PTR(-ENXIO);
2696
2697         ret = mutex_lock_interruptible(&obj->mm.lock);
2698         if (ret)
2699                 return ERR_PTR(ret);
2700
2701         pinned = !(type & I915_MAP_OVERRIDE);
2702         type &= ~I915_MAP_OVERRIDE;
2703
2704         if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2705                 if (unlikely(!i915_gem_object_has_pages(obj))) {
2706                         GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2707
2708                         ret = ____i915_gem_object_get_pages(obj);
2709                         if (ret)
2710                                 goto err_unlock;
2711
2712                         smp_mb__before_atomic();
2713                 }
2714                 atomic_inc(&obj->mm.pages_pin_count);
2715                 pinned = false;
2716         }
2717         GEM_BUG_ON(!i915_gem_object_has_pages(obj));
2718
2719         ptr = page_unpack_bits(obj->mm.mapping, &has_type);
2720         if (ptr && has_type != type) {
2721                 if (pinned) {
2722                         ret = -EBUSY;
2723                         goto err_unpin;
2724                 }
2725
2726                 if (is_vmalloc_addr(ptr))
2727                         vunmap(ptr);
2728                 else
2729                         kunmap(kmap_to_page(ptr));
2730
2731                 ptr = obj->mm.mapping = NULL;
2732         }
2733
2734         if (!ptr) {
2735                 ptr = i915_gem_object_map(obj, type);
2736                 if (!ptr) {
2737                         ret = -ENOMEM;
2738                         goto err_unpin;
2739                 }
2740
2741                 obj->mm.mapping = page_pack_bits(ptr, type);
2742         }
2743
2744 out_unlock:
2745         mutex_unlock(&obj->mm.lock);
2746         return ptr;
2747
2748 err_unpin:
2749         atomic_dec(&obj->mm.pages_pin_count);
2750 err_unlock:
2751         ptr = ERR_PTR(ret);
2752         goto out_unlock;
2753 }
2754
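/*
 * Illustrative in-kernel sketch (not taken from this file): mapping an
 * object for CPU access typically looks like
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	...
 *	i915_gem_object_unpin_map(obj);
 *
 * The mapping is cached in obj->mm.mapping, so a repeated pin_map with
 * the same type reuses it.
 */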
2755 static int
2756 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2757                            const struct drm_i915_gem_pwrite *arg)
2758 {
2759         struct address_space *mapping = obj->base.filp->f_mapping;
2760         char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2761         u64 remain, offset;
2762         unsigned int pg;
2763
2764         /* Before we instantiate/pin the backing store for our use, we
2765          * can prepopulate the shmemfs filp efficiently using a write into
2766          * the pagecache. We avoid the penalty of instantiating all the
2767          * pages, important if the user is just writing to a few and never
2768          * pages, which is important if the user is just writing to a few and
2769          * never uses the object on the GPU, and using a direct write into shmemfs
2770          * allows us to avoid the cost of retrieving a page (either swapin
2771          */
2772         if (i915_gem_object_has_pages(obj))
2773                 return -ENODEV;
2774
2775         if (obj->mm.madv != I915_MADV_WILLNEED)
2776                 return -EFAULT;
2777
2778         /* Before the pages are instantiated the object is treated as being
2779          * in the CPU domain. The pages will be clflushed as required before
2780          * use, and we can freely write into the pages directly. If userspace
2781          * races pwrite with any other operation, corruption will ensue -
2782          * that is userspace's prerogative!
2783          */
2784
2785         remain = arg->size;
2786         offset = arg->offset;
2787         pg = offset_in_page(offset);
2788
2789         do {
2790                 unsigned int len, unwritten;
2791                 struct page *page;
2792                 void *data, *vaddr;
2793                 int err;
2794
2795                 len = PAGE_SIZE - pg;
2796                 if (len > remain)
2797                         len = remain;
2798
2799                 err = pagecache_write_begin(obj->base.filp, mapping,
2800                                             offset, len, 0,
2801                                             &page, &data);
2802                 if (err < 0)
2803                         return err;
2804
2805                 vaddr = kmap(page);
2806                 unwritten = copy_from_user(vaddr + pg, user_data, len);
2807                 kunmap(page);
2808
2809                 err = pagecache_write_end(obj->base.filp, mapping,
2810                                           offset, len, len - unwritten,
2811                                           page, data);
2812                 if (err < 0)
2813                         return err;
2814
2815                 if (unwritten)
2816                         return -EFAULT;
2817
2818                 remain -= len;
2819                 user_data += len;
2820                 offset += len;
2821                 pg = 0;
2822         } while (remain);
2823
2824         return 0;
2825 }
2826
2827 static bool ban_context(const struct i915_gem_context *ctx,
2828                         unsigned int score)
2829 {
2830         return (i915_gem_context_is_bannable(ctx) &&
2831                 score >= CONTEXT_SCORE_BAN_THRESHOLD);
2832 }
2833
2834 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
2835 {
2836         unsigned int score;
2837         bool banned;
2838
2839         atomic_inc(&ctx->guilty_count);
2840
2841         score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
2842         banned = ban_context(ctx, score);
2843         DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
2844                          ctx->name, score, yesno(banned));
2845         if (!banned)
2846                 return;
2847
2848         i915_gem_context_set_banned(ctx);
2849         if (!IS_ERR_OR_NULL(ctx->file_priv)) {
2850                 atomic_inc(&ctx->file_priv->context_bans);
2851                 DRM_DEBUG_DRIVER("client %s has had %d contexts banned\n",
2852                                  ctx->name, atomic_read(&ctx->file_priv->context_bans));
2853         }
2854 }
2855
2856 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
2857 {
2858         atomic_inc(&ctx->active_count);
2859 }
2860
2861 struct drm_i915_gem_request *
2862 i915_gem_find_active_request(struct intel_engine_cs *engine)
2863 {
2864         struct drm_i915_gem_request *request, *active = NULL;
2865         unsigned long flags;
2866
2867         /* We are called by the error capture and reset at a random
2868          * point in time. In particular, note that neither is crucially
2869          * ordered with an interrupt. After a hang, the GPU is dead and we
2870          * assume that no more writes can happen (we waited long enough for
2871          * all writes that were in transaction to be flushed) - adding an
2872          * extra delay for a recent interrupt is pointless. Hence, we do
2873          * not need an engine->irq_seqno_barrier() before the seqno reads.
2874          */
2875         spin_lock_irqsave(&engine->timeline->lock, flags);
2876         list_for_each_entry(request, &engine->timeline->requests, link) {
2877                 if (__i915_gem_request_completed(request,
2878                                                  request->global_seqno))
2879                         continue;
2880
2881                 GEM_BUG_ON(request->engine != engine);
2882                 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2883                                     &request->fence.flags));
2884
2885                 active = request;
2886                 break;
2887         }
2888         spin_unlock_irqrestore(&engine->timeline->lock, flags);
2889
2890         return active;
2891 }
2892
2893 static bool engine_stalled(struct intel_engine_cs *engine)
2894 {
2895         if (!engine->hangcheck.stalled)
2896                 return false;
2897
2898         /* Check for possible seqno movement after hang declaration */
2899         if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
2900                 DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
2901                 return false;
2902         }
2903
2904         return true;
2905 }
2906
2907 /*
2908  * Ensure the irq handler finishes, and is not run again.
2909  * Also return the active request so that we only search for it once.
2910  */
2911 struct drm_i915_gem_request *
2912 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
2913 {
2914         struct drm_i915_gem_request *request = NULL;
2915
2916         /*
2917          * During the reset sequence, we must prevent the engine from
2918          * entering RC6. As the context state is undefined until we restart
2919          * the engine, if it does enter RC6 during the reset, the state
2920          * written to the powercontext is undefined and so we may lose
2921          * GPU state upon resume, i.e. fail to restart after a reset.
2922          */
2923         intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
2924
2925         /*
2926          * Prevent the signaler thread from updating the request
2927          * state (by calling dma_fence_signal) as we are processing
2928          * the reset. The write from the GPU of the seqno is
2929          * asynchronous and the signaler thread may see a different
2930          * value from us and declare the request complete, even though
2931          * the reset routine has picked that request as the active
2932          * (incomplete) request. This conflict is not handled
2933          * gracefully!
2934          */
2935         kthread_park(engine->breadcrumbs.signaler);
2936
2937         /*
2938          * Prevent request submission to the hardware until we have
2939          * completed the reset in i915_gem_reset_finish(). If a request
2940          * is completed by one engine, it may then queue a request
2941          * to a second via its execlists->tasklet *just* as we are
2942          * calling engine->init_hw() and also writing the ELSP.
2943          * Turning off the execlists->tasklet until the reset is over
2944          * prevents the race.
2945          */
2946         tasklet_kill(&engine->execlists.tasklet);
2947         tasklet_disable(&engine->execlists.tasklet);
2948
2949         /*
2950          * In GuC submission mode we use a worker to queue preemption
2951          * requests from the tasklet.
2952          * Even though the tasklet was disabled, we may still have a worker
2953          * queued. Make sure that all workers scheduled before disabling the
2954          * tasklet have completed before continuing with the reset.
2955          */
2956         if (engine->i915->guc.preempt_wq)
2957                 flush_workqueue(engine->i915->guc.preempt_wq);
2958
2959         if (engine->irq_seqno_barrier)
2960                 engine->irq_seqno_barrier(engine);
2961
2962         request = i915_gem_find_active_request(engine);
2963         if (request && request->fence.error == -EIO)
2964                 request = ERR_PTR(-EIO); /* Previous reset failed! */
2965
2966         return request;
2967 }
2968
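/*
 * Prepare all engines for reset: quiesce the signalers and submission
 * tasklets, stash the request that was executing at the time of the hang
 * and revoke the fence registers before the actual GPU reset is performed.
 */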
2969 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
2970 {
2971         struct intel_engine_cs *engine;
2972         struct drm_i915_gem_request *request;
2973         enum intel_engine_id id;
2974         int err = 0;
2975
2976         for_each_engine(engine, dev_priv, id) {
2977                 request = i915_gem_reset_prepare_engine(engine);
2978                 if (IS_ERR(request)) {
2979                         err = PTR_ERR(request);
2980                         continue;
2981                 }
2982
2983                 engine->hangcheck.active_request = request;
2984         }
2985
2986         i915_gem_revoke_fences(dev_priv);
2987
2988         return err;
2989 }
2990
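/*
 * Overwrite the user payload of the request in the ringbuffer with zeroes
 * (i.e. MI_NOOPs), keeping only the breadcrumb at the end, and flag its
 * fence with -EIO.
 */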
2991 static void skip_request(struct drm_i915_gem_request *request)
2992 {
2993         void *vaddr = request->ring->vaddr;
2994         u32 head;
2995
2996         /* As this request likely depends on state from the lost
2997          * context, clear out all the user operations, leaving the
2998          * breadcrumb at the end (so we get the fence notifications).
2999          */
3000         head = request->head;
3001         if (request->postfix < head) {
3002                 memset(vaddr + head, 0, request->ring->size - head);
3003                 head = 0;
3004         }
3005         memset(vaddr + head, 0, request->postfix - head);
3006
3007         dma_fence_set_error(&request->fence, -EIO);
3008 }
3009
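/*
 * Cancel every remaining request that belongs to the hung context: the
 * requests queued after this one on the engine timeline as well as those
 * still sitting on the context's own timeline.
 */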
3010 static void engine_skip_context(struct drm_i915_gem_request *request)
3011 {
3012         struct intel_engine_cs *engine = request->engine;
3013         struct i915_gem_context *hung_ctx = request->ctx;
3014         struct intel_timeline *timeline;
3015         unsigned long flags;
3016
3017         timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
3018
3019         spin_lock_irqsave(&engine->timeline->lock, flags);
3020         spin_lock(&timeline->lock);
3021
3022         list_for_each_entry_continue(request, &engine->timeline->requests, link)
3023                 if (request->ctx == hung_ctx)
3024                         skip_request(request);
3025
3026         list_for_each_entry(request, &timeline->requests, link)
3027                 skip_request(request);
3028
3029         spin_unlock(&timeline->lock);
3030         spin_unlock_irqrestore(&engine->timeline->lock, flags);
3031 }
3032
3033 /* Returns the request if it was guilty of the hang */
3034 static struct drm_i915_gem_request *
3035 i915_gem_reset_request(struct intel_engine_cs *engine,
3036                        struct drm_i915_gem_request *request)
3037 {
3038         /* The guilty request will get skipped on a hung engine.
3039          *
3040          * Users of client default contexts do not rely on logical
3041          * state preserved between batches so it is safe to execute
3042          * queued requests following the hang. Non-default contexts
3043          * rely on preserved state, so skipping a batch loses the
3044          * evolution of the state and it needs to be considered corrupted.
3045          * Executing more queued batches on top of corrupted state is
3046          * risky. But we take that risk by trying to advance through
3047          * the queued requests in order to make the client behaviour
3048          * more predictable around resets, by not throwing away a random
3049          * amount of the batches it has prepared for execution. Sophisticated
3050          * clients can use gem_reset_stats_ioctl and dma fence status
3051          * (exported via the sync_file info ioctl on explicit fences) to
3052          * observe when they lose the context state and rebuild accordingly
3053          * (see the illustrative userspace sketch after this function).
3054          *
3055          * The context ban, and ultimately the client ban, mechanisms are safety
3056          * valves if client submission ends up resulting only in subsequent hangs.
3057          */
3058
3059         if (engine_stalled(engine)) {
3060                 i915_gem_context_mark_guilty(request->ctx);
3061                 skip_request(request);
3062
3063                 /* If this context is now banned, skip all pending requests. */
3064                 if (i915_gem_context_is_banned(request->ctx))
3065                         engine_skip_context(request);
3066         } else {
3067                 /*
3068                  * Since this is not the hung engine, it may have advanced
3069                  * since the hang declaration. Double check by refinding
3070                  * the active request at the time of the reset.
3071                  */
3072                 request = i915_gem_find_active_request(engine);
3073                 if (request) {
3074                         i915_gem_context_mark_innocent(request->ctx);
3075                         dma_fence_set_error(&request->fence, -EAGAIN);
3076
3077                         /* Rewind the engine to replay the incomplete rq */
3078                         spin_lock_irq(&engine->timeline->lock);
3079                         request = list_prev_entry(request, link);
3080                         if (&request->link == &engine->timeline->requests)
3081                                 request = NULL;
3082                         spin_unlock_irq(&engine->timeline->lock);
3083                 }
3084         }
3085
3086         return request;
3087 }
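/*
 * Illustrative sketch only, not part of the driver: roughly how a userspace
 * client could use the reset statistics mentioned in the comment above to
 * notice that its context was affected by a reset. The helper name and the
 * minimal error handling are hypothetical; the ioctl and structure come from
 * the uapi header (drm/i915_drm.h, include path may vary with the libdrm or
 * kernel headers installation).
 *
 *	#include <stdbool.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static bool context_hit_by_reset(int drm_fd, __u32 ctx_id)
 *	{
 *		struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
 *			return false;	// query failed, assume unaffected
 *
 *		// batch_active: batches lost while running on the GPU,
 *		// batch_pending: batches lost while queued for execution.
 *		return stats.batch_active || stats.batch_pending;
 *	}
 *
 * A client that sees either counter increase should assume the accumulated
 * context state may be gone and rebuild it before submitting further work.
 */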
3088
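/*
 * Reset a single engine: mark the guilty or innocent request via
 * i915_gem_reset_request() and then ask the backend to set up the CS to
 * resume from the breadcrumb of the hung request.
 */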
3089 void i915_gem_reset_engine(struct intel_engine_cs *engine,
3090                            struct drm_i915_gem_request *request)
3091 {
3092         /*
3093          * Make sure this write is visible before we re-enable the interrupt
3094          * handlers on another CPU, as tasklet_enable() resolves to just
3095          * a compiler barrier which is insufficient for our purpose here.
3096          */
3097         smp_store_mb(engine->irq_posted, 0);
3098
3099         if (request)
3100                 request = i915_gem_reset_request(engine, request);
3101
3102         if (request) {
3103                 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
3104                                  engine->name, request->global_seqno);
3105         }
3106
3107         /* Set up the CS to resume from the breadcrumb of the hung request */
3108         engine->reset_hw(engine, request);
3109 }
3110
3111 void i915_gem_reset(struct drm_i915_private *dev_priv)
3112 {
3113         struct intel_engine_cs *engine;
3114         enum intel_engine_id id;
3115
3116         lockdep_assert_held(&dev_priv->drm.struct_mutex);
3117
3118         i915_gem_retire_requests(dev_priv);
3119
3120         for_each_engine(engine, dev_priv, id) {
3121                 struct i915_gem_context *ctx;
3122
3123                 i915_gem_reset_engine(engine, engine->hangcheck.active_request);
3124                 ctx = fetch_and_zero(&engine->last_retired_context);
3125                 if (ctx)
3126                         engine->context_unpin(engine, ctx);
3127
3128                 /*
3129                  * Ostensibly, we always want a context loaded for powersaving,
3130                  * so if the engine is idle after the reset, send a request
3131                  * to load our scratch kernel_context.
3132                  *
3133                  * More mysteriously, if we leave the engine idle after a reset,
3134                  * the next userspace batch may hang, with what appears to be
3135                  * an incoherent read by the CS (presumably stale TLB). An
3136                  * empty request appears sufficient to paper over the glitch.
3137                  */
3138                 if (list_empty(&engine->timeline->requests)) {
3139                         struct drm_i915_gem_request *rq;
3140
3141                         rq = i915_gem_request_alloc(engine,
3142                                                     dev_priv->kernel_context);
3143                         if (!IS_ERR(rq))
3144                                 __i915_add_request(rq, false);
3145                 }
3146         }
3147
3148         i915_gem_restore_fences(dev_priv);
3149
3150         if (dev_priv->gt.awake) {
3151                 intel_sanitize_gt_powersave(dev_priv);
3152                 intel_enable_gt_powersave(dev_priv);
3153                 if (INTEL_GEN(dev_priv) >= 6)
3154                         gen6_rps_busy(dev_priv);
3155         }
3156 }
3157
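/*
 * Undo i915_gem_reset_prepare_engine(): re-enable the submission tasklet,
 * unpark the signaler thread and release the forcewake reference that kept
 * the engine out of RC6 during the reset.
 */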
3158 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
3159 {
3160         tasklet_enable(&engine->execlists.tasklet);
3161         kthread_unpark(engine->breadcrumbs.signaler);
3162
3163         intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
3164 }
3165
3166 void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
3167 {
3168         struct intel_engine_cs *engine;
3169         enum intel_engine_id id;
3170
3171         lockdep_assert_held(&dev_priv->drm.struct_mutex);
3172
3173         for_each_engine(engine, dev_priv, id) {
3174                 engine->hangcheck.active_request = NULL;
3175                 i915_gem_reset_finish_engine(engine);
3176         }
3177 }
3178
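/*
 * Submission stub installed while wedging the GPU: mark the request as
 * failed with -EIO and complete the software side of submission without
 * touching the hardware.
 */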
3179 static void nop_submit_request(struct drm_i915_gem_request *request)
3180 {
3181         dma_fence_set_error(&request->fence, -EIO);
3182
3183         i915_gem_request_submit(request);
3184 }
3185
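/*
 * As nop_submit_request(), but additionally advance the engine's global
 * seqno to this request so that waiters observe it as completed.
 */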
3186 static void nop_complete_submit_request(struct drm_i915_gem_request *request)
3187 {
3188         unsigned long flags;
3189
3190         dma_fence_set_error(&request->fence, -EIO);
3191
3192         spin_lock_irqsave(&request->engine->timeline->lock, flags);
3193         __i915_gem_request_submit(request);
3194         intel_engine_init_global_seqno(request->engine, request->global_seqno);
3195         spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
3196 }
3197
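/*
 * Declare the GPU terminally wedged: stop all submission to the hardware,
 * complete every in-flight request with -EIO and wake anyone waiting on the
 * reset queue.
 */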
3198 void i915_gem_set_wedged(struct drm_i915_private *i915)
3199 {
3200         struct intel_engine_cs *engine;
3201         enum intel_engine_id id;
3202
3203         /*
3204          * First, stop submission to hw, but do not yet complete requests by
3205          * rolling the global seqno forward (since this would complete requests
3206          * for which we haven't set the fence error to EIO yet).
3207          */
3208         for_each_engine(engine, i915, id) {
3209                 i915_gem_reset_prepare_engine(engine);
3210                 engine->submit_request = nop_submit_request;
3211         }
3212
3213         /*
3214          * Make sure no one is running the old callback before we proceed with
3215          * cancelling requests and resetting the completion tracking. Otherwise
3216          * we might submit a request to the hardware which never completes.
3217          */
3218         synchronize_rcu();
3219
3220         for_each_engine(engine, i915, id) {
3221                 /* Mark all executing requests as skipped */
3222                 engine->cancel_requests(engine);
3223
3224                 /*
3225                  * Only once we've force-cancelled all in-flight requests can we
3226                  * start to complete all requests.
3227                  */
3228                 engine->submit_request = nop_complete_submit_request;
3229         }
3230
3231         /*
3232          * Make sure no request can slip through without getting completed by
3233          * either this call here to intel_engine_init_global_seqno, or the one
3234          * in nop_complete_submit_request.
3235          */
3236         synchronize_rcu();
3237
3238         for_each_engine(engine, i915, id) {
3239                 unsigned long flags;
3240
3241                 /* Mark all pending requests as complete so that any concurrent
3242                  * (lockless) lookup doesn't try to wait upon the request as we
3243                  * reset it.
3244                  */
3245                 spin_lock_irqsave(&engine->timeline->lock, flags);
3246                 intel_engine_init_global_seqno(engine,
3247                                                intel_engine_last_submit(engine));
3248                 spin_unlock_irqrestore(&engine->timeline->lock, flags);
3249
3250                 i915_gem_reset_finish_engine(engine);
3251         }
3252
3253         set_bit(I915_WEDGED, &i915->gpu_error.flags);
3254         wake_up_all(&i915->gpu_error.reset_queue);
3255 }
3256
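/*
 * Attempt to recover from a wedged state: wait for all the poisoned requests
 * to drain, restore the default submission paths and clear I915_WEDGED so
 * that execbuf is allowed again. Returns false if the pending requests could
 * not be flushed (e.g. the wait was interrupted).
 */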
3257 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
3258 {
3259         struct i915_gem_timeline *tl;
3260         int i;
3261
3262         lockdep_assert_held(&i915->drm.struct_mutex);
3263         if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
3264                 return true;
3265
3266         /* Before unwedging, make sure that all pending operations
3267          * are flushed and errored out - we may have requests waiting upon
3268          * third party fences. We marked all inflight requests as EIO, and
3269          * every execbuf since then has returned EIO; for consistency we want
3270          * all the currently pending requests to also be marked as EIO, which
3271          * is done inside our nop_submit_request - and so we must wait.
3272          *
3273          * No more can be submitted until we reset the wedged bit.
3274          */
3275         list_for_each_entry(tl, &i915->gt.timelines, link) {
3276                 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3277                         struct drm_i915_gem_request *rq;
3278
3279                         rq = i915_gem_active_peek(&tl->engine[i].last_request,
3280                                                   &i915->drm.struct_mutex);
3281                         if (!rq)
3282                                 continue;
3283
3284                         /* We can't use our normal waiter as we want to
3285                          * avoid recursively trying to handle the current
3286                          * reset. The basic dma_fence_default_wait() installs
3287                          * a callback for dma_fence_signal(), which is
3288                          * triggered by our nop handler (indirectly: the
3289                          * callback enables the signaler thread, which is
3290                          * woken by nop_submit_request() advancing the seqno;
3291                          * when the seqno passes the fence, the signaler
3292                          * then signals the fence, waking us up).
3293                          */
3294                         if (dma_fence_default_wait(&rq->fence, true,
3295                                                    MAX_SCHEDULE_TIMEOUT) < 0)
3296                                 return false;
3297                 }
3298         }
3299
3300         /* Undo nop_submit_request. We prevent all new i915 requests from
3301          * being queued (by disallowing execbuf whilst wedged) so having
3302          * waited for all active requests above, we know the system is idle
3303          * and do not have to worry about a thread being inside
3304          * engine->submit_request() as we swap over. So unlike installing
3305          * the nop_submit_request on reset, we can do this from normal
3306          * context and do not require stop_machine().
3307          */
3308         intel_engines_reset_default_submission(i915);
3309         i915_gem_contexts_lost(i915);
3310
3311         smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
3312         clear_bit(I915_WEDGED, &i915->gpu_error.flags);
3313
3314         return true;
3315 }
3316
3317 static void
3318 i915_gem_retire_work_handler(struct work_struct *work)
3319 {
3320         struct drm_i915_private *dev_priv =