/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/sort.h>
#include "intel_drv.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static __always_inline void seq_print_param(struct seq_file *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
}

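/*
 * Example of the resulting output (illustrative only; the exact set of
 * module parameters comes from I915_PARAMS_FOR_EACH() and varies between
 * kernel versions):
 *
 *	i915.enable_execlists=-1
 *	i915.enable_hangcheck=yes
 */
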
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	kernel_param_lock(THIS_MODULE);
#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
	I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

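/*
 * Example usage from userspace, assuming debugfs is mounted in the default
 * location and the device is DRM minor 0 (values illustrative):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_capabilities
 *	gen: 9
 *	platform: SKYLAKE
 *	pch: 3
 *	is_mobile: no
 *	...
 */
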
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_display ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return !list_empty(&obj->userfault_link) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

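/*
 * Legend for the single-character flags printed by describe_obj() below:
 * '*' the object is active on the GPU, 'p' pinned for display, ' '/'X'/'Y'
 * the tiling mode, 'g' the object has a GGTT mmap outstanding (it sits on
 * the userfault list), and 'M' the object is kernel-mapped via
 * obj->mm.mapping.
 */
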
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size);
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

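/*
 * A describe_obj() line therefore looks roughly like (values made up):
 *
 *	ffff88021e1d2600: * Xg      16384KiB 40 40 uncached dirty (name: 5)
 *	    (pinned x 1) (ggtt offset: 00a00000, size: 01000000, normal) (rcs0)
 */
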
static int obj_rank_by_stolen(const void *A, const void *B)
{
	const struct drm_i915_gem_object *a =
		*(const struct drm_i915_gem_object **)A;
	const struct drm_i915_gem_object *b =
		*(const struct drm_i915_gem_object **)B;

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}

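/*
 * Standard sort() comparator contract: negative/zero/positive ordering.
 * It compares pointers-to-pointers because the stolen objects below are
 * snapshotted into a kvmalloc'ed array of object pointers before sorting.
 */
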
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}

struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

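/*
 * The macro expands against 'stats' by name, so the seq_printf() only runs
 * when stats.count is non-zero.  A populated line looks like (illustrative
 * values):
 *
 *	Xorg: 312 objects, 201326592 bytes (15728640 active, 18350080
 *	inactive, 4096 global, 0 shared, 12582912 unbound)
 */
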
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}

static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count;
	u64 size, mapped_size, purgeable_size, dpy_size;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->pin_display) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u display objects (pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		mutex_lock(&dev->struct_mutex);
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	bool show_pin_display_only = !!node->info_ent->data;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (show_pin_display_only && !obj->pin_display)
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = crtc->flip_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 pending;
			u32 addr;

			pending = atomic_read(&work->pending);
			if (pending) {
				seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *engine = work->flip_queued_req->engine;

				seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n",
					   engine->name,
					   work->flip_queued_req->global_seqno,
					   intel_engine_last_submit(engine),
					   intel_engine_get_seqno(engine),
					   i915_gem_request_completed(work->flip_queued_req));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   intel_crtc_get_vblank_counter(crtc));
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_GEN(dev_priv) >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
				total++;
			}
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void print_request(struct seq_file *m,
			  struct drm_i915_gem_request *rq,
			  const char *prefix)
{
	seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   rq->timeline->common->name);
}

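/*
 * Example print_request() line (illustrative values and timeline name):
 *
 *	4ab2 [5:4ab4] prio=0 @ 120ms: Xorg[1234]
 *
 * i.e. global seqno, [context hw_id : per-timeline fence seqno], priority,
 * age since emission, and the timeline's common name.
 */
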
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_request *req;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret, any;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_engine(engine, dev_priv, id) {
		int count;

		count = 0;
		list_for_each_entry(req, &engine->timeline->requests, link)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", engine->name, count);
		list_for_each_entry(req, &engine->timeline->requests, link)
			print_request(m, req, "    ");

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, intel_engine_get_seqno(engine));

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		i915_ring_seqno_info(m, engine);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	for_each_engine(engine, dev_priv, id) {
		if (INTEL_GEN(dev_priv) >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;

	intel_runtime_pm_get(i915);
	gpu = i915_capture_gpu_state(i915);
	intel_runtime_pm_put(i915);
	if (!gpu)
		return -ENOMEM;

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

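/*
 * Usage sketch (paths assume debugfs at /sys/kernel/debug and DRM minor 0):
 * "cat /sys/kernel/debug/dri/0/i915_gpu_info" captures and prints a fresh
 * snapshot of the current GPU state on demand, while i915_error_state below
 * replays the state recorded at the last hang; writing anything to the
 * latter clears the saved error state.
 */
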
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_global_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");

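/*
 * Write-only attribute (the getter is NULL), e.g.
 *
 *	# echo 0xfffff000 > /sys/kernel/debug/dri/0/i915_next_seqno
 *
 * forces the next global seqno, which is mainly useful for exercising the
 * seqno wraparound paths in testing.
 */
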
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev_priv))
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		if (IS_GEN9(dev_priv))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   dev_priv->rps.pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}

static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}

static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   engine->timeline->inflight_seqnos);
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");
			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");
			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	for_each_fw_domain(fw_domain, i915, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}

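/*
 * Example print_rc6_res() output line (illustrative values):
 *
 *	Render RC6 residency since boot: 102903 (8372054 us)
 *
 * i.e. the raw residency register value followed by the decoded residency
 * in microseconds.
 */
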
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rpmodectl1, rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
	unsigned forcewake_count;
	int count = 0;

	forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	seq_printf(m, "RC6 voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int err;

	intel_runtime_pm_get(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		err = vlv_drpc_info(m);
	else if (INTEL_GEN(dev_priv) >= 6)
		err = gen6_drpc_info(m);
	else
		err = ironlake_drpc_info(m);

	intel_runtime_pm_put(dev_priv);

	return err;
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_FBC(dev_priv)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->fbc.lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n",
			   dev_priv->fbc.no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&dev_priv->fbc.lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");

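/*
 * Exposed as a simple numeric attribute, so FBC false color can be toggled
 * from userspace with e.g.
 *
 *	# echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 *
 * (gen7+ with FBC only; see the -ENODEV checks above).
 */
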
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_IPS(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_LLC(dev_priv)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	if (IS_GEN9_BC(dev_priv)) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq =
			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
		max_gpu_freq =
			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

out:
	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, fbdev_fb->obj);
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
		   ring->space, ring->head, ring->tail);
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce = &ctx->engine[engine->id];

			seq_printf(m, "%s: ", engine->name);
			seq_putc(m, ce->initialised ? 'I' : 'i');
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_printf(m,
			   "\tvma hashtable size=%u (actual %lu), count=%u\n",
			   ctx->vma_lut.ht_size,
			   BIT(ctx->vma_lut.ht_bits),
			   ctx->vma_lut.ht_count);

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void i915_dump_lrc_obj(struct seq_file *m,
			      struct i915_gem_context *ctx,
			      struct intel_engine_cs *engine)
{
	struct i915_vma *vma = ctx->engine[engine->id].state;
	struct page *page;
	int j;

	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);

	if (!vma) {
		seq_puts(m, "\tFake context\n");
		return;
	}

	if (vma->flags & I915_VMA_GLOBAL_BIND)
		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
			   i915_ggtt_offset(vma));

	if (i915_gem_object_pin_pages(vma->obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n\n");
		return;
	}

	page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
	if (page) {
		u32 *reg_state = kmap_atomic(page);

		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
			seq_printf(m,
				   "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
				   j * 4,
				   reg_state[j], reg_state[j + 1],
				   reg_state[j + 2], reg_state[j + 3]);
		}
		kunmap_atomic(reg_state);
	}

	i915_gem_object_unpin_pages(vma->obj);
	seq_putc(m, '\n');
}

static int i915_dump_lrc(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	if (!i915.enable_execlists) {
		seq_printf(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link)
		for_each_engine(engine, dev_priv, id)
			i915_dump_lrc_obj(m, ctx, engine);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

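/*
 * Informal background: the strings above name the physical address bits the
 * memory controller XORs into bit 6 of a tiled address; for "bit9/bit10"
 * that is roughly bit6' = bit6 ^ bit9 ^ bit10. This is why swizzled, tiled
 * pages cannot simply be swapped in and out without fixups.
 */
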
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}


static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}

static void gen8_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}

static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
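
/*
 * Dump the gen-specific PPGTT registers, then walk every open DRM file
 * and dump the PPGTT state of each of its contexts.
 */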
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}

static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int count = 0;

	for_each_engine(engine, i915, id)
		count += intel_engine_has_waiter(engine);

	return count;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}
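
/*
 * Summarise RPS (render P-state) frequency limits and per-client boost
 * statistics; while the GPU is busy, also sample the up/down autotuning
 * counters.
 */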
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
	seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
	seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));

	mutex_lock(&dev->filelist_mutex);
	spin_lock(&dev_priv->rps.client_lock);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts%s\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   file_priv->rps.boosts,
			   list_empty(&file_priv->rps.link) ? "" : ", active");
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
	spin_unlock(&dev_priv->rps.client_lock);
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    dev_priv->rps.enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(dev_priv->rps.power));
		seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   dev_priv->rps.up_threshold);
		seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   dev_priv->rps.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
		   intel_uncore_edram_size(dev_priv)/1024/1024);

	return 0;
}
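
/* HuC firmware state as tracked by the fetch/load state machine. */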
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;

	if (!HAS_HUC_UCODE(dev_priv))
		return 0;

	seq_puts(m, "HuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n", huc_fw->path);
	seq_printf(m, "\tfetch: %s\n",
		   intel_uc_fw_status_repr(huc_fw->fetch_status));
	seq_printf(m, "\tload: %s\n",
		   intel_uc_fw_status_repr(huc_fw->load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		   huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		   huc_fw->major_ver_found, huc_fw->minor_ver_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		   huc_fw->header_offset, huc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		   huc_fw->ucode_offset, huc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		   huc_fw->rsa_offset, huc_fw->rsa_size);

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
	u32 tmp, i;

	if (!HAS_GUC_UCODE(dev_priv))
		return 0;

	seq_puts(m, "GuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n",
		   guc_fw->path);
	seq_printf(m, "\tfetch: %s\n",
		   intel_uc_fw_status_repr(guc_fw->fetch_status));
	seq_printf(m, "\tload: %s\n",
		   intel_uc_fw_status_repr(guc_fw->load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		   guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		   guc_fw->major_ver_found, guc_fw->minor_ver_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		   guc_fw->header_offset, guc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		   guc_fw->ucode_offset, guc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		   guc_fw->rsa_offset, guc_fw->rsa_size);

	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	seq_puts(m, "\nGuC logging stats:\n");

	seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);

	seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_DPC_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);

	seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);

	seq_printf(m, "\tTotal flush interrupt count: %u\n",
		   guc->log.flush_interrupt_count);

	seq_printf(m, "\tCapture miss count: %u\n",
		   guc->log.capture_miss_count);
}
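
/* Per-client GuC submission state: doorbell, work queue and submission counts. */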
static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct i915_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint64_t tot = 0;

	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		   client->priority, client->stage_id, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n",
		   client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
		   client->wq_size, client->wq_offset, client->wq_tail);

	seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
			   submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}

static bool check_guc_submission(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!guc->execbuf_client) {
		seq_printf(m, "GuC submission %s\n",
			   HAS_GUC_SCHED(dev_priv) ?
			   "disabled" :
			   "not supported");
		return false;
	}

	return true;
}

static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!check_guc_submission(m))
		return 0;

	seq_puts(m, "Doorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);

	i915_guc_log_info(m, dev_priv);

	/* Add more as required ... */

	return 0;
}
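
/*
 * Dump every active GuC stage descriptor, including the per-engine
 * execlist context entries used by the execbuf client.
 */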
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct i915_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!check_guc_submission(m))
		return 0;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
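
/*
 * Raw hexdump of the GuC log buffer, or of the load-time error log when
 * the debugfs node's data field is set.
 */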
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}

static int i915_guc_log_control_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	*val = i915.guc_log_level;

	return 0;
}

static int i915_guc_log_control_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	int ret;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_guc_log_control(dev_priv, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
			i915_guc_log_control_get, i915_guc_log_control_set,
			"%lld\n");
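
/* Translate the PSR2 status register state field into a state name. */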
static const char *psr2_live_status(u32 val)
{
	/*
	 * The table entries below were elided in this copy; they are
	 * restored here from the EDP_PSR2_STATUS_STATE_* hardware states
	 * and should be checked against the upstream source.
	 */
	static const char * const live_status[] = {
		"IDLE", "CAPTURE", "CAPTURE_FS", "SLEEP",
		"BUFON_FW", "ML_UP", "SU_STANDBY", "FAST_SLEEP",
		"DEEP_SLEEP", "BUF_ON", "TG_ON"
	};

	val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
	if (val < ARRAY_SIZE(live_status))
		return live_status[val];

	return "unknown";
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	if (!HAS_PSR(dev_priv)) {
		seq_puts(m, "PSR not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support)
			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
		else
			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
	} else {
		for_each_pipe(dev_priv, pipe) {
			enum transcoder cpu_transcoder =
				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain))
				continue;

			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;

			intel_display_power_put(dev_priv, power_domain);
		}
	}

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	if (!HAS_DDI(dev_priv))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no kind of performance counter.
	 * SKL+ Perf counter is reset to 0 every time DC state is entered.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	if (dev_priv->psr.psr2_support) {
		u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);

		seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
			   psr2, psr2_live_status(psr2));
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
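
/*
 * Ask the eDP sink for the CRC of the frame it received; typically used
 * by tests to verify end-to-end display output.
 */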
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_crtc *crtc;

		if (!connector->base.state->best_encoder)
			continue;

		crtc = connector->base.state->crtc;
		if (!crtc->state->active)
			continue;

		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_unlock_all(dev);
	return ret;
}
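
/*
 * Report cumulative GPU energy use in microjoules, scaling the MCH
 * energy status counter by the RAPL energy unit from MSR_RAPL_POWER_UNIT.
 */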
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u64 power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}

static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->domains)
			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}

static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}
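
/* List an encoder and its connectors beneath their CRTC. */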
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}

static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}

static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}

static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}

static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}

static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
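
/* Top-level per-connector dump: identity, display info and probed modes. */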
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}

static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}

static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused.
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",