/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>

#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic_uapi.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};
static const uint64_t i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
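
/*
 * Note: the modifier lists above and below carry no explicit length; they
 * are terminated by the DRM_FORMAT_MOD_INVALID sentinel, which is what the
 * DRM core looks for when the planes are initialized.
 */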
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

static const uint32_t skl_pri_planar_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_NV12,
};
static const uint64_t skl_format_modifiers_noccs[] = {
	I915_FORMAT_MOD_Yf_TILED,
	I915_FORMAT_MOD_Y_TILED,
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static const uint64_t skl_format_modifiers_ccs[] = {
	I915_FORMAT_MOD_Yf_TILED_CCS,
	I915_FORMAT_MOD_Y_TILED_CCS,
	I915_FORMAT_MOD_Yf_TILED,
	I915_FORMAT_MOD_Y_TILED,
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const uint64_t cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}
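
/*
 * The fuse field read above is used as an index into vco_freq[]: e.g. an
 * index of 1 selects 1600 MHz, which is returned as 1600000 kHz.
 */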
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
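
/*
 * The CCK clock is derived as cck = 2 * ref_freq / (divider + 1). For
 * example, ref_freq = 19200 kHz with a divider field of 11 gives
 * DIV_ROUND_CLOSEST(19200 << 1, 12) = 3200 kHz.
 */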
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */

	return dev_priv->fdi_pll_freq;
}
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10, .p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
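/*
 * For example, .n = { .min = 1, .max = 5 } below corresponds to actual N
 * divider values of 3..7 once the +2 from the clock calculation is applied.
 */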
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
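
/*
 * The "fast clock" here is 5x the pixel clock (the DPLL output before the
 * /5 applied in vlv_calc_dpll_params()), which is why the dot limits above
 * and below are expressed as <pixel rate> * 5.
 */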
static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};
static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
static void
skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
		return;

	if (enable)
		I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
	else
		I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
}

static void
skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
		return;

	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
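/*
 * Worked example for the i9xx-style helpers below (illustrative numbers
 * only): with refclk = 96000 kHz and n = 4, m1 = 18, m2 = 6, p1 = 2, p2 = 5
 * we get m = 5 * (18 + 2) + (6 + 2) = 108,
 * vco = 96000 * 108 / (4 + 2) = 1728000 kHz and
 * dot = vco / (p1 * p2) = 172800 kHz.
 */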
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
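
/*
 * Note: on VLV/CHV the computed clock->dot is the fast (5x) clock, so the
 * helpers above and below divide by 5 to report the pipe pixel clock, as
 * described in the comment block preceding them.
 */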
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
683 i9xx_find_best_dpll(const struct intel_limit *limit,
684 struct intel_crtc_state *crtc_state,
685 int target, int refclk, struct dpll *match_clock,
686 struct dpll *best_clock)
688 struct drm_device *dev = crtc_state->base.crtc->dev;
692 memset(best_clock, 0, sizeof(*best_clock));
694 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
696 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
698 for (clock.m2 = limit->m2.min;
699 clock.m2 <= limit->m2.max; clock.m2++) {
700 if (clock.m2 >= clock.m1)
702 for (clock.n = limit->n.min;
703 clock.n <= limit->n.max; clock.n++) {
704 for (clock.p1 = limit->p1.min;
705 clock.p1 <= limit->p1.max; clock.p1++) {
708 i9xx_calc_dpll_params(refclk, &clock);
709 if (!intel_PLL_is_valid(to_i915(dev),
714 clock.p != match_clock->p)
717 this_err = abs(clock.dot - target);
718 if (this_err < err) {
727 return (err != target);
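
/*
 * Illustrative use of the finder above (hypothetical caller, not taken from
 * this file):
 *
 *	struct dpll clock;
 *
 *	if (!i9xx_find_best_dpll(limit, crtc_state, target_khz, refclk_khz,
 *				 NULL, &clock))
 *		return -EINVAL;
 *
 * On success the dpll passed as best_clock holds the chosen n/m1/m2/p1/p2
 * dividers together with the derived m, p, vco and dot values.
 */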
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
741 pnv_find_best_dpll(const struct intel_limit *limit,
742 struct intel_crtc_state *crtc_state,
743 int target, int refclk, struct dpll *match_clock,
744 struct dpll *best_clock)
746 struct drm_device *dev = crtc_state->base.crtc->dev;
750 memset(best_clock, 0, sizeof(*best_clock));
752 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
754 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
756 for (clock.m2 = limit->m2.min;
757 clock.m2 <= limit->m2.max; clock.m2++) {
758 for (clock.n = limit->n.min;
759 clock.n <= limit->n.max; clock.n++) {
760 for (clock.p1 = limit->p1.min;
761 clock.p1 <= limit->p1.max; clock.p1++) {
764 pnv_calc_dpll_params(refclk, &clock);
765 if (!intel_PLL_is_valid(to_i915(dev),
770 clock.p != match_clock->p)
773 this_err = abs(clock.dot - target);
774 if (this_err < err) {
783 return (err != target);
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
797 g4x_find_best_dpll(const struct intel_limit *limit,
798 struct intel_crtc_state *crtc_state,
799 int target, int refclk, struct dpll *match_clock,
800 struct dpll *best_clock)
802 struct drm_device *dev = crtc_state->base.crtc->dev;
806 /* approximately equals target * 0.00585 */
807 int err_most = (target >> 8) + (target >> 9);
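	/* (1/256 + 1/512 = ~0.586%, hence the 0.00585 approximation above) */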
809 memset(best_clock, 0, sizeof(*best_clock));
811 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
813 max_n = limit->n.max;
814 /* based on hardware requirement, prefer smaller n to precision */
815 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
816 /* based on hardware requirement, prefere larger m1,m2 */
817 for (clock.m1 = limit->m1.max;
818 clock.m1 >= limit->m1.min; clock.m1--) {
819 for (clock.m2 = limit->m2.max;
820 clock.m2 >= limit->m2.min; clock.m2--) {
821 for (clock.p1 = limit->p1.max;
822 clock.p1 >= limit->p1.min; clock.p1--) {
825 i9xx_calc_dpll_params(refclk, &clock);
826 if (!intel_PLL_is_valid(to_i915(dev),
831 this_err = abs(clock.dot - target);
832 if (this_err < err_most) {
/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
849 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
850 const struct dpll *calculated_clock,
851 const struct dpll *best_clock,
852 unsigned int best_error_ppm,
853 unsigned int *error_ppm)
856 * For CHV ignore the error and consider only the P value.
857 * Prefer a bigger P value based on HW requirements.
859 if (IS_CHERRYVIEW(to_i915(dev))) {
862 return calculated_clock->p > best_clock->p;
865 if (WARN_ON_ONCE(!target_freq))
868 *error_ppm = div_u64(1000000ULL *
869 abs(target_freq - calculated_clock->dot),
872 * Prefer a better P value over a better (smaller) error if the error
873 * is small. Ensure this preference for future configurations too by
874 * setting the error to 0.
876 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
882 return *error_ppm + 10 < best_error_ppm;
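
/*
 * The "+ 10" above appears to add a small hysteresis (in ppm), so a new
 * candidate is only preferred when it is meaningfully better than the
 * current best.
 */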
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
891 vlv_find_best_dpll(const struct intel_limit *limit,
892 struct intel_crtc_state *crtc_state,
893 int target, int refclk, struct dpll *match_clock,
894 struct dpll *best_clock)
896 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
897 struct drm_device *dev = crtc->base.dev;
899 unsigned int bestppm = 1000000;
900 /* min update 19.2 MHz */
901 int max_n = min(limit->n.max, refclk / 19200);
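	/*
	 * i.e. clamp n so that refclk / n stays at or above 19.2 MHz, per the
	 * "min update 19.2 MHz" comment above.
	 */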
904 target *= 5; /* fast clock */
906 memset(best_clock, 0, sizeof(*best_clock));
908 /* based on hardware requirement, prefer smaller n to precision */
909 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
910 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
911 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
912 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
913 clock.p = clock.p1 * clock.p2;
914 /* based on hardware requirement, prefer bigger m1,m2 values */
915 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
918 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
921 vlv_calc_dpll_params(refclk, &clock);
923 if (!intel_PLL_is_valid(to_i915(dev),
928 if (!vlv_PLL_is_optimal(dev, target,
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
951 chv_find_best_dpll(const struct intel_limit *limit,
952 struct intel_crtc_state *crtc_state,
953 int target, int refclk, struct dpll *match_clock,
954 struct dpll *best_clock)
956 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
957 struct drm_device *dev = crtc->base.dev;
958 unsigned int best_error_ppm;
963 memset(best_clock, 0, sizeof(*best_clock));
964 best_error_ppm = 1000000;
	/*
	 * Based on the hardware doc, n is always set to 1 and m1 is always
	 * set to 2. If we ever need to support a 200 MHz refclk we will have
	 * to revisit this because n may no longer be 1.
	 */
971 clock.n = 1, clock.m1 = 2;
972 target *= 5; /* fast clock */
974 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
975 for (clock.p2 = limit->p2.p2_fast;
976 clock.p2 >= limit->p2.p2_slow;
977 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
978 unsigned int error_ppm;
980 clock.p = clock.p1 * clock.p2;
982 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
983 clock.n) << 22, refclk * clock.m1);
985 if (m2 > INT_MAX/clock.m1)
990 chv_calc_dpll_params(refclk, &clock);
992 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
995 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
996 best_error_ppm, &error_ppm))
1000 best_error_ppm = error_ppm;
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  target_clock, refclk, NULL, best_clock);
}
bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev_priv))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}
1064 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
1066 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1067 enum pipe pipe = crtc->pipe;
1069 /* Wait for the display line to settle/start moving */
1070 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
1071 DRM_ERROR("pipe %c scanline %s wait timed out\n",
1072 pipe_name(pipe), onoff(state));
1075 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
1077 wait_for_pipe_scanline_moving(crtc, false);
1080 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
1082 wait_for_pipe_scanline_moving(crtc, true);
1086 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
1088 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1089 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1091 if (INTEL_GEN(dev_priv) >= 4) {
1092 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1093 i915_reg_t reg = PIPECONF(cpu_transcoder);
1095 /* Wait for the Pipe State to go off */
1096 if (intel_wait_for_register(dev_priv,
1097 reg, I965_PIPECONF_ACTIVE, 0,
1099 WARN(1, "pipe_off wait timed out\n");
1101 intel_wait_for_pipe_scanline_stopped(crtc);
1105 /* Only for pre-ILK configs */
1106 void assert_pll(struct drm_i915_private *dev_priv,
1107 enum pipe pipe, bool state)
1112 val = I915_READ(DPLL(pipe));
1113 cur_state = !!(val & DPLL_VCO_ENABLE);
1114 I915_STATE_WARN(cur_state != state,
1115 "PLL state assertion failure (expected %s, current %s)\n",
1116 onoff(state), onoff(cur_state));
1119 /* XXX: the dsi pll is shared between MIPI DSI ports */
1120 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1125 mutex_lock(&dev_priv->sb_lock);
1126 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1127 mutex_unlock(&dev_priv->sb_lock);
1129 cur_state = val & DSI_PLL_VCO_EN;
1130 I915_STATE_WARN(cur_state != state,
1131 "DSI PLL state assertion failure (expected %s, current %s)\n",
1132 onoff(state), onoff(cur_state));
1135 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1136 enum pipe pipe, bool state)
1139 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1142 if (HAS_DDI(dev_priv)) {
1143 /* DDI does not have a specific FDI_TX register */
1144 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1145 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1147 u32 val = I915_READ(FDI_TX_CTL(pipe));
1148 cur_state = !!(val & FDI_TX_ENABLE);
1150 I915_STATE_WARN(cur_state != state,
1151 "FDI TX state assertion failure (expected %s, current %s)\n",
1152 onoff(state), onoff(cur_state));
1154 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1155 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1157 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1158 enum pipe pipe, bool state)
1163 val = I915_READ(FDI_RX_CTL(pipe));
1164 cur_state = !!(val & FDI_RX_ENABLE);
1165 I915_STATE_WARN(cur_state != state,
1166 "FDI RX state assertion failure (expected %s, current %s)\n",
1167 onoff(state), onoff(cur_state));
1169 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1170 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1172 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1177 /* ILK FDI PLL is always enabled */
1178 if (IS_GEN5(dev_priv))
1181 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1182 if (HAS_DDI(dev_priv))
1185 val = I915_READ(FDI_TX_CTL(pipe));
1186 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1189 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1190 enum pipe pipe, bool state)
1195 val = I915_READ(FDI_RX_CTL(pipe));
1196 cur_state = !!(val & FDI_RX_PLL_ENABLE);
1197 I915_STATE_WARN(cur_state != state,
1198 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1199 onoff(state), onoff(cur_state));
1202 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
1206 enum pipe panel_pipe = INVALID_PIPE;
1209 if (WARN_ON(HAS_DDI(dev_priv)))
1212 if (HAS_PCH_SPLIT(dev_priv)) {
1215 pp_reg = PP_CONTROL(0);
1216 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1219 case PANEL_PORT_SELECT_LVDS:
1220 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
1222 case PANEL_PORT_SELECT_DPA:
1223 intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1225 case PANEL_PORT_SELECT_DPC:
1226 intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1228 case PANEL_PORT_SELECT_DPD:
1229 intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1232 MISSING_CASE(port_sel);
1235 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1236 /* presumably write lock depends on pipe, not port select */
1237 pp_reg = PP_CONTROL(pipe);
1242 pp_reg = PP_CONTROL(0);
1243 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1245 WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
1246 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
1249 val = I915_READ(pp_reg);
1250 if (!(val & PANEL_POWER_ON) ||
1251 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1254 I915_STATE_WARN(panel_pipe == pipe && locked,
1255 "panel assertion failure, pipe %c regs locked\n",
1259 void assert_pipe(struct drm_i915_private *dev_priv,
1260 enum pipe pipe, bool state)
1263 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1265 enum intel_display_power_domain power_domain;
1267 /* we keep both pipes enabled on 830 */
1268 if (IS_I830(dev_priv))
1271 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1272 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
1273 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1274 cur_state = !!(val & PIPECONF_ENABLE);
1276 intel_display_power_put(dev_priv, power_domain);
1281 I915_STATE_WARN(cur_state != state,
1282 "pipe %c assertion failure (expected %s, current %s)\n",
1283 pipe_name(pipe), onoff(state), onoff(cur_state));
1286 static void assert_plane(struct intel_plane *plane, bool state)
1291 cur_state = plane->get_hw_state(plane, &pipe);
1293 I915_STATE_WARN(cur_state != state,
1294 "%s assertion failure (expected %s, current %s)\n",
1295 plane->base.name, onoff(state), onoff(cur_state));
1298 #define assert_plane_enabled(p) assert_plane(p, true)
1299 #define assert_plane_disabled(p) assert_plane(p, false)
1301 static void assert_planes_disabled(struct intel_crtc *crtc)
1303 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1304 struct intel_plane *plane;
1306 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1307 assert_plane_disabled(plane);
1310 static void assert_vblank_disabled(struct drm_crtc *crtc)
1312 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1313 drm_crtc_vblank_put(crtc);
1316 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1322 val = I915_READ(PCH_TRANSCONF(pipe));
1323 enabled = !!(val & TRANS_ENABLE);
1324 I915_STATE_WARN(enabled,
1325 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1329 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1330 enum pipe pipe, enum port port,
1333 enum pipe port_pipe;
1336 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1338 I915_STATE_WARN(state && port_pipe == pipe,
1339 "PCH DP %c enabled on transcoder %c, should be disabled\n",
1340 port_name(port), pipe_name(pipe));
1342 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1343 "IBX PCH DP %c still using transcoder B\n",
1347 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1348 enum pipe pipe, enum port port,
1349 i915_reg_t hdmi_reg)
1351 enum pipe port_pipe;
1354 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1356 I915_STATE_WARN(state && port_pipe == pipe,
1357 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1358 port_name(port), pipe_name(pipe));
1360 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1361 "IBX PCH HDMI %c still using transcoder B\n",
1365 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1368 enum pipe port_pipe;
1370 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
1371 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
1372 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
1374 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
1376 "PCH VGA enabled on transcoder %c, should be disabled\n",
1379 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
1381 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1384 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1385 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1386 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
1389 static void _vlv_enable_pll(struct intel_crtc *crtc,
1390 const struct intel_crtc_state *pipe_config)
1392 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1393 enum pipe pipe = crtc->pipe;
1395 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1396 POSTING_READ(DPLL(pipe));
1399 if (intel_wait_for_register(dev_priv,
1404 DRM_ERROR("DPLL %d failed to lock\n", pipe);
1407 static void vlv_enable_pll(struct intel_crtc *crtc,
1408 const struct intel_crtc_state *pipe_config)
1410 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1411 enum pipe pipe = crtc->pipe;
1413 assert_pipe_disabled(dev_priv, pipe);
1415 /* PLL is protected by panel, make sure we can write it */
1416 assert_panel_unlocked(dev_priv, pipe);
1418 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1419 _vlv_enable_pll(crtc, pipe_config);
1421 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1422 POSTING_READ(DPLL_MD(pipe));
1426 static void _chv_enable_pll(struct intel_crtc *crtc,
1427 const struct intel_crtc_state *pipe_config)
1429 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1430 enum pipe pipe = crtc->pipe;
1431 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1434 mutex_lock(&dev_priv->sb_lock);
1436 /* Enable back the 10bit clock to display controller */
1437 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1438 tmp |= DPIO_DCLKP_EN;
1439 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1441 mutex_unlock(&dev_priv->sb_lock);
	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1451 /* Check PLL is locked */
1452 if (intel_wait_for_register(dev_priv,
1453 DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1455 DRM_ERROR("PLL %d failed to lock\n", pipe);
1458 static void chv_enable_pll(struct intel_crtc *crtc,
1459 const struct intel_crtc_state *pipe_config)
1461 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1462 enum pipe pipe = crtc->pipe;
1464 assert_pipe_disabled(dev_priv, pipe);
1466 /* PLL is protected by panel, make sure we can write it */
1467 assert_panel_unlocked(dev_priv, pipe);
1469 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1470 _chv_enable_pll(crtc, pipe_config);
1472 if (pipe != PIPE_A) {
1474 * WaPixelRepeatModeFixForC0:chv
1476 * DPLLCMD is AWOL. Use chicken bits to propagate
1477 * the value from DPLLBMD to either pipe B or C.
1479 I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
1480 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1481 I915_WRITE(CBR4_VLV, 0);
1482 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1485 * DPLLB VGA mode also seems to cause problems.
1486 * We should always have it disabled.
1488 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1490 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1491 POSTING_READ(DPLL_MD(pipe));
1495 static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
1497 struct intel_crtc *crtc;
1500 for_each_intel_crtc(&dev_priv->drm, crtc) {
1501 count += crtc->base.state->active &&
1502 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1508 static void i9xx_enable_pll(struct intel_crtc *crtc,
1509 const struct intel_crtc_state *crtc_state)
1511 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1512 i915_reg_t reg = DPLL(crtc->pipe);
1513 u32 dpll = crtc_state->dpll_hw_state.dpll;
1516 assert_pipe_disabled(dev_priv, crtc->pipe);
1518 /* PLL is protected by panel, make sure we can write it */
1519 if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
1520 assert_panel_unlocked(dev_priv, crtc->pipe);
1522 /* Enable DVO 2x clock on both PLLs if necessary */
1523 if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
1525 * It appears to be important that we don't enable this
1526 * for the current pipe before otherwise configuring the
1527 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
1530 dpll |= DPLL_DVO_2X_MODE;
1531 I915_WRITE(DPLL(!crtc->pipe),
1532 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1536 * Apparently we need to have VGA mode enabled prior to changing
1537 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1538 * dividers, even though the register value does change.
1542 I915_WRITE(reg, dpll);
1544 /* Wait for the clocks to stabilize. */
1548 if (INTEL_GEN(dev_priv) >= 4) {
1549 I915_WRITE(DPLL_MD(crtc->pipe),
1550 crtc_state->dpll_hw_state.dpll_md);
1552 /* The pixel multiplier can only be updated once the
1553 * DPLL is enabled and the clocks are stable.
1555 * So write it again.
1557 I915_WRITE(reg, dpll);
1560 /* We do this three times for luck */
1561 for (i = 0; i < 3; i++) {
1562 I915_WRITE(reg, dpll);
1564 udelay(150); /* wait for warmup */
1568 static void i9xx_disable_pll(struct intel_crtc *crtc)
1570 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1571 enum pipe pipe = crtc->pipe;
1573 /* Disable DVO 2x clock on both PLLs if necessary */
1574 if (IS_I830(dev_priv) &&
1575 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
1576 !intel_num_dvo_pipes(dev_priv)) {
1577 I915_WRITE(DPLL(PIPE_B),
1578 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1579 I915_WRITE(DPLL(PIPE_A),
1580 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1583 /* Don't disable pipe or pipe PLLs if needed */
1584 if (IS_I830(dev_priv))
1587 /* Make sure the pipe isn't still relying on us */
1588 assert_pipe_disabled(dev_priv, pipe);
1590 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1591 POSTING_READ(DPLL(pipe));
1594 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1598 /* Make sure the pipe isn't still relying on us */
1599 assert_pipe_disabled(dev_priv, pipe);
1601 val = DPLL_INTEGRATED_REF_CLK_VLV |
1602 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1604 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1606 I915_WRITE(DPLL(pipe), val);
1607 POSTING_READ(DPLL(pipe));
1610 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1612 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1615 /* Make sure the pipe isn't still relying on us */
1616 assert_pipe_disabled(dev_priv, pipe);
1618 val = DPLL_SSC_REF_CLK_CHV |
1619 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1621 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1623 I915_WRITE(DPLL(pipe), val);
1624 POSTING_READ(DPLL(pipe));
1626 mutex_lock(&dev_priv->sb_lock);
1628 /* Disable 10bit clock to display controller */
1629 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1630 val &= ~DPIO_DCLKP_EN;
1631 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1633 mutex_unlock(&dev_priv->sb_lock);
1636 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1637 struct intel_digital_port *dport,
1638 unsigned int expected_mask)
1641 i915_reg_t dpll_reg;
1643 switch (dport->base.port) {
1645 port_mask = DPLL_PORTB_READY_MASK;
1649 port_mask = DPLL_PORTC_READY_MASK;
1651 expected_mask <<= 4;
1654 port_mask = DPLL_PORTD_READY_MASK;
1655 dpll_reg = DPIO_PHY_STATUS;
1661 if (intel_wait_for_register(dev_priv,
1662 dpll_reg, port_mask, expected_mask,
1664 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1665 port_name(dport->base.port),
1666 I915_READ(dpll_reg) & port_mask, expected_mask);
1669 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1672 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
1675 uint32_t val, pipeconf_val;
1677 /* Make sure PCH DPLL is enabled */
1678 assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1680 /* FDI must be feeding us bits for PCH ports */
1681 assert_fdi_tx_enabled(dev_priv, pipe);
1682 assert_fdi_rx_enabled(dev_priv, pipe);
1684 if (HAS_PCH_CPT(dev_priv)) {
1685 /* Workaround: Set the timing override bit before enabling the
1686 * pch transcoder. */
1687 reg = TRANS_CHICKEN2(pipe);
1688 val = I915_READ(reg);
1689 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1690 I915_WRITE(reg, val);
1693 reg = PCH_TRANSCONF(pipe);
1694 val = I915_READ(reg);
1695 pipeconf_val = I915_READ(PIPECONF(pipe));
1697 if (HAS_PCH_IBX(dev_priv)) {
1699 * Make the BPC in transcoder be consistent with
1700 * that in pipeconf reg. For HDMI we must use 8bpc
1701 * here for both 8bpc and 12bpc.
1703 val &= ~PIPECONF_BPC_MASK;
1704 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
1705 val |= PIPECONF_8BPC;
1707 val |= pipeconf_val & PIPECONF_BPC_MASK;
1710 val &= ~TRANS_INTERLACE_MASK;
1711 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1712 if (HAS_PCH_IBX(dev_priv) &&
1713 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
1714 val |= TRANS_LEGACY_INTERLACED_ILK;
1716 val |= TRANS_INTERLACED;
1718 val |= TRANS_PROGRESSIVE;
1720 I915_WRITE(reg, val | TRANS_ENABLE);
1721 if (intel_wait_for_register(dev_priv,
1722 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1724 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1727 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1728 enum transcoder cpu_transcoder)
1730 u32 val, pipeconf_val;
1732 /* FDI must be feeding us bits for PCH ports */
1733 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1734 assert_fdi_rx_enabled(dev_priv, PIPE_A);
1736 /* Workaround: set timing override bit. */
1737 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1738 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1739 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1742 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1744 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1745 PIPECONF_INTERLACED_ILK)
1746 val |= TRANS_INTERLACED;
1748 val |= TRANS_PROGRESSIVE;
1750 I915_WRITE(LPT_TRANSCONF, val);
1751 if (intel_wait_for_register(dev_priv,
1756 DRM_ERROR("Failed to enable PCH transcoder\n");
1759 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1765 /* FDI relies on the transcoder */
1766 assert_fdi_tx_disabled(dev_priv, pipe);
1767 assert_fdi_rx_disabled(dev_priv, pipe);
1769 /* Ports must be off as well */
1770 assert_pch_ports_disabled(dev_priv, pipe);
1772 reg = PCH_TRANSCONF(pipe);
1773 val = I915_READ(reg);
1774 val &= ~TRANS_ENABLE;
1775 I915_WRITE(reg, val);
1776 /* wait for PCH transcoder off, transcoder state */
1777 if (intel_wait_for_register(dev_priv,
1778 reg, TRANS_STATE_ENABLE, 0,
1780 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1782 if (HAS_PCH_CPT(dev_priv)) {
1783 /* Workaround: Clear the timing override chicken bit again. */
1784 reg = TRANS_CHICKEN2(pipe);
1785 val = I915_READ(reg);
1786 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1787 I915_WRITE(reg, val);
1791 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1795 val = I915_READ(LPT_TRANSCONF);
1796 val &= ~TRANS_ENABLE;
1797 I915_WRITE(LPT_TRANSCONF, val);
1798 /* wait for PCH transcoder off, transcoder state */
1799 if (intel_wait_for_register(dev_priv,
1800 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1802 DRM_ERROR("Failed to disable PCH transcoder\n");
1804 /* Workaround: clear timing override bit. */
1805 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1806 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1807 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1810 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1812 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1814 if (HAS_PCH_LPT(dev_priv))
1820 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1822 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1823 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1824 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1825 enum pipe pipe = crtc->pipe;
1829 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1831 assert_planes_disabled(crtc);
1834 * A pipe without a PLL won't actually be able to drive bits from
1835 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1838 if (HAS_GMCH_DISPLAY(dev_priv)) {
1839 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1840 assert_dsi_pll_enabled(dev_priv);
1842 assert_pll_enabled(dev_priv, pipe);
1844 if (new_crtc_state->has_pch_encoder) {
1845 /* if driving the PCH, we need FDI enabled */
1846 assert_fdi_rx_pll_enabled(dev_priv,
1847 intel_crtc_pch_transcoder(crtc));
1848 assert_fdi_tx_pll_enabled(dev_priv,
1849 (enum pipe) cpu_transcoder);
1851 /* FIXME: assert CPU port conditions for SNB+ */
1854 reg = PIPECONF(cpu_transcoder);
1855 val = I915_READ(reg);
1856 if (val & PIPECONF_ENABLE) {
1857 /* we keep both pipes enabled on 830 */
1858 WARN_ON(!IS_I830(dev_priv));
1862 I915_WRITE(reg, val | PIPECONF_ENABLE);
1866 * Until the pipe starts PIPEDSL reads will return a stale value,
1867 * which causes an apparent vblank timestamp jump when PIPEDSL
1868 * resets to its proper value. That also messes up the frame count
1869 * when it's derived from the timestamps. So let's wait for the
1870 * pipe to start properly before we call drm_crtc_vblank_on()
1872 if (dev_priv->drm.max_vblank_count == 0)
1873 intel_wait_for_pipe_scanline_moving(crtc);
1876 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1878 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1879 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1880 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1881 enum pipe pipe = crtc->pipe;
1885 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1888 * Make sure planes won't keep trying to pump pixels to us,
1889 * or we might hang the display.
1891 assert_planes_disabled(crtc);
1893 reg = PIPECONF(cpu_transcoder);
1894 val = I915_READ(reg);
1895 if ((val & PIPECONF_ENABLE) == 0)
1899 * Double wide has implications for planes
1900 * so best keep it disabled when not needed.
1902 if (old_crtc_state->double_wide)
1903 val &= ~PIPECONF_DOUBLE_WIDE;
1905 /* Don't disable pipe or pipe PLLs if needed */
1906 if (!IS_I830(dev_priv))
1907 val &= ~PIPECONF_ENABLE;
1909 I915_WRITE(reg, val);
1910 if ((val & PIPECONF_ENABLE) == 0)
1911 intel_wait_for_pipe_off(old_crtc_state);
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	return IS_GEN2(dev_priv) ? 2048 : 4096;
}
1920 intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
1922 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1923 unsigned int cpp = fb->format->cpp[plane];
1925 switch (fb->modifier) {
1926 case DRM_FORMAT_MOD_LINEAR:
1928 case I915_FORMAT_MOD_X_TILED:
1929 if (IS_GEN2(dev_priv))
1933 case I915_FORMAT_MOD_Y_TILED_CCS:
1937 case I915_FORMAT_MOD_Y_TILED:
1938 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
1942 case I915_FORMAT_MOD_Yf_TILED_CCS:
1946 case I915_FORMAT_MOD_Yf_TILED:
1962 MISSING_CASE(fb->modifier);
1968 intel_tile_height(const struct drm_framebuffer *fb, int plane)
1970 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1973 return intel_tile_size(to_i915(fb->dev)) /
1974 intel_tile_width_bytes(fb, plane);
/* Return the tile dimensions in pixel units */
static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
			    unsigned int *tile_width,
			    unsigned int *tile_height)
{
	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane);
	unsigned int cpp = fb->format->cpp[plane];

	*tile_width = tile_width_bytes / cpp;
	*tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
}
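
/*
 * Example: an X-tiled 32bpp framebuffer has 512-byte-wide, 8-row tiles
 * (4 KiB total on gen3+), so intel_tile_dims() reports a 128x8 pixel tile.
 */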
1990 intel_fb_align_height(const struct drm_framebuffer *fb,
1991 int plane, unsigned int height)
1993 unsigned int tile_height = intel_tile_height(fb, plane);
1995 return ALIGN(height, tile_height);
1998 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2000 unsigned int size = 0;
2003 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2004 size += rot_info->plane[i].width * rot_info->plane[i].height;
2010 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2011 const struct drm_framebuffer *fb,
2012 unsigned int rotation)
2014 view->type = I915_GGTT_VIEW_NORMAL;
2015 if (drm_rotation_90_or_270(rotation)) {
2016 view->type = I915_GGTT_VIEW_ROTATED;
2017 view->rotated = to_intel_framebuffer(fb)->rot_info;
2021 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2023 if (IS_I830(dev_priv))
2025 else if (IS_I85X(dev_priv))
2027 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
2033 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2035 if (INTEL_GEN(dev_priv) >= 9)
2037 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2038 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2040 else if (INTEL_GEN(dev_priv) >= 4)
2046 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2049 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2051 /* AUX_DIST needs only 4K alignment */
2055 switch (fb->modifier) {
2056 case DRM_FORMAT_MOD_LINEAR:
2057 return intel_linear_alignment(dev_priv);
2058 case I915_FORMAT_MOD_X_TILED:
2059 if (INTEL_GEN(dev_priv) >= 9)
2062 case I915_FORMAT_MOD_Y_TILED_CCS:
2063 case I915_FORMAT_MOD_Yf_TILED_CCS:
2064 case I915_FORMAT_MOD_Y_TILED:
2065 case I915_FORMAT_MOD_Yf_TILED:
2066 return 1 * 1024 * 1024;
2068 MISSING_CASE(fb->modifier);
2073 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2075 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2076 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2078 return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
2082 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2083 unsigned int rotation,
2085 unsigned long *out_flags)
2087 struct drm_device *dev = fb->dev;
2088 struct drm_i915_private *dev_priv = to_i915(dev);
2089 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2090 struct i915_ggtt_view view;
2091 struct i915_vma *vma;
2092 unsigned int pinctl;
2095 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2097 alignment = intel_surf_alignment(fb, 0);
2099 intel_fill_fb_ggtt_view(&view, fb, rotation);
2101 /* Note that the w/a also requires 64 PTE of padding following the
2102 * bo. We currently fill all unused PTE with the shadow page and so
2103 * we should always have valid PTE following the scanout preventing
2106 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2107 alignment = 256 * 1024;
2110 * Global gtt pte registers are special registers which actually forward
2111 * writes to a chunk of system memory. Which means that there is no risk
2112 * that the register values disappear as soon as we call
2113 * intel_runtime_pm_put(), so it is correct to wrap only the
2114 * pin/unpin/fence and not more.
2116 intel_runtime_pm_get(dev_priv);
2118 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2122 /* Valleyview is definitely limited to scanning out the first
2123 * 512MiB. Let's presume this behaviour was inherited from the
2124 * g4x display engine and that all earlier gen are similarly
2125 * limited. Testing suggests that it is a little more
2126 * complicated than this. For example, Cherryview appears quite
2127 * happy to scan out from anywhere within its global aperture.
2129 if (HAS_GMCH_DISPLAY(dev_priv))
2130 pinctl |= PIN_MAPPABLE;
2132 vma = i915_gem_object_pin_to_display_plane(obj,
2133 alignment, &view, pinctl);
2137 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2140 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2141 * fence, whereas 965+ only requires a fence if using
2142 * framebuffer compression. For simplicity, we always, when
2143 * possible, install a fence as the cost is not that onerous.
2145 * If we fail to fence the tiled scanout, then either the
2146 * modeset will reject the change (which is highly unlikely as
2147 * the affected systems, all but one, do not have unmappable
2148 * space) or we will not be able to enable full powersaving
2149 * techniques (also likely not to apply due to various limits
2150 * FBC and the like impose on the size of the buffer, which
2151 * presumably we violated anyway with this unmappable buffer).
2152 * Anyway, it is presumably better to stumble onwards with
2153 * something and try to run the system in a "less than optimal"
2154 * mode that matches the user configuration.
2156 ret = i915_vma_pin_fence(vma);
2157 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2158 i915_gem_object_unpin_from_display_plane(vma);
2163 if (ret == 0 && vma->fence)
2164 *out_flags |= PLANE_HAS_FENCE;
2169 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2171 intel_runtime_pm_put(dev_priv);
2175 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2177 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2179 if (flags & PLANE_HAS_FENCE)
2180 i915_vma_unpin_fence(vma);
2181 i915_gem_object_unpin_from_display_plane(vma);
2185 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
2186 unsigned int rotation)
2188 if (drm_rotation_90_or_270(rotation))
2189 return to_intel_framebuffer(fb)->rotated[plane].pitch;
2191 return fb->pitches[plane];
2195 * Convert the x/y offsets into a linear offset.
2196 * Only valid with 0/180 degree rotation, which is fine since linear
2197 * offset is only used with linear buffers on pre-hsw and tiled buffers
2198 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
2200 u32 intel_fb_xy_to_linear(int x, int y,
2201 const struct intel_plane_state *state,
2204 const struct drm_framebuffer *fb = state->base.fb;
2205 unsigned int cpp = fb->format->cpp[plane];
2206 unsigned int pitch = fb->pitches[plane];
2208 return y * pitch + x * cpp;
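/*
 * For example, with DRM_FORMAT_XRGB8888 (cpp = 4) and a 4096 byte pitch,
 * the pixel at (x, y) = (10, 2) maps to the linear byte offset
 * 2 * 4096 + 10 * 4 = 8232.
 */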
2212 * Add the x/y offsets derived from fb->offsets[] to the user
2213 * specified plane src x/y offsets. The resulting x/y offsets
2214 * specify the start of scanout from the beginning of the gtt mapping.
2216 void intel_add_fb_offsets(int *x, int *y,
2217 const struct intel_plane_state *state,
2221 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2222 unsigned int rotation = state->base.rotation;
2224 if (drm_rotation_90_or_270(rotation)) {
2225 *x += intel_fb->rotated[plane].x;
2226 *y += intel_fb->rotated[plane].y;
2228 *x += intel_fb->normal[plane].x;
2229 *y += intel_fb->normal[plane].y;
2233 static u32 __intel_adjust_tile_offset(int *x, int *y,
2234 unsigned int tile_width,
2235 unsigned int tile_height,
2236 unsigned int tile_size,
2237 unsigned int pitch_tiles,
2241 unsigned int pitch_pixels = pitch_tiles * tile_width;
2244 WARN_ON(old_offset & (tile_size - 1));
2245 WARN_ON(new_offset & (tile_size - 1));
2246 WARN_ON(new_offset > old_offset);
2248 tiles = (old_offset - new_offset) / tile_size;
2250 *y += tiles / pitch_tiles * tile_height;
2251 *x += tiles % pitch_tiles * tile_width;
2253 /* minimize x in case it got needlessly big */
2254 *y += *x / pitch_pixels * tile_height;
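/*
 * For example, assuming tile_width = 128 pixels, tile_height = 8 rows,
 * tile_size = 4096 bytes and pitch_tiles = 10: rebasing from
 * old_offset = 126976 (tile 31) to new_offset = 0 moves 31 tiles into
 * the coordinates, i.e. *y += 31 / 10 * 8 = 24 rows and
 * *x += 31 % 10 * 128 = 128 pixels.
 */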
2260 static u32 _intel_adjust_tile_offset(int *x, int *y,
2261 const struct drm_framebuffer *fb, int plane,
2262 unsigned int rotation,
2263 u32 old_offset, u32 new_offset)
2265 const struct drm_i915_private *dev_priv = to_i915(fb->dev);
2266 unsigned int cpp = fb->format->cpp[plane];
2267 unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
2269 WARN_ON(new_offset > old_offset);
2271 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
2272 unsigned int tile_size, tile_width, tile_height;
2273 unsigned int pitch_tiles;
2275 tile_size = intel_tile_size(dev_priv);
2276 intel_tile_dims(fb, plane, &tile_width, &tile_height);
2278 if (drm_rotation_90_or_270(rotation)) {
2279 pitch_tiles = pitch / tile_height;
2280 swap(tile_width, tile_height);
2282 pitch_tiles = pitch / (tile_width * cpp);
2285 __intel_adjust_tile_offset(x, y, tile_width, tile_height,
2286 tile_size, pitch_tiles,
2287 old_offset, new_offset);
2289 old_offset += *y * pitch + *x * cpp;
2291 *y = (old_offset - new_offset) / pitch;
2292 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2299 * Adjust the tile offset by moving the difference into
2302 static u32 intel_adjust_tile_offset(int *x, int *y,
2303 const struct intel_plane_state *state, int plane,
2304 u32 old_offset, u32 new_offset)
2306 return _intel_adjust_tile_offset(x, y, state->base.fb, plane,
2307 state->base.rotation,
2308 old_offset, new_offset);
2312 * Computes the linear offset to the base tile and adjusts
2313 * x, y. Bytes per pixel is assumed to be a power-of-two.
2315 * In the 90/270 rotated case, x and y are assumed
2316 * to be already rotated to match the rotated GTT view, and
2317 * pitch is the tile_height aligned framebuffer height.
2319 * This function is used when computing the derived information
2320 * under intel_framebuffer, so using any of that information
2321 * here is not allowed. Anything under drm_framebuffer can be
2322 * used. This is why the user has to pass in the pitch since it
2323 * is specified in the rotated orientation.
2325 static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
2327 const struct drm_framebuffer *fb, int plane,
2329 unsigned int rotation,
2332 uint64_t fb_modifier = fb->modifier;
2333 unsigned int cpp = fb->format->cpp[plane];
2334 u32 offset, offset_aligned;
2339 if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
2340 unsigned int tile_size, tile_width, tile_height;
2341 unsigned int tile_rows, tiles, pitch_tiles;
2343 tile_size = intel_tile_size(dev_priv);
2344 intel_tile_dims(fb, plane, &tile_width, &tile_height);
2346 if (drm_rotation_90_or_270(rotation)) {
2347 pitch_tiles = pitch / tile_height;
2348 swap(tile_width, tile_height);
2350 pitch_tiles = pitch / (tile_width * cpp);
2353 tile_rows = *y / tile_height;
2356 tiles = *x / tile_width;
2359 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2360 offset_aligned = offset & ~alignment;
2362 __intel_adjust_tile_offset(x, y, tile_width, tile_height,
2363 tile_size, pitch_tiles,
2364 offset, offset_aligned);
2366 offset = *y * pitch + *x * cpp;
2367 offset_aligned = offset & ~alignment;
2369 *y = (offset & alignment) / pitch;
2370 *x = ((offset & alignment) - *y * pitch) / cpp;
2373 return offset_aligned;
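/*
 * For example, assuming an X-tiled 4 bpp framebuffer with a 5120 byte
 * pitch (tile_width = 128 px, tile_height = 8 rows, tile_size = 4096,
 * pitch_tiles = 10) and *x = 200, *y = 30: tile_rows = 30 / 8 = 3,
 * tiles = 200 / 128 = 1, so offset = (3 * 10 + 1) * 4096 = 126976.
 * The intra-tile remainders (72, 6) stay in *x/*y, and
 * __intel_adjust_tile_offset() folds the distance between offset and
 * offset_aligned back into them.
 */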
2376 u32 intel_compute_tile_offset(int *x, int *y,
2377 const struct intel_plane_state *state,
2380 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2381 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2382 const struct drm_framebuffer *fb = state->base.fb;
2383 unsigned int rotation = state->base.rotation;
2384 int pitch = intel_fb_pitch(fb, plane, rotation);
2387 if (intel_plane->id == PLANE_CURSOR)
2388 alignment = intel_cursor_alignment(dev_priv);
2390 alignment = intel_surf_alignment(fb, plane);
2392 return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
2393 rotation, alignment);
2396 /* Convert the fb->offsets[] into x/y offsets */
2397 static int intel_fb_offset_to_xy(int *x, int *y,
2398 const struct drm_framebuffer *fb, int plane)
2400 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2402 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2403 fb->offsets[plane] % intel_tile_size(dev_priv))
2409 _intel_adjust_tile_offset(x, y,
2410 fb, plane, DRM_MODE_ROTATE_0,
2411 fb->offsets[plane], 0);
2416 static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
2418 switch (fb_modifier) {
2419 case I915_FORMAT_MOD_X_TILED:
2420 return I915_TILING_X;
2421 case I915_FORMAT_MOD_Y_TILED:
2422 case I915_FORMAT_MOD_Y_TILED_CCS:
2423 return I915_TILING_Y;
2425 return I915_TILING_NONE;
2430 * From the Sky Lake PRM:
2431 * "The Color Control Surface (CCS) contains the compression status of
2432 * the cache-line pairs. The compression state of the cache-line pair
2433 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2434 * an area on the main surface of 16x16 sets of 128 byte Y-tiled
2435 * cache-line-pairs. CCS is always Y tiled."
2437 * Since cache line pairs refer to horizontally adjacent cache lines,
2438 * each cache line in the CCS corresponds to an area of 32x16 cache
2439 * lines on the main surface. Since each pixel is 4 bytes, this gives
2440 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2441 * main surface.
2442 */
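/*
 * Working that ratio out (assuming 64 byte cache lines laid out as
 * 16 bytes x 4 rows within a Y tile): 32x16 cache lines cover
 * 32 * 16 / 4 = 128 pixels by 16 * 4 = 64 rows of the main surface,
 * i.e. 128 * 64 = 8192 pixels per 64 byte CCS cache line, which is
 * 8x16 pixels per CCS byte; hence .hsub = 8 and .vsub = 16 below.
 */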
2443 static const struct drm_format_info ccs_formats[] = {
2444 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2445 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2446 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2447 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2450 static const struct drm_format_info *
2451 lookup_format_info(const struct drm_format_info formats[],
2452 int num_formats, u32 format)
2456 for (i = 0; i < num_formats; i++) {
2457 if (formats[i].format == format)
2464 static const struct drm_format_info *
2465 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2467 switch (cmd->modifier[0]) {
2468 case I915_FORMAT_MOD_Y_TILED_CCS:
2469 case I915_FORMAT_MOD_Yf_TILED_CCS:
2470 return lookup_format_info(ccs_formats,
2471 ARRAY_SIZE(ccs_formats),
2478 bool is_ccs_modifier(u64 modifier)
2480 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2481 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2485 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2486 struct drm_framebuffer *fb)
2488 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2489 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2490 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2491 u32 gtt_offset_rotated = 0;
2492 unsigned int max_size = 0;
2493 int i, num_planes = fb->format->num_planes;
2494 unsigned int tile_size = intel_tile_size(dev_priv);
2496 for (i = 0; i < num_planes; i++) {
2497 unsigned int width, height;
2498 unsigned int cpp, size;
2503 cpp = fb->format->cpp[i];
2504 width = drm_framebuffer_plane_width(fb->width, fb, i);
2505 height = drm_framebuffer_plane_height(fb->height, fb, i);
2507 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2509 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2514 if (is_ccs_modifier(fb->modifier) && i == 1) {
2515 int hsub = fb->format->hsub;
2516 int vsub = fb->format->vsub;
2517 int tile_width, tile_height;
2521 intel_tile_dims(fb, i, &tile_width, &tile_height);
2523 tile_height *= vsub;
2525 ccs_x = (x * hsub) % tile_width;
2526 ccs_y = (y * vsub) % tile_height;
2527 main_x = intel_fb->normal[0].x % tile_width;
2528 main_y = intel_fb->normal[0].y % tile_height;
2531 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2532 * x/y offsets must match between CCS and the main surface.
2534 if (main_x != ccs_x || main_y != ccs_y) {
2535 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2538 intel_fb->normal[0].x,
2539 intel_fb->normal[0].y,
2546 * The fence (if used) is aligned to the start of the object
2547 * so having the framebuffer wrap around across the edge of the
2548 * fenced region doesn't really work. We have no API to configure
2549 * the fence start offset within the object (nor could we probably
2550 * on gen2/3). So it's just easier if we just require that the
2551 * fb layout agrees with the fence layout. We already check that the
2552 * fb stride matches the fence stride elsewhere.
2554 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2555 (x + width) * cpp > fb->pitches[i]) {
2556 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2562 * First pixel of the framebuffer from
2563 * the start of the normal gtt mapping.
2565 intel_fb->normal[i].x = x;
2566 intel_fb->normal[i].y = y;
2568 offset = _intel_compute_tile_offset(dev_priv, &x, &y,
2569 fb, i, fb->pitches[i],
2570 DRM_MODE_ROTATE_0, tile_size);
2571 offset /= tile_size;
2573 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
2574 unsigned int tile_width, tile_height;
2575 unsigned int pitch_tiles;
2578 intel_tile_dims(fb, i, &tile_width, &tile_height);
2580 rot_info->plane[i].offset = offset;
2581 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2582 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2583 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2585 intel_fb->rotated[i].pitch =
2586 rot_info->plane[i].height * tile_height;
2588 /* how many tiles does this plane need */
2589 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2591 * If the plane isn't horizontally tile aligned,
2592 * we need one more tile.
2597 /* rotate the x/y offsets to match the GTT view */
2603 rot_info->plane[i].width * tile_width,
2604 rot_info->plane[i].height * tile_height,
2605 DRM_MODE_ROTATE_270);
2609 /* rotate the tile dimensions to match the GTT view */
2610 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2611 swap(tile_width, tile_height);
2614 * We only keep the x/y offsets, so push all of the
2615 * gtt offset into the x/y offsets.
2617 __intel_adjust_tile_offset(&x, &y,
2618 tile_width, tile_height,
2619 tile_size, pitch_tiles,
2620 gtt_offset_rotated * tile_size, 0);
2622 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2625 * First pixel of the framebuffer from
2626 * the start of the rotated gtt mapping.
2628 intel_fb->rotated[i].x = x;
2629 intel_fb->rotated[i].y = y;
2631 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2632 x * cpp, tile_size);
2635 /* how many tiles are needed in total in the bo */
2636 max_size = max(max_size, offset + size);
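/*
 * For example, one plane of a 1920x1080 XRGB8888 Y-tiled fb with a
 * 7680 byte pitch and zero x/y offsets (assuming 128 byte x 32 row
 * tiles, i.e. 32x32 pixels at 4 bpp): stride = 7680 / 128 = 60 tiles,
 * width = DIV_ROUND_UP(1920, 32) = 60 tiles,
 * height = DIV_ROUND_UP(1080, 32) = 34 tiles, so the rotated pitch is
 * 34 * 32 = 1088 rows and the plane needs 60 * 34 = 2040 tiles.
 */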
2639 if (max_size * tile_size > obj->base.size) {
2640 DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
2641 max_size * tile_size, obj->base.size);
2648 static int i9xx_format_to_fourcc(int format)
2651 case DISPPLANE_8BPP:
2652 return DRM_FORMAT_C8;
2653 case DISPPLANE_BGRX555:
2654 return DRM_FORMAT_XRGB1555;
2655 case DISPPLANE_BGRX565:
2656 return DRM_FORMAT_RGB565;
2658 case DISPPLANE_BGRX888:
2659 return DRM_FORMAT_XRGB8888;
2660 case DISPPLANE_RGBX888:
2661 return DRM_FORMAT_XBGR8888;
2662 case DISPPLANE_BGRX101010:
2663 return DRM_FORMAT_XRGB2101010;
2664 case DISPPLANE_RGBX101010:
2665 return DRM_FORMAT_XBGR2101010;
2669 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2672 case PLANE_CTL_FORMAT_RGB_565:
2673 return DRM_FORMAT_RGB565;
2674 case PLANE_CTL_FORMAT_NV12:
2675 return DRM_FORMAT_NV12;
2677 case PLANE_CTL_FORMAT_XRGB_8888:
2680 return DRM_FORMAT_ABGR8888;
2682 return DRM_FORMAT_XBGR8888;
2685 return DRM_FORMAT_ARGB8888;
2687 return DRM_FORMAT_XRGB8888;
2689 case PLANE_CTL_FORMAT_XRGB_2101010:
2691 return DRM_FORMAT_XBGR2101010;
2693 return DRM_FORMAT_XRGB2101010;
2698 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2699 struct intel_initial_plane_config *plane_config)
2701 struct drm_device *dev = crtc->base.dev;
2702 struct drm_i915_private *dev_priv = to_i915(dev);
2703 struct drm_i915_gem_object *obj = NULL;
2704 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2705 struct drm_framebuffer *fb = &plane_config->fb->base;
2706 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2707 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2710 size_aligned -= base_aligned;
2712 if (plane_config->size == 0)
2715 /* If the FB is too big, just don't use it since fbdev is not very
2716 * important and we should probably use that space with FBC or other
2717 * features. */
2718 if (size_aligned * 2 > dev_priv->stolen_usable_size)
2721 mutex_lock(&dev->struct_mutex);
2722 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
2726 mutex_unlock(&dev->struct_mutex);
2730 if (plane_config->tiling == I915_TILING_X)
2731 obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
2733 mode_cmd.pixel_format = fb->format->format;
2734 mode_cmd.width = fb->width;
2735 mode_cmd.height = fb->height;
2736 mode_cmd.pitches[0] = fb->pitches[0];
2737 mode_cmd.modifier[0] = fb->modifier;
2738 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2740 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
2741 DRM_DEBUG_KMS("intel fb init failed\n");
2746 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2750 i915_gem_object_put(obj);
2755 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2756 struct intel_plane_state *plane_state,
2759 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2761 plane_state->base.visible = visible;
2763 /* FIXME pre-g4x platforms don't work like this */
2765 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2766 crtc_state->active_planes |= BIT(plane->id);
2768 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2769 crtc_state->active_planes &= ~BIT(plane->id);
2772 DRM_DEBUG_KMS("%s active planes 0x%x\n",
2773 crtc_state->base.crtc->name,
2774 crtc_state->active_planes);
2777 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2778 struct intel_plane *plane)
2780 struct intel_crtc_state *crtc_state =
2781 to_intel_crtc_state(crtc->base.state);
2782 struct intel_plane_state *plane_state =
2783 to_intel_plane_state(plane->base.state);
2785 intel_set_plane_visible(crtc_state, plane_state, false);
2787 if (plane->id == PLANE_PRIMARY)
2788 intel_pre_disable_primary_noatomic(&crtc->base);
2790 trace_intel_disable_plane(&plane->base, crtc);
2791 plane->disable_plane(plane, crtc);
2795 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2796 struct intel_initial_plane_config *plane_config)
2798 struct drm_device *dev = intel_crtc->base.dev;
2799 struct drm_i915_private *dev_priv = to_i915(dev);
2801 struct drm_i915_gem_object *obj;
2802 struct drm_plane *primary = intel_crtc->base.primary;
2803 struct drm_plane_state *plane_state = primary->state;
2804 struct drm_crtc_state *crtc_state = intel_crtc->base.state;
2805 struct intel_plane *intel_plane = to_intel_plane(primary);
2806 struct intel_plane_state *intel_state =
2807 to_intel_plane_state(plane_state);
2808 struct drm_framebuffer *fb;
2810 if (!plane_config->fb)
2813 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2814 fb = &plane_config->fb->base;
2818 kfree(plane_config->fb);
2821 * Failed to alloc the obj, check to see if we should share
2822 * an fb with another CRTC instead
2824 for_each_crtc(dev, c) {
2825 struct intel_plane_state *state;
2827 if (c == &intel_crtc->base)
2830 if (!to_intel_crtc(c)->active)
2833 state = to_intel_plane_state(c->primary->state);
2837 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2838 fb = state->base.fb;
2839 drm_framebuffer_get(fb);
2845 * We've failed to reconstruct the BIOS FB. Current display state
2846 * indicates that the primary plane is visible, but has a NULL FB,
2847 * which will lead to problems later if we don't fix it up. The
2848 * simplest solution is to just disable the primary plane now and
2849 * pretend the BIOS never had it enabled.
2851 intel_plane_disable_noatomic(intel_crtc, intel_plane);
2856 mutex_lock(&dev->struct_mutex);
2858 intel_pin_and_fence_fb_obj(fb,
2859 primary->state->rotation,
2860 intel_plane_uses_fence(intel_state),
2861 &intel_state->flags);
2862 mutex_unlock(&dev->struct_mutex);
2863 if (IS_ERR(intel_state->vma)) {
2864 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
2865 intel_crtc->pipe, PTR_ERR(intel_state->vma));
2867 intel_state->vma = NULL;
2868 drm_framebuffer_put(fb);
2872 obj = intel_fb_obj(fb);
2873 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
2875 plane_state->src_x = 0;
2876 plane_state->src_y = 0;
2877 plane_state->src_w = fb->width << 16;
2878 plane_state->src_h = fb->height << 16;
2880 plane_state->crtc_x = 0;
2881 plane_state->crtc_y = 0;
2882 plane_state->crtc_w = fb->width;
2883 plane_state->crtc_h = fb->height;
2885 intel_state->base.src = drm_plane_state_src(plane_state);
2886 intel_state->base.dst = drm_plane_state_dest(plane_state);
2888 if (i915_gem_object_is_tiled(obj))
2889 dev_priv->preserve_bios_swizzle = true;
2891 plane_state->fb = fb;
2892 plane_state->crtc = &intel_crtc->base;
2894 intel_set_plane_visible(to_intel_crtc_state(crtc_state),
2895 to_intel_plane_state(plane_state),
2898 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
2899 &obj->frontbuffer_bits);
2902 static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
2903 unsigned int rotation)
2905 int cpp = fb->format->cpp[plane];
2907 switch (fb->modifier) {
2908 case DRM_FORMAT_MOD_LINEAR:
2909 case I915_FORMAT_MOD_X_TILED:
2922 case I915_FORMAT_MOD_Y_TILED_CCS:
2923 case I915_FORMAT_MOD_Yf_TILED_CCS:
2924 /* FIXME AUX plane? */
2925 case I915_FORMAT_MOD_Y_TILED:
2926 case I915_FORMAT_MOD_Yf_TILED:
2941 MISSING_CASE(fb->modifier);
2947 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
2948 int main_x, int main_y, u32 main_offset)
2950 const struct drm_framebuffer *fb = plane_state->base.fb;
2951 int hsub = fb->format->hsub;
2952 int vsub = fb->format->vsub;
2953 int aux_x = plane_state->aux.x;
2954 int aux_y = plane_state->aux.y;
2955 u32 aux_offset = plane_state->aux.offset;
2956 u32 alignment = intel_surf_alignment(fb, 1);
2958 while (aux_offset >= main_offset && aux_y <= main_y) {
2961 if (aux_x == main_x && aux_y == main_y)
2964 if (aux_offset == 0)
2969 aux_offset = intel_adjust_tile_offset(&x, &y, plane_state, 1,
2970 aux_offset, aux_offset - alignment);
2971 aux_x = x * hsub + aux_x % hsub;
2972 aux_y = y * vsub + aux_y % vsub;
2975 if (aux_x != main_x || aux_y != main_y)
2978 plane_state->aux.offset = aux_offset;
2979 plane_state->aux.x = aux_x;
2980 plane_state->aux.y = aux_y;
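/*
 * In other words: starting from the current AUX offset, step it down
 * one alignment unit at a time (folding the difference into the AUX
 * x/y, expressed in main surface pixel units) until those x/y match
 * the main surface x/y, giving up if the AUX offset would drop below
 * the main offset or reaches 0 without a match.
 */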
2985 static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
2986 struct intel_plane_state *plane_state)
2988 struct drm_i915_private *dev_priv =
2989 to_i915(plane_state->base.plane->dev);
2990 const struct drm_framebuffer *fb = plane_state->base.fb;
2991 unsigned int rotation = plane_state->base.rotation;
2992 int x = plane_state->base.src.x1 >> 16;
2993 int y = plane_state->base.src.y1 >> 16;
2994 int w = drm_rect_width(&plane_state->base.src) >> 16;
2995 int h = drm_rect_height(&plane_state->base.src) >> 16;
2996 int dst_x = plane_state->base.dst.x1;
2997 int dst_w = drm_rect_width(&plane_state->base.dst);
2998 int pipe_src_w = crtc_state->pipe_src_w;
2999 int max_width = skl_max_plane_width(fb, 0, rotation);
3000 int max_height = 4096;
3001 u32 alignment, offset, aux_offset = plane_state->aux.offset;
3003 if (w > max_width || h > max_height) {
3004 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3005 w, h, max_width, max_height);
3010 * Display WA #1175: cnl,glk
3011 * Planes other than the cursor may cause FIFO underflow and display
3012 * corruption if starting less than 4 pixels from the right edge of
3013 * the screen.
3014 * In addition to the above WA, fix the similar problem where a plane
3015 * other than the cursor ending less than 4 pixels from the left edge
3016 * of the screen may also cause FIFO underflow and display corruption.
3018 if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
3019 (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) {
3020 DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
3021 dst_x + dst_w < 4 ? "end" : "start",
3022 dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
3027 intel_add_fb_offsets(&x, &y, plane_state, 0);
3028 offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
3029 alignment = intel_surf_alignment(fb, 0);
3032 * AUX surface offset is specified as the distance from the
3033 * main surface offset, and it must be non-negative. Make
3034 * sure that is what we will get.
3036 if (offset > aux_offset)
3037 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
3038 offset, aux_offset & ~(alignment - 1));
3041 * When using an X-tiled surface, the plane blows up
3042 * if the x offset + width exceeds the stride.
3044 * TODO: linear and Y-tiled seem fine, Yf untested,
3046 if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3047 int cpp = fb->format->cpp[0];
3049 while ((x + w) * cpp > fb->pitches[0]) {
3051 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
3055 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
3056 offset, offset - alignment);
3061 * The CCS AUX surface doesn't have its own x/y offsets, so we must
3062 * make sure they match the main surface x/y offsets.
3064 if (is_ccs_modifier(fb->modifier)) {
3065 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
3069 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
3070 offset, offset - alignment);
3073 if (x != plane_state->aux.x || y != plane_state->aux.y) {
3074 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3079 plane_state->main.offset = offset;
3080 plane_state->main.x = x;
3081 plane_state->main.y = y;
3087 skl_check_nv12_surface(const struct intel_crtc_state *crtc_state,
3088 struct intel_plane_state *plane_state)
3090 /* Display WA #1106 */
3091 if (plane_state->base.rotation !=
3092 (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
3093 plane_state->base.rotation != DRM_MODE_ROTATE_270)
3097 * src coordinates are rotated here.
3098 * We check height but report it as width
3100 if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
3101 DRM_DEBUG_KMS("src width must be a multiple "
3102 "of 4 for rotated NV12\n");
3109 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3111 const struct drm_framebuffer *fb = plane_state->base.fb;
3112 unsigned int rotation = plane_state->base.rotation;
3113 int max_width = skl_max_plane_width(fb, 1, rotation);
3114 int max_height = 4096;
3115 int x = plane_state->base.src.x1 >> 17;
3116 int y = plane_state->base.src.y1 >> 17;
3117 int w = drm_rect_width(&plane_state->base.src) >> 17;
3118 int h = drm_rect_height(&plane_state->base.src) >> 17;
3121 intel_add_fb_offsets(&x, &y, plane_state, 1);
3122 offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
3124 /* FIXME not quite sure how/if these apply to the chroma plane */
3125 if (w > max_width || h > max_height) {
3126 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3127 w, h, max_width, max_height);
3131 plane_state->aux.offset = offset;
3132 plane_state->aux.x = x;
3133 plane_state->aux.y = y;
3138 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3140 const struct drm_framebuffer *fb = plane_state->base.fb;
3141 int src_x = plane_state->base.src.x1 >> 16;
3142 int src_y = plane_state->base.src.y1 >> 16;
3143 int hsub = fb->format->hsub;
3144 int vsub = fb->format->vsub;
3145 int x = src_x / hsub;
3146 int y = src_y / vsub;
3149 if (plane_state->base.rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180)) {
3150 DRM_DEBUG_KMS("RC support only with 0/180 degree rotation %x\n",
3151 plane_state->base.rotation);
3155 intel_add_fb_offsets(&x, &y, plane_state, 1);
3156 offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
3158 plane_state->aux.offset = offset;
3159 plane_state->aux.x = x * hsub + src_x % hsub;
3160 plane_state->aux.y = y * vsub + src_y % vsub;
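/*
 * For example, with hsub = 8, vsub = 16 and src_x/y = (100, 50), the
 * CCS plane position is (100 / 8, 50 / 16) = (12, 3); after the tile
 * offset computation the sub-sample remainders (100 % 8 = 4,
 * 50 % 16 = 2) are added back so aux.x/y stay in main surface pixel
 * units.
 */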
3165 int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
3166 struct intel_plane_state *plane_state)
3168 const struct drm_framebuffer *fb = plane_state->base.fb;
3169 unsigned int rotation = plane_state->base.rotation;
3172 if (rotation & DRM_MODE_REFLECT_X &&
3173 fb->modifier == DRM_FORMAT_MOD_LINEAR) {
3174 DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");