2 * Copyright © 2006-2007 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
39 #include "i915_trace.h"
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_crtc_helper.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_rect.h>
44 #include <linux/dma_remapping.h>
46 /* Primary plane formats supported by all gen */
47 #define COMMON_PRIMARY_FORMATS \
50 DRM_FORMAT_XRGB8888, \
53 /* Primary plane formats for gen <= 3 */
54 static const uint32_t intel_primary_formats_gen2[] = {
55 COMMON_PRIMARY_FORMATS,
60 /* Primary plane formats for gen >= 4 */
61 static const uint32_t intel_primary_formats_gen4[] = {
62 COMMON_PRIMARY_FORMATS, \
65 DRM_FORMAT_XRGB2101010,
66 DRM_FORMAT_ARGB2101010,
67 DRM_FORMAT_XBGR2101010,
68 DRM_FORMAT_ABGR2101010,
72 static const uint32_t intel_cursor_formats[] = {
76 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
78 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
79 struct intel_crtc_state *pipe_config);
80 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
81 struct intel_crtc_state *pipe_config);
83 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
84 int x, int y, struct drm_framebuffer *old_fb);
85 static int intel_framebuffer_init(struct drm_device *dev,
86 struct intel_framebuffer *ifb,
87 struct drm_mode_fb_cmd2 *mode_cmd,
88 struct drm_i915_gem_object *obj);
89 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
90 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
91 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
92 struct intel_link_m_n *m_n,
93 struct intel_link_m_n *m2_n2);
94 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
95 static void haswell_set_pipeconf(struct drm_crtc *crtc);
96 static void intel_set_pipe_csc(struct drm_crtc *crtc);
97 static void vlv_prepare_pll(struct intel_crtc *crtc,
98 const struct intel_crtc_state *pipe_config);
99 static void chv_prepare_pll(struct intel_crtc *crtc,
100 const struct intel_crtc_state *pipe_config);
101 static void intel_begin_crtc_commit(struct drm_crtc *crtc);
102 static void intel_finish_crtc_commit(struct drm_crtc *crtc);
104 static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
106 if (!connector->mst_port)
107 return connector->encoder;
109 return &connector->mst_port->mst_encoders[pipe]->base;
118 int p2_slow, p2_fast;
121 typedef struct intel_limit intel_limit_t;
123 intel_range_t dot, vco, n, m, m1, m2, p, p1;
128 intel_pch_rawclk(struct drm_device *dev)
130 struct drm_i915_private *dev_priv = dev->dev_private;
132 WARN_ON(!HAS_PCH_SPLIT(dev));
134 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
137 static inline u32 /* units of 100MHz */
138 intel_fdi_link_freq(struct drm_device *dev)
141 struct drm_i915_private *dev_priv = dev->dev_private;
142 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
147 static const intel_limit_t intel_limits_i8xx_dac = {
148 .dot = { .min = 25000, .max = 350000 },
149 .vco = { .min = 908000, .max = 1512000 },
150 .n = { .min = 2, .max = 16 },
151 .m = { .min = 96, .max = 140 },
152 .m1 = { .min = 18, .max = 26 },
153 .m2 = { .min = 6, .max = 16 },
154 .p = { .min = 4, .max = 128 },
155 .p1 = { .min = 2, .max = 33 },
156 .p2 = { .dot_limit = 165000,
157 .p2_slow = 4, .p2_fast = 2 },
160 static const intel_limit_t intel_limits_i8xx_dvo = {
161 .dot = { .min = 25000, .max = 350000 },
162 .vco = { .min = 908000, .max = 1512000 },
163 .n = { .min = 2, .max = 16 },
164 .m = { .min = 96, .max = 140 },
165 .m1 = { .min = 18, .max = 26 },
166 .m2 = { .min = 6, .max = 16 },
167 .p = { .min = 4, .max = 128 },
168 .p1 = { .min = 2, .max = 33 },
169 .p2 = { .dot_limit = 165000,
170 .p2_slow = 4, .p2_fast = 4 },
173 static const intel_limit_t intel_limits_i8xx_lvds = {
174 .dot = { .min = 25000, .max = 350000 },
175 .vco = { .min = 908000, .max = 1512000 },
176 .n = { .min = 2, .max = 16 },
177 .m = { .min = 96, .max = 140 },
178 .m1 = { .min = 18, .max = 26 },
179 .m2 = { .min = 6, .max = 16 },
180 .p = { .min = 4, .max = 128 },
181 .p1 = { .min = 1, .max = 6 },
182 .p2 = { .dot_limit = 165000,
183 .p2_slow = 14, .p2_fast = 7 },
186 static const intel_limit_t intel_limits_i9xx_sdvo = {
187 .dot = { .min = 20000, .max = 400000 },
188 .vco = { .min = 1400000, .max = 2800000 },
189 .n = { .min = 1, .max = 6 },
190 .m = { .min = 70, .max = 120 },
191 .m1 = { .min = 8, .max = 18 },
192 .m2 = { .min = 3, .max = 7 },
193 .p = { .min = 5, .max = 80 },
194 .p1 = { .min = 1, .max = 8 },
195 .p2 = { .dot_limit = 200000,
196 .p2_slow = 10, .p2_fast = 5 },
199 static const intel_limit_t intel_limits_i9xx_lvds = {
200 .dot = { .min = 20000, .max = 400000 },
201 .vco = { .min = 1400000, .max = 2800000 },
202 .n = { .min = 1, .max = 6 },
203 .m = { .min = 70, .max = 120 },
204 .m1 = { .min = 8, .max = 18 },
205 .m2 = { .min = 3, .max = 7 },
206 .p = { .min = 7, .max = 98 },
207 .p1 = { .min = 1, .max = 8 },
208 .p2 = { .dot_limit = 112000,
209 .p2_slow = 14, .p2_fast = 7 },
213 static const intel_limit_t intel_limits_g4x_sdvo = {
214 .dot = { .min = 25000, .max = 270000 },
215 .vco = { .min = 1750000, .max = 3500000},
216 .n = { .min = 1, .max = 4 },
217 .m = { .min = 104, .max = 138 },
218 .m1 = { .min = 17, .max = 23 },
219 .m2 = { .min = 5, .max = 11 },
220 .p = { .min = 10, .max = 30 },
221 .p1 = { .min = 1, .max = 3},
222 .p2 = { .dot_limit = 270000,
228 static const intel_limit_t intel_limits_g4x_hdmi = {
229 .dot = { .min = 22000, .max = 400000 },
230 .vco = { .min = 1750000, .max = 3500000},
231 .n = { .min = 1, .max = 4 },
232 .m = { .min = 104, .max = 138 },
233 .m1 = { .min = 16, .max = 23 },
234 .m2 = { .min = 5, .max = 11 },
235 .p = { .min = 5, .max = 80 },
236 .p1 = { .min = 1, .max = 8},
237 .p2 = { .dot_limit = 165000,
238 .p2_slow = 10, .p2_fast = 5 },
241 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
242 .dot = { .min = 20000, .max = 115000 },
243 .vco = { .min = 1750000, .max = 3500000 },
244 .n = { .min = 1, .max = 3 },
245 .m = { .min = 104, .max = 138 },
246 .m1 = { .min = 17, .max = 23 },
247 .m2 = { .min = 5, .max = 11 },
248 .p = { .min = 28, .max = 112 },
249 .p1 = { .min = 2, .max = 8 },
250 .p2 = { .dot_limit = 0,
251 .p2_slow = 14, .p2_fast = 14
255 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
256 .dot = { .min = 80000, .max = 224000 },
257 .vco = { .min = 1750000, .max = 3500000 },
258 .n = { .min = 1, .max = 3 },
259 .m = { .min = 104, .max = 138 },
260 .m1 = { .min = 17, .max = 23 },
261 .m2 = { .min = 5, .max = 11 },
262 .p = { .min = 14, .max = 42 },
263 .p1 = { .min = 2, .max = 6 },
264 .p2 = { .dot_limit = 0,
265 .p2_slow = 7, .p2_fast = 7
269 static const intel_limit_t intel_limits_pineview_sdvo = {
270 .dot = { .min = 20000, .max = 400000},
271 .vco = { .min = 1700000, .max = 3500000 },
272 /* Pineview's Ncounter is a ring counter */
273 .n = { .min = 3, .max = 6 },
274 .m = { .min = 2, .max = 256 },
275 /* Pineview only has one combined m divider, which we treat as m2. */
276 .m1 = { .min = 0, .max = 0 },
277 .m2 = { .min = 0, .max = 254 },
278 .p = { .min = 5, .max = 80 },
279 .p1 = { .min = 1, .max = 8 },
280 .p2 = { .dot_limit = 200000,
281 .p2_slow = 10, .p2_fast = 5 },
284 static const intel_limit_t intel_limits_pineview_lvds = {
285 .dot = { .min = 20000, .max = 400000 },
286 .vco = { .min = 1700000, .max = 3500000 },
287 .n = { .min = 3, .max = 6 },
288 .m = { .min = 2, .max = 256 },
289 .m1 = { .min = 0, .max = 0 },
290 .m2 = { .min = 0, .max = 254 },
291 .p = { .min = 7, .max = 112 },
292 .p1 = { .min = 1, .max = 8 },
293 .p2 = { .dot_limit = 112000,
294 .p2_slow = 14, .p2_fast = 14 },
297 /* Ironlake / Sandybridge
299 * We calculate clock using (register_value + 2) for N/M1/M2, so here
300 * the range value for them is (actual_value - 2).
302 static const intel_limit_t intel_limits_ironlake_dac = {
303 .dot = { .min = 25000, .max = 350000 },
304 .vco = { .min = 1760000, .max = 3510000 },
305 .n = { .min = 1, .max = 5 },
306 .m = { .min = 79, .max = 127 },
307 .m1 = { .min = 12, .max = 22 },
308 .m2 = { .min = 5, .max = 9 },
309 .p = { .min = 5, .max = 80 },
310 .p1 = { .min = 1, .max = 8 },
311 .p2 = { .dot_limit = 225000,
312 .p2_slow = 10, .p2_fast = 5 },
315 static const intel_limit_t intel_limits_ironlake_single_lvds = {
316 .dot = { .min = 25000, .max = 350000 },
317 .vco = { .min = 1760000, .max = 3510000 },
318 .n = { .min = 1, .max = 3 },
319 .m = { .min = 79, .max = 118 },
320 .m1 = { .min = 12, .max = 22 },
321 .m2 = { .min = 5, .max = 9 },
322 .p = { .min = 28, .max = 112 },
323 .p1 = { .min = 2, .max = 8 },
324 .p2 = { .dot_limit = 225000,
325 .p2_slow = 14, .p2_fast = 14 },
328 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
329 .dot = { .min = 25000, .max = 350000 },
330 .vco = { .min = 1760000, .max = 3510000 },
331 .n = { .min = 1, .max = 3 },
332 .m = { .min = 79, .max = 127 },
333 .m1 = { .min = 12, .max = 22 },
334 .m2 = { .min = 5, .max = 9 },
335 .p = { .min = 14, .max = 56 },
336 .p1 = { .min = 2, .max = 8 },
337 .p2 = { .dot_limit = 225000,
338 .p2_slow = 7, .p2_fast = 7 },
341 /* LVDS 100mhz refclk limits. */
342 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
343 .dot = { .min = 25000, .max = 350000 },
344 .vco = { .min = 1760000, .max = 3510000 },
345 .n = { .min = 1, .max = 2 },
346 .m = { .min = 79, .max = 126 },
347 .m1 = { .min = 12, .max = 22 },
348 .m2 = { .min = 5, .max = 9 },
349 .p = { .min = 28, .max = 112 },
350 .p1 = { .min = 2, .max = 8 },
351 .p2 = { .dot_limit = 225000,
352 .p2_slow = 14, .p2_fast = 14 },
355 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
356 .dot = { .min = 25000, .max = 350000 },
357 .vco = { .min = 1760000, .max = 3510000 },
358 .n = { .min = 1, .max = 3 },
359 .m = { .min = 79, .max = 126 },
360 .m1 = { .min = 12, .max = 22 },
361 .m2 = { .min = 5, .max = 9 },
362 .p = { .min = 14, .max = 42 },
363 .p1 = { .min = 2, .max = 6 },
364 .p2 = { .dot_limit = 225000,
365 .p2_slow = 7, .p2_fast = 7 },
368 static const intel_limit_t intel_limits_vlv = {
370 * These are the data rate limits (measured in fast clocks)
371 * since those are the strictest limits we have. The fast
372 * clock and actual rate limits are more relaxed, so checking
373 * them would make no difference.
375 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
376 .vco = { .min = 4000000, .max = 6000000 },
377 .n = { .min = 1, .max = 7 },
378 .m1 = { .min = 2, .max = 3 },
379 .m2 = { .min = 11, .max = 156 },
380 .p1 = { .min = 2, .max = 3 },
381 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
384 static const intel_limit_t intel_limits_chv = {
386 * These are the data rate limits (measured in fast clocks)
387 * since those are the strictest limits we have. The fast
388 * clock and actual rate limits are more relaxed, so checking
389 * them would make no difference.
391 .dot = { .min = 25000 * 5, .max = 540000 * 5},
392 .vco = { .min = 4860000, .max = 6700000 },
393 .n = { .min = 1, .max = 1 },
394 .m1 = { .min = 2, .max = 2 },
395 .m2 = { .min = 24 << 22, .max = 175 << 22 },
396 .p1 = { .min = 2, .max = 4 },
397 .p2 = { .p2_slow = 1, .p2_fast = 14 },
400 static void vlv_clock(int refclk, intel_clock_t *clock)
402 clock->m = clock->m1 * clock->m2;
403 clock->p = clock->p1 * clock->p2;
404 if (WARN_ON(clock->n == 0 || clock->p == 0))
406 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
407 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
411 * Returns whether any output on the specified pipe is of the specified type
413 bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
415 struct drm_device *dev = crtc->base.dev;
416 struct intel_encoder *encoder;
418 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
419 if (encoder->type == type)
426 * Returns whether any output on the specified pipe will have the specified
427 * type after a staged modeset is complete, i.e., the same as
428 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
431 static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
433 struct drm_device *dev = crtc->base.dev;
434 struct intel_encoder *encoder;
436 for_each_intel_encoder(dev, encoder)
437 if (encoder->new_crtc == crtc && encoder->type == type)
443 static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
446 struct drm_device *dev = crtc->base.dev;
447 const intel_limit_t *limit;
449 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
450 if (intel_is_dual_link_lvds(dev)) {
451 if (refclk == 100000)
452 limit = &intel_limits_ironlake_dual_lvds_100m;
454 limit = &intel_limits_ironlake_dual_lvds;
456 if (refclk == 100000)
457 limit = &intel_limits_ironlake_single_lvds_100m;
459 limit = &intel_limits_ironlake_single_lvds;
462 limit = &intel_limits_ironlake_dac;
467 static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
469 struct drm_device *dev = crtc->base.dev;
470 const intel_limit_t *limit;
472 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
473 if (intel_is_dual_link_lvds(dev))
474 limit = &intel_limits_g4x_dual_channel_lvds;
476 limit = &intel_limits_g4x_single_channel_lvds;
477 } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
478 intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
479 limit = &intel_limits_g4x_hdmi;
480 } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
481 limit = &intel_limits_g4x_sdvo;
482 } else /* The option is for other outputs */
483 limit = &intel_limits_i9xx_sdvo;
488 static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
490 struct drm_device *dev = crtc->base.dev;
491 const intel_limit_t *limit;
493 if (HAS_PCH_SPLIT(dev))
494 limit = intel_ironlake_limit(crtc, refclk);
495 else if (IS_G4X(dev)) {
496 limit = intel_g4x_limit(crtc);
497 } else if (IS_PINEVIEW(dev)) {
498 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
499 limit = &intel_limits_pineview_lvds;
501 limit = &intel_limits_pineview_sdvo;
502 } else if (IS_CHERRYVIEW(dev)) {
503 limit = &intel_limits_chv;
504 } else if (IS_VALLEYVIEW(dev)) {
505 limit = &intel_limits_vlv;
506 } else if (!IS_GEN2(dev)) {
507 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
508 limit = &intel_limits_i9xx_lvds;
510 limit = &intel_limits_i9xx_sdvo;
512 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
513 limit = &intel_limits_i8xx_lvds;
514 else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
515 limit = &intel_limits_i8xx_dvo;
517 limit = &intel_limits_i8xx_dac;
522 /* m1 is reserved as 0 in Pineview, n is a ring counter */
523 static void pineview_clock(int refclk, intel_clock_t *clock)
525 clock->m = clock->m2 + 2;
526 clock->p = clock->p1 * clock->p2;
527 if (WARN_ON(clock->n == 0 || clock->p == 0))
529 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
530 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
533 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
535 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
538 static void i9xx_clock(int refclk, intel_clock_t *clock)
540 clock->m = i9xx_dpll_compute_m(clock);
541 clock->p = clock->p1 * clock->p2;
542 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
544 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
545 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
548 static void chv_clock(int refclk, intel_clock_t *clock)
550 clock->m = clock->m1 * clock->m2;
551 clock->p = clock->p1 * clock->p2;
552 if (WARN_ON(clock->n == 0 || clock->p == 0))
554 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
556 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
559 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
561 * Returns whether the given set of divisors are valid for a given refclk with
562 * the given connectors.
565 static bool intel_PLL_is_valid(struct drm_device *dev,
566 const intel_limit_t *limit,
567 const intel_clock_t *clock)
569 if (clock->n < limit->n.min || limit->n.max < clock->n)
570 INTELPllInvalid("n out of range\n");
571 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
572 INTELPllInvalid("p1 out of range\n");
573 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
574 INTELPllInvalid("m2 out of range\n");
575 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
576 INTELPllInvalid("m1 out of range\n");
578 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
579 if (clock->m1 <= clock->m2)
580 INTELPllInvalid("m1 <= m2\n");
582 if (!IS_VALLEYVIEW(dev)) {
583 if (clock->p < limit->p.min || limit->p.max < clock->p)
584 INTELPllInvalid("p out of range\n");
585 if (clock->m < limit->m.min || limit->m.max < clock->m)
586 INTELPllInvalid("m out of range\n");
589 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
590 INTELPllInvalid("vco out of range\n");
591 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
592 * connector, etc., rather than just a single range.
594 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
595 INTELPllInvalid("dot out of range\n");
601 i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
602 int target, int refclk, intel_clock_t *match_clock,
603 intel_clock_t *best_clock)
605 struct drm_device *dev = crtc->base.dev;
609 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
611 * For LVDS just rely on its current settings for dual-channel.
612 * We haven't figured out how to reliably set up different
613 * single/dual channel state, if we even can.
615 if (intel_is_dual_link_lvds(dev))
616 clock.p2 = limit->p2.p2_fast;
618 clock.p2 = limit->p2.p2_slow;
620 if (target < limit->p2.dot_limit)
621 clock.p2 = limit->p2.p2_slow;
623 clock.p2 = limit->p2.p2_fast;
626 memset(best_clock, 0, sizeof(*best_clock));
628 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
630 for (clock.m2 = limit->m2.min;
631 clock.m2 <= limit->m2.max; clock.m2++) {
632 if (clock.m2 >= clock.m1)
634 for (clock.n = limit->n.min;
635 clock.n <= limit->n.max; clock.n++) {
636 for (clock.p1 = limit->p1.min;
637 clock.p1 <= limit->p1.max; clock.p1++) {
640 i9xx_clock(refclk, &clock);
641 if (!intel_PLL_is_valid(dev, limit,
645 clock.p != match_clock->p)
648 this_err = abs(clock.dot - target);
649 if (this_err < err) {
658 return (err != target);
662 pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
663 int target, int refclk, intel_clock_t *match_clock,
664 intel_clock_t *best_clock)
666 struct drm_device *dev = crtc->base.dev;
670 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
672 * For LVDS just rely on its current settings for dual-channel.
673 * We haven't figured out how to reliably set up different
674 * single/dual channel state, if we even can.
676 if (intel_is_dual_link_lvds(dev))
677 clock.p2 = limit->p2.p2_fast;
679 clock.p2 = limit->p2.p2_slow;
681 if (target < limit->p2.dot_limit)
682 clock.p2 = limit->p2.p2_slow;
684 clock.p2 = limit->p2.p2_fast;
687 memset(best_clock, 0, sizeof(*best_clock));
689 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
691 for (clock.m2 = limit->m2.min;
692 clock.m2 <= limit->m2.max; clock.m2++) {
693 for (clock.n = limit->n.min;
694 clock.n <= limit->n.max; clock.n++) {
695 for (clock.p1 = limit->p1.min;
696 clock.p1 <= limit->p1.max; clock.p1++) {
699 pineview_clock(refclk, &clock);
700 if (!intel_PLL_is_valid(dev, limit,
704 clock.p != match_clock->p)
707 this_err = abs(clock.dot - target);
708 if (this_err < err) {
717 return (err != target);
721 g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
722 int target, int refclk, intel_clock_t *match_clock,
723 intel_clock_t *best_clock)
725 struct drm_device *dev = crtc->base.dev;
729 /* approximately equals target * 0.00585 */
730 int err_most = (target >> 8) + (target >> 9);
733 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
734 if (intel_is_dual_link_lvds(dev))
735 clock.p2 = limit->p2.p2_fast;
737 clock.p2 = limit->p2.p2_slow;
739 if (target < limit->p2.dot_limit)
740 clock.p2 = limit->p2.p2_slow;
742 clock.p2 = limit->p2.p2_fast;
745 memset(best_clock, 0, sizeof(*best_clock));
746 max_n = limit->n.max;
747 /* based on hardware requirement, prefer smaller n to precision */
748 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
749 /* based on hardware requirement, prefere larger m1,m2 */
750 for (clock.m1 = limit->m1.max;
751 clock.m1 >= limit->m1.min; clock.m1--) {
752 for (clock.m2 = limit->m2.max;
753 clock.m2 >= limit->m2.min; clock.m2--) {
754 for (clock.p1 = limit->p1.max;
755 clock.p1 >= limit->p1.min; clock.p1--) {
758 i9xx_clock(refclk, &clock);
759 if (!intel_PLL_is_valid(dev, limit,
763 this_err = abs(clock.dot - target);
764 if (this_err < err_most) {
778 vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
779 int target, int refclk, intel_clock_t *match_clock,
780 intel_clock_t *best_clock)
782 struct drm_device *dev = crtc->base.dev;
784 unsigned int bestppm = 1000000;
785 /* min update 19.2 MHz */
786 int max_n = min(limit->n.max, refclk / 19200);
789 target *= 5; /* fast clock */
791 memset(best_clock, 0, sizeof(*best_clock));
793 /* based on hardware requirement, prefer smaller n to precision */
794 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
795 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
796 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
797 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
798 clock.p = clock.p1 * clock.p2;
799 /* based on hardware requirement, prefer bigger m1,m2 values */
800 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
801 unsigned int ppm, diff;
803 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
806 vlv_clock(refclk, &clock);
808 if (!intel_PLL_is_valid(dev, limit,
812 diff = abs(clock.dot - target);
813 ppm = div_u64(1000000ULL * diff, target);
815 if (ppm < 100 && clock.p > best_clock->p) {
821 if (bestppm >= 10 && ppm < bestppm - 10) {
835 chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
836 int target, int refclk, intel_clock_t *match_clock,
837 intel_clock_t *best_clock)
839 struct drm_device *dev = crtc->base.dev;
844 memset(best_clock, 0, sizeof(*best_clock));
847 * Based on hardware doc, the n always set to 1, and m1 always
848 * set to 2. If requires to support 200Mhz refclk, we need to
849 * revisit this because n may not 1 anymore.
851 clock.n = 1, clock.m1 = 2;
852 target *= 5; /* fast clock */
854 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
855 for (clock.p2 = limit->p2.p2_fast;
856 clock.p2 >= limit->p2.p2_slow;
857 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
859 clock.p = clock.p1 * clock.p2;
861 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
862 clock.n) << 22, refclk * clock.m1);
864 if (m2 > INT_MAX/clock.m1)
869 chv_clock(refclk, &clock);
871 if (!intel_PLL_is_valid(dev, limit, &clock))
874 /* based on hardware requirement, prefer bigger p
876 if (clock.p > best_clock->p) {
886 bool intel_crtc_active(struct drm_crtc *crtc)
888 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
890 /* Be paranoid as we can arrive here with only partial
891 * state retrieved from the hardware during setup.
893 * We can ditch the adjusted_mode.crtc_clock check as soon
894 * as Haswell has gained clock readout/fastboot support.
896 * We can ditch the crtc->primary->fb check as soon as we can
897 * properly reconstruct framebuffers.
899 return intel_crtc->active && crtc->primary->fb &&
900 intel_crtc->config->base.adjusted_mode.crtc_clock;
903 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
906 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
907 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
909 return intel_crtc->config->cpu_transcoder;
912 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
914 struct drm_i915_private *dev_priv = dev->dev_private;
915 u32 reg = PIPEDSL(pipe);
920 line_mask = DSL_LINEMASK_GEN2;
922 line_mask = DSL_LINEMASK_GEN3;
924 line1 = I915_READ(reg) & line_mask;
926 line2 = I915_READ(reg) & line_mask;
928 return line1 == line2;
932 * intel_wait_for_pipe_off - wait for pipe to turn off
933 * @crtc: crtc whose pipe to wait for
935 * After disabling a pipe, we can't wait for vblank in the usual way,
936 * spinning on the vblank interrupt status bit, since we won't actually
937 * see an interrupt when the pipe is disabled.
940 * wait for the pipe register state bit to turn off
943 * wait for the display line value to settle (it usually
944 * ends up stopping at the start of the next frame).
947 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
949 struct drm_device *dev = crtc->base.dev;
950 struct drm_i915_private *dev_priv = dev->dev_private;
951 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
952 enum pipe pipe = crtc->pipe;
954 if (INTEL_INFO(dev)->gen >= 4) {
955 int reg = PIPECONF(cpu_transcoder);
957 /* Wait for the Pipe State to go off */
958 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
960 WARN(1, "pipe_off wait timed out\n");
962 /* Wait for the display line to settle */
963 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
964 WARN(1, "pipe_off wait timed out\n");
969 * ibx_digital_port_connected - is the specified port connected?
970 * @dev_priv: i915 private structure
971 * @port: the port to test
973 * Returns true if @port is connected, false otherwise.
975 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
976 struct intel_digital_port *port)
980 if (HAS_PCH_IBX(dev_priv->dev)) {
981 switch (port->port) {
983 bit = SDE_PORTB_HOTPLUG;
986 bit = SDE_PORTC_HOTPLUG;
989 bit = SDE_PORTD_HOTPLUG;
995 switch (port->port) {
997 bit = SDE_PORTB_HOTPLUG_CPT;
1000 bit = SDE_PORTC_HOTPLUG_CPT;
1003 bit = SDE_PORTD_HOTPLUG_CPT;
1010 return I915_READ(SDEISR) & bit;
1013 static const char *state_string(bool enabled)
1015 return enabled ? "on" : "off";
1018 /* Only for pre-ILK configs */
1019 void assert_pll(struct drm_i915_private *dev_priv,
1020 enum pipe pipe, bool state)
1027 val = I915_READ(reg);
1028 cur_state = !!(val & DPLL_VCO_ENABLE);
1029 I915_STATE_WARN(cur_state != state,
1030 "PLL state assertion failure (expected %s, current %s)\n",
1031 state_string(state), state_string(cur_state));
1034 /* XXX: the dsi pll is shared between MIPI DSI ports */
1035 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1040 mutex_lock(&dev_priv->dpio_lock);
1041 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1042 mutex_unlock(&dev_priv->dpio_lock);
1044 cur_state = val & DSI_PLL_VCO_EN;
1045 I915_STATE_WARN(cur_state != state,
1046 "DSI PLL state assertion failure (expected %s, current %s)\n",
1047 state_string(state), state_string(cur_state));
1049 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1050 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1052 struct intel_shared_dpll *
1053 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1055 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1057 if (crtc->config->shared_dpll < 0)
1060 return &dev_priv->shared_dplls[crtc->config->shared_dpll];
1064 void assert_shared_dpll(struct drm_i915_private *dev_priv,
1065 struct intel_shared_dpll *pll,
1069 struct intel_dpll_hw_state hw_state;
1072 "asserting DPLL %s with no DPLL\n", state_string(state)))
1075 cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1076 I915_STATE_WARN(cur_state != state,
1077 "%s assertion failure (expected %s, current %s)\n",
1078 pll->name, state_string(state), state_string(cur_state));
1081 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1082 enum pipe pipe, bool state)
1087 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1090 if (HAS_DDI(dev_priv->dev)) {
1091 /* DDI does not have a specific FDI_TX register */
1092 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1093 val = I915_READ(reg);
1094 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1096 reg = FDI_TX_CTL(pipe);
1097 val = I915_READ(reg);
1098 cur_state = !!(val & FDI_TX_ENABLE);
1100 I915_STATE_WARN(cur_state != state,
1101 "FDI TX state assertion failure (expected %s, current %s)\n",
1102 state_string(state), state_string(cur_state));
1104 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1105 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1107 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1108 enum pipe pipe, bool state)
1114 reg = FDI_RX_CTL(pipe);
1115 val = I915_READ(reg);
1116 cur_state = !!(val & FDI_RX_ENABLE);
1117 I915_STATE_WARN(cur_state != state,
1118 "FDI RX state assertion failure (expected %s, current %s)\n",
1119 state_string(state), state_string(cur_state));
1121 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1122 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1124 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1130 /* ILK FDI PLL is always enabled */
1131 if (INTEL_INFO(dev_priv->dev)->gen == 5)
1134 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1135 if (HAS_DDI(dev_priv->dev))
1138 reg = FDI_TX_CTL(pipe);
1139 val = I915_READ(reg);
1140 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1143 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1144 enum pipe pipe, bool state)
1150 reg = FDI_RX_CTL(pipe);
1151 val = I915_READ(reg);
1152 cur_state = !!(val & FDI_RX_PLL_ENABLE);
1153 I915_STATE_WARN(cur_state != state,
1154 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1155 state_string(state), state_string(cur_state));
/*
 * assert_panel_unlocked - state-warn if the panel power sequencer registers
 * for the panel driving @pipe are write-locked while the panel is powered
 * on. Picks the platform's PP control register and derives which pipe the
 * panel is attached to from the port/pipe select bits.
 */
1158 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1161 struct drm_device *dev = dev_priv->dev;
1164 enum pipe panel_pipe = PIPE_A;
1167 if (WARN_ON(HAS_DDI(dev)))
1170 if (HAS_PCH_SPLIT(dev)) {
1173 pp_reg = PCH_PP_CONTROL;
1174 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1176 if (port_sel == PANEL_PORT_SELECT_LVDS &&
1177 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1178 panel_pipe = PIPE_B;
1179 /* XXX: else fix for eDP */
1180 } else if (IS_VALLEYVIEW(dev)) {
1181 /* presumably write lock depends on pipe, not port select */
1182 pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1185 pp_reg = PP_CONTROL;
1186 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1187 panel_pipe = PIPE_B;
1190 val = I915_READ(pp_reg);
/* powered off, or explicitly unlocked, is fine */
1191 if (!(val & PANEL_POWER_ON) ||
1192 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1195 I915_STATE_WARN(panel_pipe == pipe && locked,
1196 "panel assertion failure, pipe %c regs locked\n",
/*
 * assert_cursor - state-warn unless the cursor enable on @pipe matches
 * @state. 845G/865G expose a single cursor control register (_CURACNTR)
 * instead of per-pipe ones.
 */
1200 static void assert_cursor(struct drm_i915_private *dev_priv,
1201 enum pipe pipe, bool state)
1203 struct drm_device *dev = dev_priv->dev;
1206 if (IS_845G(dev) || IS_I865G(dev))
1207 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1209 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1211 I915_STATE_WARN(cur_state != state,
1212 "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1213 pipe_name(pipe), state_string(state), state_string(cur_state));
/* Convenience wrappers for asserting cursor enable state on a pipe. */
1215 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1216 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
/*
 * assert_pipe - state-warn unless the pipe's PIPECONF enable bit matches
 * @state. Skips the check when a quirk forces the pipe always-on, or when
 * the transcoder's power domain is off (register not readable then).
 */
1218 void assert_pipe(struct drm_i915_private *dev_priv,
1219 enum pipe pipe, bool state)
1224 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1227 /* if we need the pipe quirk it must be always on */
1228 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1229 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1232 if (!intel_display_power_is_enabled(dev_priv,
1233 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1236 reg = PIPECONF(cpu_transcoder);
1237 val = I915_READ(reg);
1238 cur_state = !!(val & PIPECONF_ENABLE);
1241 I915_STATE_WARN(cur_state != state,
1242 "pipe %c assertion failure (expected %s, current %s)\n",
1243 pipe_name(pipe), state_string(state), state_string(cur_state));
/*
 * assert_plane - state-warn unless the primary plane's DSPCNTR enable bit
 * matches the expected @state.
 */
1246 static void assert_plane(struct drm_i915_private *dev_priv,
1247 enum plane plane, bool state)
1253 reg = DSPCNTR(plane);
1254 val = I915_READ(reg);
1255 cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1256 I915_STATE_WARN(cur_state != state,
1257 "plane %c assertion failure (expected %s, current %s)\n",
1258 plane_name(plane), state_string(state), state_string(cur_state));
/* Convenience wrappers for asserting primary plane enable state. */
1261 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1262 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
/*
 * assert_planes_disabled - state-warn if any primary plane is still enabled
 * on @pipe. On gen4+ planes are fixed to pipes, so only @pipe's own plane
 * is checked; on older gens every plane's pipe-select field is compared
 * against @pipe.
 */
1264 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1267 struct drm_device *dev = dev_priv->dev;
1272 /* Primary planes are fixed to pipes on gen4+ */
1273 if (INTEL_INFO(dev)->gen >= 4) {
1274 reg = DSPCNTR(pipe);
1275 val = I915_READ(reg);
1276 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
1277 "plane %c assertion failure, should be disabled but not\n",
1282 /* Need to check both planes against the pipe */
1283 for_each_pipe(dev_priv, i) {
1285 val = I915_READ(reg);
1286 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1287 DISPPLANE_SEL_PIPE_SHIFT;
1288 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1289 "plane %c assertion failure, should be off on pipe %c but is still active\n",
1290 plane_name(i), pipe_name(pipe));
/*
 * assert_sprites_disabled - state-warn if any sprite plane on @pipe is
 * still enabled. The control register differs per generation: PLANE_CTL on
 * gen9+, SPCNTR on Valleyview, SPRITE control on gen7/8, DVSCNTR on gen5/6.
 */
1294 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1297 struct drm_device *dev = dev_priv->dev;
1301 if (INTEL_INFO(dev)->gen >= 9) {
1302 for_each_sprite(pipe, sprite) {
1303 val = I915_READ(PLANE_CTL(pipe, sprite));
1304 I915_STATE_WARN(val & PLANE_CTL_ENABLE,
1305 "plane %d assertion failure, should be off on pipe %c but is still active\n",
1306 sprite, pipe_name(pipe));
1308 } else if (IS_VALLEYVIEW(dev)) {
1309 for_each_sprite(pipe, sprite) {
1310 reg = SPCNTR(pipe, sprite);
1311 val = I915_READ(reg);
1312 I915_STATE_WARN(val & SP_ENABLE,
1313 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1314 sprite_name(pipe, sprite), pipe_name(pipe));
1316 } else if (INTEL_INFO(dev)->gen >= 7) {
1318 val = I915_READ(reg);
1319 I915_STATE_WARN(val & SPRITE_ENABLE,
1320 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1321 plane_name(pipe), pipe_name(pipe));
1322 } else if (INTEL_INFO(dev)->gen >= 5) {
1323 reg = DVSCNTR(pipe);
1324 val = I915_READ(reg);
1325 I915_STATE_WARN(val & DVS_ENABLE,
1326 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1327 plane_name(pipe), pipe_name(pipe));
/*
 * assert_vblank_disabled - state-warn if drm_crtc_vblank_get() succeeds on
 * @crtc (i.e. vblanks were not left disabled); drops the reference it took
 * so the warning has no side effect.
 */
1331 static void assert_vblank_disabled(struct drm_crtc *crtc)
1333 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1334 drm_crtc_vblank_put(crtc);
/*
 * ibx_assert_pch_refclk_enabled - state-warn unless at least one PCH
 * display reference clock source (SSC / non-spread / superspread) is
 * enabled. Only meaningful on IBX/CPT PCHs, which is also asserted.
 */
1337 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1342 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1344 val = I915_READ(PCH_DREF_CONTROL);
1345 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1346 DREF_SUPERSPREAD_SOURCE_MASK));
1347 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
/* State-warn if the PCH transcoder for @pipe is still enabled. */
1350 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1357 reg = PCH_TRANSCONF(pipe);
1358 val = I915_READ(reg);
1359 enabled = !!(val & TRANS_ENABLE);
1360 I915_STATE_WARN(enabled,
1361 "transcoder assertion failed, should be off on pipe %c but is still active\n",
/*
 * dp_pipe_enabled - report whether the DP port whose control value is @val
 * is enabled and currently routed to @pipe. On CPT the routing lives in the
 * per-pipe transcoder DP control register (compared against @port_sel); on
 * CHV and older parts it lives in the port register's pipe-select bits.
 */
1365 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1366 enum pipe pipe, u32 port_sel, u32 val)
1368 if ((val & DP_PORT_EN) == 0)
1371 if (HAS_PCH_CPT(dev_priv->dev)) {
1372 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1373 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1374 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1376 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1377 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1380 if ((val & DP_PIPE_MASK) != (pipe << 30))
/*
 * hdmi_pipe_enabled - report whether the HDMI/SDVO port in @val is enabled
 * and its pipe-select bits point at @pipe (field layout varies between
 * CPT, CHV, and the classic SDVO_PIPE_SEL field).
 */
1386 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1387 enum pipe pipe, u32 val)
1389 if ((val & SDVO_ENABLE) == 0)
1392 if (HAS_PCH_CPT(dev_priv->dev)) {
1393 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1395 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1396 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1399 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
/* Report whether the LVDS port in @val is enabled and routed to @pipe. */
1405 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1406 enum pipe pipe, u32 val)
1408 if ((val & LVDS_PORT_EN) == 0)
1411 if (HAS_PCH_CPT(dev_priv->dev)) {
1412 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1415 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
/* Report whether the VGA/ADPA DAC in @val is enabled and routed to @pipe. */
1421 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1422 enum pipe pipe, u32 val)
1424 if ((val & ADPA_DAC_ENABLE) == 0)
1426 if (HAS_PCH_CPT(dev_priv->dev)) {
1427 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1430 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
/*
 * assert_pch_dp_disabled - state-warn if the PCH DP port at @reg is enabled
 * on @pipe's transcoder, or (IBX) left selecting transcoder B while
 * disabled, which the hardware does not tolerate.
 */
1436 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1437 enum pipe pipe, int reg, u32 port_sel)
1439 u32 val = I915_READ(reg);
1440 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1441 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1442 reg, pipe_name(pipe));
1444 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1445 && (val & DP_PIPEB_SELECT),
1446 "IBX PCH dp port still using transcoder B\n");
/*
 * assert_pch_hdmi_disabled - state-warn if the PCH HDMI port at @reg is
 * enabled on @pipe's transcoder, or (IBX) left selecting transcoder B
 * while disabled.
 */
1449 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1450 enum pipe pipe, int reg)
1452 u32 val = I915_READ(reg);
1453 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1454 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1455 reg, pipe_name(pipe));
1457 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1458 && (val & SDVO_PIPE_B_SELECT),
1459 "IBX PCH hdmi port still using transcoder B\n");
/*
 * assert_pch_ports_disabled - state-warn if any PCH-connected port
 * (DP B/C/D, VGA, LVDS, HDMI B/C/D) is still enabled on @pipe's transcoder.
 */
1462 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1468 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1469 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1470 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1473 val = I915_READ(reg);
1474 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1475 "PCH VGA enabled on transcoder %c, should be disabled\n",
1479 val = I915_READ(reg);
1480 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1481 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1484 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1485 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1486 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
/*
 * intel_init_dpio - set up the IOSF sideband port mapping for the DPIO
 * PHYs. No-op on anything but VLV/CHV; CHV has two PHYs reached through
 * two different IOSF ports.
 */
1489 static void intel_init_dpio(struct drm_device *dev)
1491 struct drm_i915_private *dev_priv = dev->dev_private;
1493 if (!IS_VALLEYVIEW(dev))
1497 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1498 * CHV x1 PHY (DP/HDMI D)
1499 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1501 if (IS_CHERRYVIEW(dev)) {
1502 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1503 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1505 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
/*
 * vlv_enable_pll - program and enable @crtc's DPLL on Valleyview.
 * The pipe must be disabled and, on mobile parts, the panel power
 * sequencer unlocked before the PLL registers may be written.
 */
1509 static void vlv_enable_pll(struct intel_crtc *crtc,
1510 const struct intel_crtc_state *pipe_config)
1512 struct drm_device *dev = crtc->base.dev;
1513 struct drm_i915_private *dev_priv = dev->dev_private;
1514 int reg = DPLL(crtc->pipe);
1515 u32 dpll = pipe_config->dpll_hw_state.dpll;
1517 assert_pipe_disabled(dev_priv, crtc->pipe);
1519 /* No really, not for ILK+ */
1520 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1522 /* PLL is protected by panel, make sure we can write it */
1523 if (IS_MOBILE(dev_priv->dev))
1524 assert_panel_unlocked(dev_priv, crtc->pipe);
1526 I915_WRITE(reg, dpll);
/* complain if the PLL fails to report lock within the wait_for timeout */
1530 if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1531 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1533 I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
1534 POSTING_READ(DPLL_MD(crtc->pipe));
1536 /* We do this three times for luck */
1537 I915_WRITE(reg, dpll);
1539 udelay(150); /* wait for warmup */
1540 I915_WRITE(reg, dpll);
1542 udelay(150); /* wait for warmup */
1543 I915_WRITE(reg, dpll);
1545 udelay(150); /* wait for warmup */
/*
 * chv_enable_pll - program and enable @crtc's DPLL on Cherryview.
 * Ungates the 10-bit DPIO clock over the sideband first, then enables the
 * PLL and waits for lock. All sideband traffic is under dpio_lock.
 */
1548 static void chv_enable_pll(struct intel_crtc *crtc,
1549 const struct intel_crtc_state *pipe_config)
1551 struct drm_device *dev = crtc->base.dev;
1552 struct drm_i915_private *dev_priv = dev->dev_private;
1553 int pipe = crtc->pipe;
1554 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1557 assert_pipe_disabled(dev_priv, crtc->pipe);
1559 BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1561 mutex_lock(&dev_priv->dpio_lock);
1563 /* Enable back the 10bit clock to display controller */
1564 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1565 tmp |= DPIO_DCLKP_EN;
1566 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1569 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1574 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1576 /* Check PLL is locked */
1577 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1578 DRM_ERROR("PLL %d failed to lock\n", pipe);
1580 /* not sure when this should be written */
1581 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1582 POSTING_READ(DPLL_MD(pipe));
1584 mutex_unlock(&dev_priv->dpio_lock);
/* Count the active CRTCs that are currently driving a DVO output. */
1587 static int intel_num_dvo_pipes(struct drm_device *dev)
1589 struct intel_crtc *crtc;
1592 for_each_intel_crtc(dev, crtc)
1593 count += crtc->active &&
1594 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
/*
 * i9xx_enable_pll - program and enable @crtc's DPLL on pre-ILK hardware.
 * Handles the I830 DVO 2x-clock quirk (must be set on both PLLs) and the
 * gen4+ requirement to rewrite the pixel multiplier after the clocks are
 * stable.
 */
1599 static void i9xx_enable_pll(struct intel_crtc *crtc)
1601 struct drm_device *dev = crtc->base.dev;
1602 struct drm_i915_private *dev_priv = dev->dev_private;
1603 int reg = DPLL(crtc->pipe);
1604 u32 dpll = crtc->config->dpll_hw_state.dpll;
1606 assert_pipe_disabled(dev_priv, crtc->pipe);
1608 /* No really, not for ILK+ */
1609 BUG_ON(INTEL_INFO(dev)->gen >= 5);
1611 /* PLL is protected by panel, make sure we can write it */
1612 if (IS_MOBILE(dev) && !IS_I830(dev))
1613 assert_panel_unlocked(dev_priv, crtc->pipe);
1615 /* Enable DVO 2x clock on both PLLs if necessary */
1616 if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1618 * It appears to be important that we don't enable this
1619 * for the current pipe before otherwise configuring the
1620 * PLL. No idea how this should be handled if multiple
1621 * DVO outputs are enabled simultaneosly.
1623 dpll |= DPLL_DVO_2X_MODE;
1624 I915_WRITE(DPLL(!crtc->pipe),
1625 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1628 /* Wait for the clocks to stabilize. */
1632 if (INTEL_INFO(dev)->gen >= 4) {
1633 I915_WRITE(DPLL_MD(crtc->pipe),
1634 crtc->config->dpll_hw_state.dpll_md);
1636 /* The pixel multiplier can only be updated once the
1637 * DPLL is enabled and the clocks are stable.
1639 * So write it again.
1641 I915_WRITE(reg, dpll);
1644 /* We do this three times for luck */
1645 I915_WRITE(reg, dpll);
1647 udelay(150); /* wait for warmup */
1648 I915_WRITE(reg, dpll);
1650 udelay(150); /* wait for warmup */
1651 I915_WRITE(reg, dpll);
1653 udelay(150); /* wait for warmup */
 * i9xx_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 * Disable the PLL for @pipe, making sure the pipe is off first.
 * Note! This is for pre-ILK only.
1665 static void i9xx_disable_pll(struct intel_crtc *crtc)
1667 struct drm_device *dev = crtc->base.dev;
1668 struct drm_i915_private *dev_priv = dev->dev_private;
1669 enum pipe pipe = crtc->pipe;
1671 /* Disable DVO 2x clock on both PLLs if necessary */
/* only when this was the last active DVO pipe; the bit is set on both PLLs */
1673 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1674 intel_num_dvo_pipes(dev) == 1) {
1675 I915_WRITE(DPLL(PIPE_B),
1676 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1677 I915_WRITE(DPLL(PIPE_A),
1678 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1681 /* Don't disable pipe or pipe PLLs if needed */
1682 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1683 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1686 /* Make sure the pipe isn't still relying on us */
1687 assert_pipe_disabled(dev_priv, pipe);
1689 I915_WRITE(DPLL(pipe), 0);
1690 POSTING_READ(DPLL(pipe));
/*
 * vlv_disable_pll - disable the DPLL for @pipe on Valleyview while leaving
 * the integrated CRI clock source and the refclock running (the latter is
 * needed for VGA hotplug / manual detection).
 */
1693 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1697 /* Make sure the pipe isn't still relying on us */
1698 assert_pipe_disabled(dev_priv, pipe);
1701 * Leave integrated clock source and reference clock enabled for pipe B.
1702 * The latter is needed for VGA hotplug / manual detection.
1705 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1706 I915_WRITE(DPLL(pipe), val);
1707 POSTING_READ(DPLL(pipe));
1711 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1713 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1716 /* Make sure the pipe isn't still relying on us */
1717 assert_pipe_disabled(dev_priv, pipe);
1719 /* Set PLL en = 0 */
1720 val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
1722 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1723 I915_WRITE(DPLL(pipe), val);
1724 POSTING_READ(DPLL(pipe));
1726 mutex_lock(&dev_priv->dpio_lock);
1728 /* Disable 10bit clock to display controller */
1729 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1730 val &= ~DPIO_DCLKP_EN;
1731 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1733 /* disable left/right clock distribution */
1734 if (pipe != PIPE_B) {
1735 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1736 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1737 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1739 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1740 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1741 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1744 mutex_unlock(&dev_priv->dpio_lock);
/*
 * vlv_wait_port_ready - wait (up to 1000 ms) until the PHY status register
 * reports @dport's lanes ready (ready == mask bits clear); WARN on timeout.
 */
1747 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1748 struct intel_digital_port *dport)
1753 switch (dport->port) {
1755 port_mask = DPLL_PORTB_READY_MASK;
1759 port_mask = DPLL_PORTC_READY_MASK;
1763 port_mask = DPLL_PORTD_READY_MASK;
1764 dpll_reg = DPIO_PHY_STATUS;
1770 if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
1771 WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1772 port_name(dport->port), I915_READ(dpll_reg));
/*
 * intel_prepare_shared_dpll - run @crtc's shared DPLL mode_set hook.
 * Only touches the hardware when no CRTC currently has the PLL active.
 */
1775 static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1777 struct drm_device *dev = crtc->base.dev;
1778 struct drm_i915_private *dev_priv = dev->dev_private;
1779 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1781 if (WARN_ON(pll == NULL))
1784 WARN_ON(!pll->config.crtc_mask);
1785 if (pll->active == 0) {
1786 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1788 assert_shared_dpll_disabled(dev_priv, pll);
1790 pll->mode_set(dev_priv, pll);
 * intel_enable_shared_dpll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
1802 static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1804 struct drm_device *dev = crtc->base.dev;
1805 struct drm_i915_private *dev_priv = dev->dev_private;
1806 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1808 if (WARN_ON(pll == NULL))
1811 if (WARN_ON(pll->config.crtc_mask == 0))
1814 DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1815 pll->name, pll->active, pll->on,
1816 crtc->base.base.id);
/* refcounted: only enable the hardware on the 0 -> 1 transition */
1818 if (pll->active++) {
1820 assert_shared_dpll_enabled(dev_priv, pll);
1825 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1827 DRM_DEBUG_KMS("enabling %s\n", pll->name);
1828 pll->enable(dev_priv, pll);
/*
 * intel_disable_shared_dpll - drop one active reference on @crtc's shared
 * DPLL; the hardware is disabled and the PLLS power domain released on the
 * last reference. PCH PLLs only exist on ILK+ hardware.
 */
1832 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1834 struct drm_device *dev = crtc->base.dev;
1835 struct drm_i915_private *dev_priv = dev->dev_private;
1836 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1838 /* PCH only available on ILK+ */
1839 BUG_ON(INTEL_INFO(dev)->gen < 5);
1840 if (WARN_ON(pll == NULL))
1843 if (WARN_ON(pll->config.crtc_mask == 0))
1846 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1847 pll->name, pll->active, pll->on,
1848 crtc->base.base.id);
1850 if (WARN_ON(pll->active == 0)) {
1851 assert_shared_dpll_disabled(dev_priv, pll);
1855 assert_shared_dpll_enabled(dev_priv, pll);
1860 DRM_DEBUG_KMS("disabling %s\n", pll->name);
1861 pll->disable(dev_priv, pll);
1864 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
/*
 * ironlake_enable_pch_transcoder - enable the PCH transcoder for @pipe.
 * Requires the shared DPLL and both FDI TX/RX to be running first. Copies
 * the BPC and interlace configuration from the CPU PIPECONF into the PCH
 * transcoder, then waits for the enable to take effect.
 */
1867 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1870 struct drm_device *dev = dev_priv->dev;
1871 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1872 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1873 uint32_t reg, val, pipeconf_val;
1875 /* PCH only available on ILK+ */
1876 BUG_ON(!HAS_PCH_SPLIT(dev));
1878 /* Make sure PCH DPLL is enabled */
1879 assert_shared_dpll_enabled(dev_priv,
1880 intel_crtc_to_shared_dpll(intel_crtc));
1882 /* FDI must be feeding us bits for PCH ports */
1883 assert_fdi_tx_enabled(dev_priv, pipe);
1884 assert_fdi_rx_enabled(dev_priv, pipe);
1886 if (HAS_PCH_CPT(dev)) {
1887 /* Workaround: Set the timing override bit before enabling the
1888 * pch transcoder. */
1889 reg = TRANS_CHICKEN2(pipe);
1890 val = I915_READ(reg);
1891 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1892 I915_WRITE(reg, val);
1895 reg = PCH_TRANSCONF(pipe);
1896 val = I915_READ(reg);
1897 pipeconf_val = I915_READ(PIPECONF(pipe));
1899 if (HAS_PCH_IBX(dev_priv->dev)) {
1901 * make the BPC in transcoder be consistent with
1902 * that in pipeconf reg.
1904 val &= ~PIPECONF_BPC_MASK;
1905 val |= pipeconf_val & PIPECONF_BPC_MASK;
1908 val &= ~TRANS_INTERLACE_MASK;
1909 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1910 if (HAS_PCH_IBX(dev_priv->dev) &&
1911 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
1912 val |= TRANS_LEGACY_INTERLACED_ILK;
1914 val |= TRANS_INTERLACED;
1916 val |= TRANS_PROGRESSIVE;
1918 I915_WRITE(reg, val | TRANS_ENABLE);
1919 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1920 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
/*
 * lpt_enable_pch_transcoder - enable the single LPT PCH transcoder, fed by
 * @cpu_transcoder. FDI RX is always on transcoder A on LPT. Mirrors the
 * CPU PIPECONF interlace setting and waits for the enable to stick.
 */
1923 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1924 enum transcoder cpu_transcoder)
1926 u32 val, pipeconf_val;
1928 /* PCH only available on ILK+ */
1929 BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
1931 /* FDI must be feeding us bits for PCH ports */
1932 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1933 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1935 /* Workaround: set timing override bit. */
1936 val = I915_READ(_TRANSA_CHICKEN2);
1937 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1938 I915_WRITE(_TRANSA_CHICKEN2, val);
1941 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1943 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1944 PIPECONF_INTERLACED_ILK)
1945 val |= TRANS_INTERLACED;
1947 val |= TRANS_PROGRESSIVE;
1949 I915_WRITE(LPT_TRANSCONF, val);
1950 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1951 DRM_ERROR("Failed to enable PCH transcoder\n");
/*
 * ironlake_disable_pch_transcoder - disable the PCH transcoder for @pipe.
 * FDI and all PCH ports must already be off; waits for the transcoder to
 * report disabled, then (non-IBX) clears the timing override chicken bit.
 */
1954 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1957 struct drm_device *dev = dev_priv->dev;
1960 /* FDI relies on the transcoder */
1961 assert_fdi_tx_disabled(dev_priv, pipe);
1962 assert_fdi_rx_disabled(dev_priv, pipe);
1964 /* Ports must be off as well */
1965 assert_pch_ports_disabled(dev_priv, pipe);
1967 reg = PCH_TRANSCONF(pipe);
1968 val = I915_READ(reg);
1969 val &= ~TRANS_ENABLE;
1970 I915_WRITE(reg, val);
1971 /* wait for PCH transcoder off, transcoder state */
1972 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1973 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1975 if (!HAS_PCH_IBX(dev)) {
1976 /* Workaround: Clear the timing override chicken bit again. */
1977 reg = TRANS_CHICKEN2(pipe);
1978 val = I915_READ(reg);
1979 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1980 I915_WRITE(reg, val);
/*
 * lpt_disable_pch_transcoder - disable the LPT PCH transcoder, wait for it
 * to report off, then clear the timing override workaround bit.
 */
1984 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1988 val = I915_READ(LPT_TRANSCONF);
1989 val &= ~TRANS_ENABLE;
1990 I915_WRITE(LPT_TRANSCONF, val);
1991 /* wait for PCH transcoder off, transcoder state */
1992 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1993 DRM_ERROR("Failed to disable PCH transcoder\n");
1995 /* Workaround: clear timing override bit. */
1996 val = I915_READ(_TRANSA_CHICKEN2);
1997 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1998 I915_WRITE(_TRANSA_CHICKEN2, val);
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2008 static void intel_enable_pipe(struct intel_crtc *crtc)
2010 struct drm_device *dev = crtc->base.dev;
2011 struct drm_i915_private *dev_priv = dev->dev_private;
2012 enum pipe pipe = crtc->pipe;
2013 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2015 enum pipe pch_transcoder;
2019 assert_planes_disabled(dev_priv, pipe);
2020 assert_cursor_disabled(dev_priv, pipe);
2021 assert_sprites_disabled(dev_priv, pipe);
/* LPT has a single PCH transcoder, always fed from transcoder A */
2023 if (HAS_PCH_LPT(dev_priv->dev))
2024 pch_transcoder = TRANSCODER_A;
2026 pch_transcoder = pipe;
2029 * A pipe without a PLL won't actually be able to drive bits from
2030 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
2033 if (!HAS_PCH_SPLIT(dev_priv->dev))
2034 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
2035 assert_dsi_pll_enabled(dev_priv);
2037 assert_pll_enabled(dev_priv, pipe);
2039 if (crtc->config->has_pch_encoder) {
2040 /* if driving the PCH, we need FDI enabled */
2041 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2042 assert_fdi_tx_pll_enabled(dev_priv,
2043 (enum pipe) cpu_transcoder);
2045 /* FIXME: assert CPU port conditions for SNB+ */
2048 reg = PIPECONF(cpu_transcoder);
2049 val = I915_READ(reg);
/* already enabled is only legitimate under the force-on pipe quirks */
2050 if (val & PIPECONF_ENABLE) {
2051 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2052 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2056 I915_WRITE(reg, val | PIPECONF_ENABLE);
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipes is to be disabled
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 * Will wait until the pipe has shut down before returning.
2070 static void intel_disable_pipe(struct intel_crtc *crtc)
2072 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2073 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2074 enum pipe pipe = crtc->pipe;
2079 * Make sure planes won't keep trying to pump pixels to us,
2080 * or we might hang the display.
2082 assert_planes_disabled(dev_priv, pipe);
2083 assert_cursor_disabled(dev_priv, pipe);
2084 assert_sprites_disabled(dev_priv, pipe);
2086 reg = PIPECONF(cpu_transcoder);
2087 val = I915_READ(reg);
2088 if ((val & PIPECONF_ENABLE) == 0)
2092 * Double wide has implications for planes
2093 * so best keep it disabled when not needed.
2095 if (crtc->config->double_wide)
2096 val &= ~PIPECONF_DOUBLE_WIDE;
2098 /* Don't disable pipe or pipe PLLs if needed */
2099 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2100 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2101 val &= ~PIPECONF_ENABLE;
2103 I915_WRITE(reg, val);
/* only wait for shutdown if we actually cleared the enable bit */
2104 if ((val & PIPECONF_ENABLE) == 0)
2105 intel_wait_for_pipe_off(crtc);
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch. The display address reg provides this.
2112 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2115 struct drm_device *dev = dev_priv->dev;
/* self-write of the surface/address register arms the double-buffer latch */
2116 u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
2118 I915_WRITE(reg, I915_READ(reg));
 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
 * @plane: plane to be enabled
 * @crtc: crtc for the plane
 * Enable @plane on @crtc, making sure that the pipe is running first.
2129 static void intel_enable_primary_hw_plane(struct drm_plane *plane,
2130 struct drm_crtc *crtc)
2132 struct drm_device *dev = plane->dev;
2133 struct drm_i915_private *dev_priv = dev->dev_private;
2134 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2136 /* If the pipe isn't enabled, we can't pump pixels and may hang */
2137 assert_pipe_enabled(dev_priv, intel_crtc->pipe);
/* nothing to do if the plane is already on */
2139 if (intel_crtc->primary_enabled)
2142 intel_crtc->primary_enabled = true;
2144 dev_priv->display.update_primary_plane(crtc, plane->fb,
2148 * BDW signals flip done immediately if the plane
2149 * is disabled, even if the plane enable is already
2150 * armed to occur at the next vblank :(
2152 if (IS_BROADWELL(dev))
2153 intel_wait_for_vblank(dev, intel_crtc->pipe);
 * intel_disable_primary_hw_plane - disable the primary hardware plane
 * @plane: plane to be disabled
 * @crtc: crtc for the plane
 * Disable @plane on @crtc, making sure that the pipe is running first.
2163 static void intel_disable_primary_hw_plane(struct drm_plane *plane,
2164 struct drm_crtc *crtc)
2166 struct drm_device *dev = plane->dev;
2167 struct drm_i915_private *dev_priv = dev->dev_private;
2168 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2170 if (WARN_ON(!intel_crtc->active))
/* nothing to do if the plane is already off */
2173 if (!intel_crtc->primary_enabled)
2176 intel_crtc->primary_enabled = false;
2178 dev_priv->display.update_primary_plane(crtc, plane->fb,
/*
 * need_vtd_wa - whether the VT-d scanout workaround (extra alignment) is
 * needed: gen6+ with the IOMMU mapping graphics (CONFIG_INTEL_IOMMU).
 */
2182 static bool need_vtd_wa(struct drm_device *dev)
2184 #ifdef CONFIG_INTEL_IOMMU
2185 if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
/*
 * intel_fb_align_height - round @height up to the tile height:
 * 16 rows on gen2, 8 on later gens for tiled buffers, 1 (no-op) for linear.
 */
2192 intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling)
2196 tile_height = tiling ? (IS_GEN2(dev) ? 16 : 8) : 1;
2197 return ALIGN(height, tile_height);
/*
 * intel_pin_and_fence_fb_obj - pin @fb's backing object for scanout and
 * install a fence register for tiled buffers. Picks the display-base
 * alignment per generation/tiling (bumped to 256K under the VT-d w/a).
 * Caller must hold struct_mutex; pin/fence are wrapped in a runtime-pm
 * reference with interruptible waits disabled.
 */
2201 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2202 struct drm_framebuffer *fb,
2203 struct intel_engine_cs *pipelined)
2205 struct drm_device *dev = fb->dev;
2206 struct drm_i915_private *dev_priv = dev->dev_private;
2207 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2211 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2213 switch (obj->tiling_mode) {
2214 case I915_TILING_NONE:
2215 if (INTEL_INFO(dev)->gen >= 9)
2216 alignment = 256 * 1024;
2217 else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2218 alignment = 128 * 1024;
2219 else if (INTEL_INFO(dev)->gen >= 4)
2220 alignment = 4 * 1024;
2222 alignment = 64 * 1024;
2225 if (INTEL_INFO(dev)->gen >= 9)
2226 alignment = 256 * 1024;
2228 /* pin() will align the object as required by fence */
2233 WARN(1, "Y tiled bo slipped through, driver bug!\n");
2239 /* Note that the w/a also requires 64 PTE of padding following the
2240 * bo. We currently fill all unused PTE with the shadow page and so
2241 * we should always have valid PTE following the scanout preventing
2244 if (need_vtd_wa(dev) && alignment < 256 * 1024)
2245 alignment = 256 * 1024;
2248 * Global gtt pte registers are special registers which actually forward
2249 * writes to a chunk of system memory. Which means that there is no risk
2250 * that the register values disappear as soon as we call
2251 * intel_runtime_pm_put(), so it is correct to wrap only the
2252 * pin/unpin/fence and not more.
2254 intel_runtime_pm_get(dev_priv);
2256 dev_priv->mm.interruptible = false;
2257 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2259 goto err_interruptible;
2261 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2262 * fence, whereas 965+ only requires a fence if using
2263 * framebuffer compression. For simplicity, we always install
2264 * a fence as the cost is not that onerous.
2266 ret = i915_gem_object_get_fence(obj);
2270 i915_gem_object_pin_fence(obj);
2272 dev_priv->mm.interruptible = true;
2273 intel_runtime_pm_put(dev_priv);
2277 i915_gem_object_unpin_from_display_plane(obj);
2279 dev_priv->mm.interruptible = true;
2280 intel_runtime_pm_put(dev_priv);
/*
 * intel_unpin_fb_obj - release the fence and display-plane pin taken by
 * intel_pin_and_fence_fb_obj. Caller must hold struct_mutex.
 */
2284 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2286 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2288 i915_gem_object_unpin_fence(obj);
2289 i915_gem_object_unpin_from_display_plane(obj);
2292 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2293 * is assumed to be a power-of-two. */
2294 unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2295 unsigned int tiling_mode,
2299 if (tiling_mode != I915_TILING_NONE) {
2300 unsigned int tile_rows, tiles;
2305 tiles = *x / (512/cpp);
2308 return tile_rows * pitch * 8 + tiles * 4096;
2310 unsigned int offset;
2312 offset = *y * pitch + *x * cpp;
2314 *x = (offset & 4095) / cpp;
2315 return offset & -4096;
2319 static int i9xx_format_to_fourcc(int format)
2322 case DISPPLANE_8BPP:
2323 return DRM_FORMAT_C8;
2324 case DISPPLANE_BGRX555:
2325 return DRM_FORMAT_XRGB1555;
2326 case DISPPLANE_BGRX565:
2327 return DRM_FORMAT_RGB565;
2329 case DISPPLANE_BGRX888:
2330 return DRM_FORMAT_XRGB8888;
2331 case DISPPLANE_RGBX888:
2332 return DRM_FORMAT_XBGR8888;
2333 case DISPPLANE_BGRX101010:
2334 return DRM_FORMAT_XRGB2101010;
2335 case DISPPLANE_RGBX101010:
2336 return DRM_FORMAT_XBGR2101010;
2340 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2343 case PLANE_CTL_FORMAT_RGB_565:
2344 return DRM_FORMAT_RGB565;
2346 case PLANE_CTL_FORMAT_XRGB_8888:
2349 return DRM_FORMAT_ABGR8888;
2351 return DRM_FORMAT_XBGR8888;
2354 return DRM_FORMAT_ARGB8888;
2356 return DRM_FORMAT_XRGB8888;
2358 case PLANE_CTL_FORMAT_XRGB_2101010:
2360 return DRM_FORMAT_XBGR2101010;
2362 return DRM_FORMAT_XRGB2101010;
/*
 * intel_alloc_plane_obj - wrap the BIOS-programmed framebuffer described by
 * @plane_config (preallocated in stolen memory at plane_config->base) in a
 * GEM object and initialize the crtc's primary fb around it. Used as a
 * boolean success indicator by intel_find_plane_obj().
 */
2367 intel_alloc_plane_obj(struct intel_crtc *crtc,
2368 struct intel_initial_plane_config *plane_config)
2370 struct drm_device *dev = crtc->base.dev;
2371 struct drm_i915_gem_object *obj = NULL;
2372 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2373 u32 base = plane_config->base;
2375 if (plane_config->size == 0)
2378 obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
2379 plane_config->size);
2383 obj->tiling_mode = plane_config->tiling;
2384 if (obj->tiling_mode == I915_TILING_X)
2385 obj->stride = crtc->base.primary->fb->pitches[0];
/* mirror the fb parameters the BIOS left in the crtc's primary fb */
2387 mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
2388 mode_cmd.width = crtc->base.primary->fb->width;
2389 mode_cmd.height = crtc->base.primary->fb->height;
2390 mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
2392 mutex_lock(&dev->struct_mutex);
2394 if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
2396 DRM_DEBUG_KMS("intel fb init failed\n");
2400 obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
2401 mutex_unlock(&dev->struct_mutex);
2403 DRM_DEBUG_KMS("plane fb obj %p\n", obj);
2407 drm_gem_object_unreference(&obj->base);
2408 mutex_unlock(&dev->struct_mutex);
/*
 * Bind a framebuffer object to this CRTC's BIOS-inherited primary fb:
 * first try to wrap it in our own GEM object (intel_alloc_plane_obj());
 * failing that, scan the other CRTCs for one already scanning out from
 * the same GGTT address and share its fb. If neither works, the stale
 * fb is freed and primary->fb is cleared.
 */
2413 intel_find_plane_obj(struct intel_crtc *intel_crtc,
2414 struct intel_initial_plane_config *plane_config)
2416 struct drm_device *dev = intel_crtc->base.dev;
2417 struct drm_i915_private *dev_priv = dev->dev_private;
2419 struct intel_crtc *i;
2420 struct drm_i915_gem_object *obj;
/* No BIOS fb was reconstructed for this CRTC — nothing to do. */
2422 if (!intel_crtc->base.primary->fb)
2425 if (intel_alloc_plane_obj(intel_crtc, plane_config))
/* Allocation failed: release the placeholder fb before trying to share. */
2428 kfree(intel_crtc->base.primary->fb);
2429 intel_crtc->base.primary->fb = NULL;
2432 * Failed to alloc the obj, check to see if we should share
2433 * an fb with another CRTC instead
2435 for_each_crtc(dev, c) {
2436 i = to_intel_crtc(c);
/* Skip ourselves. */
2438 if (c == &intel_crtc->base)
2444 obj = intel_fb_obj(c->primary->fb);
/* Same scanout address: adopt that CRTC's fb and take a reference. */
2448 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
/* A tiled BIOS fb means we must keep the BIOS swizzle settings. */
2449 if (obj->tiling_mode != I915_TILING_NONE)
2450 dev_priv->preserve_bios_swizzle = true;
2452 drm_framebuffer_reference(c->primary->fb);
2453 intel_crtc->base.primary->fb = c->primary->fb;
2454 obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
/*
 * Program the primary plane registers (DSPCNTR/DSPSIZE/DSPSTRIDE/
 * DSPSURF/DSPADDR etc.) for pre-ILK platforms, VLV and CHV, for the
 * given fb and panning offset (x, y).
 */
2460 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2461 struct drm_framebuffer *fb,
2464 struct drm_device *dev = crtc->dev;
2465 struct drm_i915_private *dev_priv = dev->dev_private;
2466 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2467 struct drm_i915_gem_object *obj;
2468 int plane = intel_crtc->plane;
2469 unsigned long linear_offset;
2471 u32 reg = DSPCNTR(plane);
/* Plane disabled: clear the surface/address register and bail. */
2474 if (!intel_crtc->primary_enabled) {
2476 if (INTEL_INFO(dev)->gen >= 4)
2477 I915_WRITE(DSPSURF(plane), 0);
2479 I915_WRITE(DSPADDR(plane), 0);
2484 obj = intel_fb_obj(fb);
2485 if (WARN_ON(obj == NULL))
2488 pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2490 dspcntr = DISPPLANE_GAMMA_ENABLE;
2492 dspcntr |= DISPLAY_PLANE_ENABLE;
/* Gen < 4: plane/pipe association and size are programmed explicitly. */
2494 if (INTEL_INFO(dev)->gen < 4) {
2495 if (intel_crtc->pipe == PIPE_B)
2496 dspcntr |= DISPPLANE_SEL_PIPE_B;
2498 /* pipesrc and dspsize control the size that is scaled from,
2499 * which should always be the user's requested size.
2501 I915_WRITE(DSPSIZE(plane),
2502 ((intel_crtc->config->pipe_src_h - 1) << 16) |
2503 (intel_crtc->config->pipe_src_w - 1));
2504 I915_WRITE(DSPPOS(plane), 0);
/* CHV sprite-backed primary on PLANE_B uses the PRIM* registers. */
2505 } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2506 I915_WRITE(PRIMSIZE(plane),
2507 ((intel_crtc->config->pipe_src_h - 1) << 16) |
2508 (intel_crtc->config->pipe_src_w - 1));
2509 I915_WRITE(PRIMPOS(plane), 0);
2510 I915_WRITE(PRIMCNSTALPHA(plane), 0);
/* Translate the fb fourcc into the DSPCNTR format field. */
2513 switch (fb->pixel_format) {
2515 dspcntr |= DISPPLANE_8BPP;
2517 case DRM_FORMAT_XRGB1555:
2518 case DRM_FORMAT_ARGB1555:
2519 dspcntr |= DISPPLANE_BGRX555;
2521 case DRM_FORMAT_RGB565:
2522 dspcntr |= DISPPLANE_BGRX565;
2524 case DRM_FORMAT_XRGB8888:
2525 case DRM_FORMAT_ARGB8888:
2526 dspcntr |= DISPPLANE_BGRX888;
2528 case DRM_FORMAT_XBGR8888:
2529 case DRM_FORMAT_ABGR8888:
2530 dspcntr |= DISPPLANE_RGBX888;
2532 case DRM_FORMAT_XRGB2101010:
2533 case DRM_FORMAT_ARGB2101010:
2534 dspcntr |= DISPPLANE_BGRX101010;
2536 case DRM_FORMAT_XBGR2101010:
2537 case DRM_FORMAT_ABGR2101010:
2538 dspcntr |= DISPPLANE_RGBX101010;
2544 if (INTEL_INFO(dev)->gen >= 4 &&
2545 obj->tiling_mode != I915_TILING_NONE)
2546 dspcntr |= DISPPLANE_TILED;
2549 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
/* Byte offset of (x, y) within the (linear) fb. */
2551 linear_offset = y * fb->pitches[0] + x * pixel_size;
/* Gen4+: split into a page-aligned surface base plus intra-page x/y. */
2553 if (INTEL_INFO(dev)->gen >= 4) {
2554 intel_crtc->dspaddr_offset =
2555 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2558 linear_offset -= intel_crtc->dspaddr_offset;
2560 intel_crtc->dspaddr_offset = linear_offset;
/* 180-degree rotation: start scanout from the last pixel of the fb. */
2563 if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2564 dspcntr |= DISPPLANE_ROTATE_180;
2566 x += (intel_crtc->config->pipe_src_w - 1);
2567 y += (intel_crtc->config->pipe_src_h - 1);
2569 /* Finding the last pixel of the last line of the display
2570 data and adding to linear_offset*/
2572 (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2573 (intel_crtc->config->pipe_src_w - 1) * pixel_size;
2576 I915_WRITE(reg, dspcntr);
2578 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2579 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2581 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
/* DSPSURF write latches the update on gen4+; DSPADDR does so earlier. */
2582 if (INTEL_INFO(dev)->gen >= 4) {
2583 I915_WRITE(DSPSURF(plane),
2584 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2585 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2586 I915_WRITE(DSPLINOFF(plane), linear_offset);
2588 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
/*
 * Program the primary plane registers for ILK through BDW for the given
 * fb and panning offset (x, y). Same structure as the i9xx variant, but
 * always uses DSPSURF and adds the HSW/BDW pipe CSC enable.
 */
2592 static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2593 struct drm_framebuffer *fb,
2596 struct drm_device *dev = crtc->dev;
2597 struct drm_i915_private *dev_priv = dev->dev_private;
2598 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2599 struct drm_i915_gem_object *obj;
2600 int plane = intel_crtc->plane;
2601 unsigned long linear_offset;
2603 u32 reg = DSPCNTR(plane);
/* Plane disabled: clear the surface register and bail. */
2606 if (!intel_crtc->primary_enabled) {
2608 I915_WRITE(DSPSURF(plane), 0);
2613 obj = intel_fb_obj(fb);
2614 if (WARN_ON(obj == NULL))
2617 pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2619 dspcntr = DISPPLANE_GAMMA_ENABLE;
2621 dspcntr |= DISPLAY_PLANE_ENABLE;
2623 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2624 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
/* Translate the fb fourcc into the DSPCNTR format field. */
2626 switch (fb->pixel_format) {
2628 dspcntr |= DISPPLANE_8BPP;
2630 case DRM_FORMAT_RGB565:
2631 dspcntr |= DISPPLANE_BGRX565;
2633 case DRM_FORMAT_XRGB8888:
2634 case DRM_FORMAT_ARGB8888:
2635 dspcntr |= DISPPLANE_BGRX888;
2637 case DRM_FORMAT_XBGR8888:
2638 case DRM_FORMAT_ABGR8888:
2639 dspcntr |= DISPPLANE_RGBX888;
2641 case DRM_FORMAT_XRGB2101010:
2642 case DRM_FORMAT_ARGB2101010:
2643 dspcntr |= DISPPLANE_BGRX101010;
2645 case DRM_FORMAT_XBGR2101010:
2646 case DRM_FORMAT_ABGR2101010:
2647 dspcntr |= DISPPLANE_RGBX101010;
2653 if (obj->tiling_mode != I915_TILING_NONE)
2654 dspcntr |= DISPPLANE_TILED;
2656 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2657 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
/* Split the (x, y) pan into page-aligned base + intra-page offset. */
2659 linear_offset = y * fb->pitches[0] + x * pixel_size;
2660 intel_crtc->dspaddr_offset =
2661 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2664 linear_offset -= intel_crtc->dspaddr_offset;
/* 180-degree rotation; HSW/BDW handle the offset in hardware. */
2665 if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2666 dspcntr |= DISPPLANE_ROTATE_180;
2668 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2669 x += (intel_crtc->config->pipe_src_w - 1);
2670 y += (intel_crtc->config->pipe_src_h - 1);
2672 /* Finding the last pixel of the last line of the display
2673 data and adding to linear_offset*/
2675 (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2676 (intel_crtc->config->pipe_src_w - 1) * pixel_size;
2680 I915_WRITE(reg, dspcntr);
2682 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2683 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2685 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
/* DSPSURF write latches the whole plane update. */
2686 I915_WRITE(DSPSURF(plane),
2687 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
/* HSW/BDW use DSPOFFSET; older gens use DSPTILEOFF + DSPLINOFF. */
2688 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2689 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2691 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2692 I915_WRITE(DSPLINOFF(plane), linear_offset);
/*
 * Program the SKL+ universal plane 0 (primary) registers — PLANE_CTL,
 * PLANE_OFFSET, PLANE_SIZE, PLANE_STRIDE, PLANE_SURF — for the given fb
 * and panning offset (x, y).
 */
2697 static void skylake_update_primary_plane(struct drm_crtc *crtc,
2698 struct drm_framebuffer *fb,
2701 struct drm_device *dev = crtc->dev;
2702 struct drm_i915_private *dev_priv = dev->dev_private;
2703 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2704 struct intel_framebuffer *intel_fb;
2705 struct drm_i915_gem_object *obj;
2706 int pipe = intel_crtc->pipe;
2707 u32 plane_ctl, stride;
/* Plane disabled: clear control + surface and post the writes. */
2709 if (!intel_crtc->primary_enabled) {
2710 I915_WRITE(PLANE_CTL(pipe, 0), 0);
2711 I915_WRITE(PLANE_SURF(pipe, 0), 0);
2712 POSTING_READ(PLANE_CTL(pipe, 0));
2716 plane_ctl = PLANE_CTL_ENABLE |
2717 PLANE_CTL_PIPE_GAMMA_ENABLE |
2718 PLANE_CTL_PIPE_CSC_ENABLE;
/* Translate the fb fourcc into PLANE_CTL format + channel order bits. */
2720 switch (fb->pixel_format) {
2721 case DRM_FORMAT_RGB565:
2722 plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
2724 case DRM_FORMAT_XRGB8888:
2725 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2727 case DRM_FORMAT_XBGR8888:
2728 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2729 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2731 case DRM_FORMAT_XRGB2101010:
2732 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2734 case DRM_FORMAT_XBGR2101010:
2735 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2736 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2742 intel_fb = to_intel_framebuffer(fb);
2743 obj = intel_fb->obj;
2746 * The stride is either expressed as a multiple of 64 bytes chunks for
2747 * linear buffers or in number of tiles for tiled buffers.
2749 switch (obj->tiling_mode) {
2750 case I915_TILING_NONE:
2751 stride = fb->pitches[0] >> 6;
/* X-tiled: stride is in 512-byte tile widths. */
2754 plane_ctl |= PLANE_CTL_TILED_X;
2755 stride = fb->pitches[0] >> 9;
2761 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
2762 if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
2763 plane_ctl |= PLANE_CTL_ROTATE_180;
2765 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
2767 DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
2768 i915_gem_obj_ggtt_offset(obj),
2769 x, y, fb->width, fb->height,
2772 I915_WRITE(PLANE_POS(pipe, 0), 0);
2773 I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
2774 I915_WRITE(PLANE_SIZE(pipe, 0),
2775 (intel_crtc->config->pipe_src_h - 1) << 16 |
2776 (intel_crtc->config->pipe_src_w - 1));
2777 I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
/* PLANE_SURF write latches the whole update. */
2778 I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
2780 POSTING_READ(PLANE_SURF(pipe, 0));
2783 /* Assume fb object is pinned & idle & fenced and just update base pointers */
/*
 * Atomic (no-sleep) flavor of the plane base update, used from contexts
 * such as kgdb/panic handling: disables FBC if present and delegates the
 * register programming to the per-platform update_primary_plane() hook.
 */
2785 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2786 int x, int y, enum mode_set_atomic state)
2788 struct drm_device *dev = crtc->dev;
2789 struct drm_i915_private *dev_priv = dev->dev_private;
2791 if (dev_priv->display.disable_fbc)
2792 dev_priv->display.disable_fbc(dev);
2794 dev_priv->display.update_primary_plane(crtc, fb, x, y);
/*
 * Complete every pending page flip on every CRTC. Called after a GPU
 * reset, when ring-based flips will never signal on their own, so user
 * space still receives its flip-completion events.
 */
2799 static void intel_complete_page_flips(struct drm_device *dev)
2801 struct drm_crtc *crtc;
2803 for_each_crtc(dev, crtc) {
2804 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2805 enum plane plane = intel_crtc->plane;
2807 intel_prepare_page_flip(dev, plane);
2808 intel_finish_page_flip_plane(dev, plane);
/*
 * Rewrite the primary plane base registers of every active CRTC from its
 * current fb. Used after a GPU reset to make the display show the right
 * buffer again, since in-flight ring flips were nuked by the reset.
 */
2812 static void intel_update_primary_planes(struct drm_device *dev)
2814 struct drm_i915_private *dev_priv = dev->dev_private;
2815 struct drm_crtc *crtc;
2817 for_each_crtc(dev, crtc) {
2818 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2820 drm_modeset_lock(&crtc->mutex, NULL);
2822 * FIXME: Once we have proper support for primary planes (and
2823 * disabling them without disabling the entire crtc) allow again
2824 * a NULL crtc->primary->fb.
2826 if (intel_crtc->active && crtc->primary->fb)
2827 dev_priv->display.update_primary_plane(crtc,
2831 drm_modeset_unlock(&crtc->mutex);
/*
 * Quiesce the display before a GPU reset. On platforms whose reset
 * touches the display (pre-gen5, non-G4X) this takes all modeset locks
 * and gracefully disables every CRTC; intel_finish_reset() undoes it.
 */
2835 void intel_prepare_reset(struct drm_device *dev)
2837 struct drm_i915_private *dev_priv = to_i915(dev);
2838 struct intel_crtc *crtc;
2840 /* no reset support for gen2 */
2844 /* reset doesn't touch the display */
2845 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
2848 drm_modeset_lock_all(dev);
2851 * Disabling the crtcs gracefully seems nicer. Also the
2852 * g33 docs say we should at least disable all the planes.
2854 for_each_intel_crtc(dev, crtc) {
2856 dev_priv->display.crtc_disable(&crtc->base);
/*
 * Bring the display back after a GPU reset. Always completes stuck page
 * flips; then either just restores plane base addresses (gen5+/G4X,
 * where reset leaves the display alone) or performs a full display
 * re-initialization and hardware state readout (older platforms).
 * Pairs with intel_prepare_reset(), releasing the modeset locks.
 */
2860 void intel_finish_reset(struct drm_device *dev)
2862 struct drm_i915_private *dev_priv = to_i915(dev);
2865 * Flips in the rings will be nuked by the reset,
2866 * so complete all pending flips so that user space
2867 * will get its events and not get stuck.
2869 intel_complete_page_flips(dev);
2871 /* no reset support for gen2 */
2875 /* reset doesn't touch the display */
2876 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
2878 * Flips in the rings have been nuked by the reset,
2879 * so update the base address of all primary
2880 * planes to the the last fb to make sure we're
2881 * showing the correct fb after a reset.
2883 intel_update_primary_planes(dev);
2888 * The display has been reset as well,
2889 * so need a full re-initialization.
2891 intel_runtime_pm_disable_interrupts(dev_priv);
2892 intel_runtime_pm_enable_interrupts(dev_priv);
2894 intel_modeset_init_hw(dev);
/* Re-arm hotplug interrupt generation under the irq lock. */
2896 spin_lock_irq(&dev_priv->irq_lock);
2897 if (dev_priv->display.hpd_irq_setup)
2898 dev_priv->display.hpd_irq_setup(dev);
2899 spin_unlock_irq(&dev_priv->irq_lock);
2901 intel_modeset_setup_hw_state(dev, true);
2903 intel_hpd_init(dev_priv);
2905 drm_modeset_unlock_all(dev);
/*
 * Wait (non-interruptibly) for the GPU to finish rendering to the old
 * framebuffer's object before it is unpinned. The interruptible flag is
 * saved and restored around the wait.
 */
2909 intel_finish_fb(struct drm_framebuffer *old_fb)
2911 struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
2912 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2913 bool was_interruptible = dev_priv->mm.interruptible;
2916 /* Big Hammer, we also need to ensure that any pending
2917 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2918 * current scanout is retired before unpinning the old
2921 * This should only fail upon a hung GPU, in which case we
2922 * can safely continue.
2924 dev_priv->mm.interruptible = false;
2925 ret = i915_gem_object_finish_gpu(obj);
2926 dev_priv->mm.interruptible = was_interruptible;
/*
 * Report whether this CRTC has a page flip still in flight. A reset in
 * progress (or one having happened since the flip was queued, detected
 * via the reset counter) counts as "no pending flip" since the flip
 * will be completed by the reset path. unpin_work is sampled under the
 * event_lock.
 */
2931 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2933 struct drm_device *dev = crtc->dev;
2934 struct drm_i915_private *dev_priv = dev->dev_private;
2935 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2938 if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2939 intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2942 spin_lock_irq(&dev->event_lock);
2943 pending = to_intel_crtc(crtc)->unpin_work != NULL;
2944 spin_unlock_irq(&dev->event_lock);
/*
 * Fastboot helper: reprogram PIPESRC (and disable a stale panel fitter
 * for LVDS/eDP) from the adjusted mode, then update pipe_src_w/h, so a
 * flip-instead-of-modeset shows the fb at the right size.
 */
2949 static void intel_update_pipe_size(struct intel_crtc *crtc)
2951 struct drm_device *dev = crtc->base.dev;
2952 struct drm_i915_private *dev_priv = dev->dev_private;
2953 const struct drm_display_mode *adjusted_mode;
2959 * Update pipe size and adjust fitter if needed: the reason for this is
2960 * that in compute_mode_changes we check the native mode (not the pfit
2961 * mode) to see if we can flip rather than do a full mode set. In the
2962 * fastboot case, we'll flip, but if we don't update the pipesrc and
2963 * pfit state, we'll end up with a big fb scanned out into the wrong
2966 * To fix this properly, we need to hoist the checks up into
2967 * compute_mode_changes (or above), check the actual pfit state and
2968 * whether the platform allows pfit disable with pipe active, and only
2969 * then update the pipesrc and pfit state, even on the flip path.
2972 adjusted_mode = &crtc->config->base.adjusted_mode;
2974 I915_WRITE(PIPESRC(crtc->pipe),
2975 ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2976 (adjusted_mode->crtc_vdisplay - 1));
/* Panel fitter no longer needed for LVDS/eDP: turn it off. */
2977 if (!crtc->config->pch_pfit.enabled &&
2978 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2979 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2980 I915_WRITE(PF_CTL(crtc->pipe), 0);
2981 I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
2982 I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
2984 crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
2985 crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
/*
 * Switch the FDI link out of training patterns into normal pixel
 * transport on both the CPU TX side and the PCH RX side, with the
 * per-platform (IVB, CPT) register-field variants handled.
 */
2988 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2990 struct drm_device *dev = crtc->dev;
2991 struct drm_i915_private *dev_priv = dev->dev_private;
2992 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2993 int pipe = intel_crtc->pipe;
2996 /* enable normal train */
2997 reg = FDI_TX_CTL(pipe);
2998 temp = I915_READ(reg);
2999 if (IS_IVYBRIDGE(dev)) {
3000 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3001 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3003 temp &= ~FDI_LINK_TRAIN_NONE;
3004 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3006 I915_WRITE(reg, temp);
3008 reg = FDI_RX_CTL(pipe);
3009 temp = I915_READ(reg);
3010 if (HAS_PCH_CPT(dev)) {
3011 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3012 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3014 temp &= ~FDI_LINK_TRAIN_NONE;
3015 temp |= FDI_LINK_TRAIN_NONE;
3017 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3019 /* wait one idle pattern time */
3023 /* IVB wants error correction enabled */
3024 if (IS_IVYBRIDGE(dev))
3025 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3026 FDI_FE_ERRC_ENABLE);
/*
 * True when this CRTC is enabled, active, and driving a PCH encoder —
 * i.e. when its pipe actually consumes FDI lanes.
 */
3029 static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
3031 return crtc->base.enabled && crtc->active &&
3032 crtc->config->has_pch_encoder;
/*
 * IVB FDI lane bookkeeping: when neither pipe B nor pipe C drives a PCH
 * encoder, clear the B/C bifurcation bit in SOUTH_CHICKEN1 so FDI B can
 * later be brought up with all lanes. WARNs if either RX is still
 * enabled when we do so.
 */
3035 static void ivb_modeset_global_resources(struct drm_device *dev)
3037 struct drm_i915_private *dev_priv = dev->dev_private;
3038 struct intel_crtc *pipe_B_crtc =
3039 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
3040 struct intel_crtc *pipe_C_crtc =
3041 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
3045 * When everything is off disable fdi C so that we could enable fdi B
3046 * with all lanes. Note that we don't care about enabled pipes without
3047 * an enabled pch encoder.
3049 if (!pipe_has_enabled_pch(pipe_B_crtc) &&
3050 !pipe_has_enabled_pch(pipe_C_crtc)) {
3051 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3052 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3054 temp = I915_READ(SOUTH_CHICKEN1);
3055 temp &= ~FDI_BC_BIFURCATION_SELECT;
3056 DRM_DEBUG_KMS("disabling fdi C rx\n");
3057 I915_WRITE(SOUTH_CHICKEN1, temp);
3061 /* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the FDI link on ILK/Ibexpeak: unmask the RX lock-status
 * interrupts, enable TX/RX with training pattern 1, poll FDI_RX_IIR for
 * bit lock, switch both ends to pattern 2, and poll for symbol lock.
 * Each poll retries up to 5 times and logs an error on failure.
 */
3062 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3064 struct drm_device *dev = crtc->dev;
3065 struct drm_i915_private *dev_priv = dev->dev_private;
3066 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3067 int pipe = intel_crtc->pipe;
3068 u32 reg, temp, tries;
3070 /* FDI needs bits from pipe first */
3071 assert_pipe_enabled(dev_priv, pipe);
3073 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3075 reg = FDI_RX_IMR(pipe);
3076 temp = I915_READ(reg);
3077 temp &= ~FDI_RX_SYMBOL_LOCK;
3078 temp &= ~FDI_RX_BIT_LOCK;
3079 I915_WRITE(reg, temp);
3083 /* enable CPU FDI TX and PCH FDI RX */
3084 reg = FDI_TX_CTL(pipe);
3085 temp = I915_READ(reg);
3086 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3087 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3088 temp &= ~FDI_LINK_TRAIN_NONE;
3089 temp |= FDI_LINK_TRAIN_PATTERN_1;
3090 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3092 reg = FDI_RX_CTL(pipe);
3093 temp = I915_READ(reg);
3094 temp &= ~FDI_LINK_TRAIN_NONE;
3095 temp |= FDI_LINK_TRAIN_PATTERN_1;
3096 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3101 /* Ironlake workaround, enable clock pointer after FDI enable*/
3102 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3103 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3104 FDI_RX_PHASE_SYNC_POINTER_EN);
/* Poll for bit lock (training pattern 1 complete). */
3106 reg = FDI_RX_IIR(pipe);
3107 for (tries = 0; tries < 5; tries++) {
3108 temp = I915_READ(reg);
3109 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3111 if ((temp & FDI_RX_BIT_LOCK)) {
3112 DRM_DEBUG_KMS("FDI train 1 done.\n");
/* Write-1-to-clear the sticky lock bit. */
3113 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3118 DRM_ERROR("FDI train 1 fail!\n");
/* Switch both TX and RX to training pattern 2. */
3121 reg = FDI_TX_CTL(pipe);
3122 temp = I915_READ(reg);
3123 temp &= ~FDI_LINK_TRAIN_NONE;
3124 temp |= FDI_LINK_TRAIN_PATTERN_2;
3125 I915_WRITE(reg, temp);
3127 reg = FDI_RX_CTL(pipe);
3128 temp = I915_READ(reg);
3129 temp &= ~FDI_LINK_TRAIN_NONE;
3130 temp |= FDI_LINK_TRAIN_PATTERN_2;
3131 I915_WRITE(reg, temp);
/* Poll for symbol lock (training pattern 2 complete). */
3136 reg = FDI_RX_IIR(pipe);
3137 for (tries = 0; tries < 5; tries++) {
3138 temp = I915_READ(reg);
3139 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3141 if (temp & FDI_RX_SYMBOL_LOCK) {
3142 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3143 DRM_DEBUG_KMS("FDI train 2 done.\n");
3148 DRM_ERROR("FDI train 2 fail!\n");
3150 DRM_DEBUG_KMS("FDI train done\n");
/*
 * SNB "B" step FDI training parameters: voltage-swing / pre-emphasis
 * combinations tried in order during link training.
 */
3154 static const int snb_b_fdi_train_param[] = {
3155 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3156 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3157 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3158 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3161 /* The FDI link training functions for SNB/Cougarpoint. */
3162 static void gen6_fdi_link_train(struct drm_crtc *crtc)
3164 struct drm_device *dev = crtc->dev;
3165 struct drm_i915_private *dev_priv = dev->dev_private;
3166 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3167 int pipe = intel_crtc->pipe;
3168 u32 reg, temp, i, retry;
3170 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3172 reg = FDI_RX_IMR(pipe);
3173 temp = I915_READ(reg);
3174 temp &= ~FDI_RX_SYMBOL_LOCK;
3175 temp &= ~FDI_RX_BIT_LOCK;
3176 I915_WRITE(reg, temp);
3181 /* enable CPU FDI TX and PCH FDI RX */
3182 reg = FDI_TX_CTL(pipe);
3183 temp = I915_READ(reg);
3184 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3185 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3186 temp &= ~FDI_LINK_TRAIN_NONE;
3187 temp |= FDI_LINK_TRAIN_PATTERN_1;
3188 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3190 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3191 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3193 I915_WRITE(FDI_RX_MISC(pipe),
3194 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3196 reg = FDI_RX_CTL(pipe);
3197 temp = I915_READ(reg);
3198 if (HAS_PCH_CPT(dev)) {
3199 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3200 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3202 temp &= ~FDI_LINK_TRAIN_NONE;
3203 temp |= FDI_LINK_TRAIN_PATTERN_1;
3205 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3210 for (i = 0; i < 4; i++) {
3211 reg = FDI_TX_CTL(pipe);
3212 temp = I915_READ(reg);
3213 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3214 temp |= snb_b_fdi_train_param[i];
3215 I915_WRITE(reg, temp);
3220 for (retry = 0; retry < 5; retry++) {
3221 reg = FDI_RX_IIR(pipe);
3222 temp = I915_READ(reg);
3223 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3224 if (temp & FDI_RX_BIT_LOCK) {
3225 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3226 DRM_DEBUG_KMS("FDI train 1 done.\n");