git.codelabs.ch - muen/linux.git - drivers/gpu/drm/i915/intel_display.c
drm/i915: Rename plane_config to initial_plane_config
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_crtc_helper.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_rect.h>
44 #include <linux/dma_remapping.h>
45
46 /* Primary plane formats supported by all gen */
47 #define COMMON_PRIMARY_FORMATS \
48         DRM_FORMAT_C8, \
49         DRM_FORMAT_RGB565, \
50         DRM_FORMAT_XRGB8888, \
51         DRM_FORMAT_ARGB8888
52
53 /* Primary plane formats for gen <= 3 */
54 static const uint32_t intel_primary_formats_gen2[] = {
55         COMMON_PRIMARY_FORMATS,
56         DRM_FORMAT_XRGB1555,
57         DRM_FORMAT_ARGB1555,
58 };
59
60 /* Primary plane formats for gen >= 4 */
61 static const uint32_t intel_primary_formats_gen4[] = {
62         COMMON_PRIMARY_FORMATS,
63         DRM_FORMAT_XBGR8888,
64         DRM_FORMAT_ABGR8888,
65         DRM_FORMAT_XRGB2101010,
66         DRM_FORMAT_ARGB2101010,
67         DRM_FORMAT_XBGR2101010,
68         DRM_FORMAT_ABGR2101010,
69 };
70
71 /* Cursor formats */
72 static const uint32_t intel_cursor_formats[] = {
73         DRM_FORMAT_ARGB8888,
74 };
75
76 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
77
78 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
79                                 struct intel_crtc_state *pipe_config);
80 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
81                                    struct intel_crtc_state *pipe_config);
82
83 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
84                           int x, int y, struct drm_framebuffer *old_fb);
85 static int intel_framebuffer_init(struct drm_device *dev,
86                                   struct intel_framebuffer *ifb,
87                                   struct drm_mode_fb_cmd2 *mode_cmd,
88                                   struct drm_i915_gem_object *obj);
89 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
90 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
91 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
92                                          struct intel_link_m_n *m_n,
93                                          struct intel_link_m_n *m2_n2);
94 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
95 static void haswell_set_pipeconf(struct drm_crtc *crtc);
96 static void intel_set_pipe_csc(struct drm_crtc *crtc);
97 static void vlv_prepare_pll(struct intel_crtc *crtc,
98                             const struct intel_crtc_state *pipe_config);
99 static void chv_prepare_pll(struct intel_crtc *crtc,
100                             const struct intel_crtc_state *pipe_config);
101 static void intel_begin_crtc_commit(struct drm_crtc *crtc);
102 static void intel_finish_crtc_commit(struct drm_crtc *crtc);
103
104 static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
105 {
106         if (!connector->mst_port)
107                 return connector->encoder;
108         else
109                 return &connector->mst_port->mst_encoders[pipe]->base;
110 }
111
112 typedef struct {
113         int     min, max;
114 } intel_range_t;
115
116 typedef struct {
117         int     dot_limit;
118         int     p2_slow, p2_fast;
119 } intel_p2_t;
120
121 typedef struct intel_limit intel_limit_t;
122 struct intel_limit {
123         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
124         intel_p2_t          p2;
125 };
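/*
 * These limits bound the brute-force divisor search done by the
 * *_find_best_dpll() helpers below.  Roughly, for the platforms handled in
 * this file:
 *
 *     p   = p1 * p2
 *     m   = f(m1, m2)       (platform specific, see the *_clock() helpers)
 *     vco = refclk * m / n
 *     dot = vco / p
 *
 * .dot and .vco constrain the derived clocks, the other fields constrain the
 * individual divisors, and .p2 selects p2_fast at or above .dot_limit (or for
 * dual-link LVDS) and p2_slow otherwise.  All frequencies are in kHz.
 */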
126
127 int
128 intel_pch_rawclk(struct drm_device *dev)
129 {
130         struct drm_i915_private *dev_priv = dev->dev_private;
131
132         WARN_ON(!HAS_PCH_SPLIT(dev));
133
134         return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
135 }
136
137 static inline u32 /* units of 100MHz */
138 intel_fdi_link_freq(struct drm_device *dev)
139 {
140         if (IS_GEN5(dev)) {
141                 struct drm_i915_private *dev_priv = dev->dev_private;
142                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
143         } else
144                 return 27;
145 }
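/*
 * Example: the non-GEN5 return value of 27 above corresponds to a 2.7 GHz
 * FDI link clock, since this function reports the frequency in units of
 * 100 MHz.
 */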
146
147 static const intel_limit_t intel_limits_i8xx_dac = {
148         .dot = { .min = 25000, .max = 350000 },
149         .vco = { .min = 908000, .max = 1512000 },
150         .n = { .min = 2, .max = 16 },
151         .m = { .min = 96, .max = 140 },
152         .m1 = { .min = 18, .max = 26 },
153         .m2 = { .min = 6, .max = 16 },
154         .p = { .min = 4, .max = 128 },
155         .p1 = { .min = 2, .max = 33 },
156         .p2 = { .dot_limit = 165000,
157                 .p2_slow = 4, .p2_fast = 2 },
158 };
159
160 static const intel_limit_t intel_limits_i8xx_dvo = {
161         .dot = { .min = 25000, .max = 350000 },
162         .vco = { .min = 908000, .max = 1512000 },
163         .n = { .min = 2, .max = 16 },
164         .m = { .min = 96, .max = 140 },
165         .m1 = { .min = 18, .max = 26 },
166         .m2 = { .min = 6, .max = 16 },
167         .p = { .min = 4, .max = 128 },
168         .p1 = { .min = 2, .max = 33 },
169         .p2 = { .dot_limit = 165000,
170                 .p2_slow = 4, .p2_fast = 4 },
171 };
172
173 static const intel_limit_t intel_limits_i8xx_lvds = {
174         .dot = { .min = 25000, .max = 350000 },
175         .vco = { .min = 908000, .max = 1512000 },
176         .n = { .min = 2, .max = 16 },
177         .m = { .min = 96, .max = 140 },
178         .m1 = { .min = 18, .max = 26 },
179         .m2 = { .min = 6, .max = 16 },
180         .p = { .min = 4, .max = 128 },
181         .p1 = { .min = 1, .max = 6 },
182         .p2 = { .dot_limit = 165000,
183                 .p2_slow = 14, .p2_fast = 7 },
184 };
185
186 static const intel_limit_t intel_limits_i9xx_sdvo = {
187         .dot = { .min = 20000, .max = 400000 },
188         .vco = { .min = 1400000, .max = 2800000 },
189         .n = { .min = 1, .max = 6 },
190         .m = { .min = 70, .max = 120 },
191         .m1 = { .min = 8, .max = 18 },
192         .m2 = { .min = 3, .max = 7 },
193         .p = { .min = 5, .max = 80 },
194         .p1 = { .min = 1, .max = 8 },
195         .p2 = { .dot_limit = 200000,
196                 .p2_slow = 10, .p2_fast = 5 },
197 };
198
199 static const intel_limit_t intel_limits_i9xx_lvds = {
200         .dot = { .min = 20000, .max = 400000 },
201         .vco = { .min = 1400000, .max = 2800000 },
202         .n = { .min = 1, .max = 6 },
203         .m = { .min = 70, .max = 120 },
204         .m1 = { .min = 8, .max = 18 },
205         .m2 = { .min = 3, .max = 7 },
206         .p = { .min = 7, .max = 98 },
207         .p1 = { .min = 1, .max = 8 },
208         .p2 = { .dot_limit = 112000,
209                 .p2_slow = 14, .p2_fast = 7 },
210 };
211
212
213 static const intel_limit_t intel_limits_g4x_sdvo = {
214         .dot = { .min = 25000, .max = 270000 },
215         .vco = { .min = 1750000, .max = 3500000},
216         .n = { .min = 1, .max = 4 },
217         .m = { .min = 104, .max = 138 },
218         .m1 = { .min = 17, .max = 23 },
219         .m2 = { .min = 5, .max = 11 },
220         .p = { .min = 10, .max = 30 },
221         .p1 = { .min = 1, .max = 3},
222         .p2 = { .dot_limit = 270000,
223                 .p2_slow = 10,
224                 .p2_fast = 10
225         },
226 };
227
228 static const intel_limit_t intel_limits_g4x_hdmi = {
229         .dot = { .min = 22000, .max = 400000 },
230         .vco = { .min = 1750000, .max = 3500000},
231         .n = { .min = 1, .max = 4 },
232         .m = { .min = 104, .max = 138 },
233         .m1 = { .min = 16, .max = 23 },
234         .m2 = { .min = 5, .max = 11 },
235         .p = { .min = 5, .max = 80 },
236         .p1 = { .min = 1, .max = 8},
237         .p2 = { .dot_limit = 165000,
238                 .p2_slow = 10, .p2_fast = 5 },
239 };
240
241 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
242         .dot = { .min = 20000, .max = 115000 },
243         .vco = { .min = 1750000, .max = 3500000 },
244         .n = { .min = 1, .max = 3 },
245         .m = { .min = 104, .max = 138 },
246         .m1 = { .min = 17, .max = 23 },
247         .m2 = { .min = 5, .max = 11 },
248         .p = { .min = 28, .max = 112 },
249         .p1 = { .min = 2, .max = 8 },
250         .p2 = { .dot_limit = 0,
251                 .p2_slow = 14, .p2_fast = 14
252         },
253 };
254
255 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
256         .dot = { .min = 80000, .max = 224000 },
257         .vco = { .min = 1750000, .max = 3500000 },
258         .n = { .min = 1, .max = 3 },
259         .m = { .min = 104, .max = 138 },
260         .m1 = { .min = 17, .max = 23 },
261         .m2 = { .min = 5, .max = 11 },
262         .p = { .min = 14, .max = 42 },
263         .p1 = { .min = 2, .max = 6 },
264         .p2 = { .dot_limit = 0,
265                 .p2_slow = 7, .p2_fast = 7
266         },
267 };
268
269 static const intel_limit_t intel_limits_pineview_sdvo = {
270         .dot = { .min = 20000, .max = 400000},
271         .vco = { .min = 1700000, .max = 3500000 },
272         /* Pineview's N counter is a ring counter */
273         .n = { .min = 3, .max = 6 },
274         .m = { .min = 2, .max = 256 },
275         /* Pineview only has one combined m divider, which we treat as m2. */
276         .m1 = { .min = 0, .max = 0 },
277         .m2 = { .min = 0, .max = 254 },
278         .p = { .min = 5, .max = 80 },
279         .p1 = { .min = 1, .max = 8 },
280         .p2 = { .dot_limit = 200000,
281                 .p2_slow = 10, .p2_fast = 5 },
282 };
283
284 static const intel_limit_t intel_limits_pineview_lvds = {
285         .dot = { .min = 20000, .max = 400000 },
286         .vco = { .min = 1700000, .max = 3500000 },
287         .n = { .min = 3, .max = 6 },
288         .m = { .min = 2, .max = 256 },
289         .m1 = { .min = 0, .max = 0 },
290         .m2 = { .min = 0, .max = 254 },
291         .p = { .min = 7, .max = 112 },
292         .p1 = { .min = 1, .max = 8 },
293         .p2 = { .dot_limit = 112000,
294                 .p2_slow = 14, .p2_fast = 14 },
295 };
296
297 /* Ironlake / Sandybridge
298  *
299  * We calculate clock using (register_value + 2) for N/M1/M2, so here
300  * the range value for them is (actual_value - 2).
301  */
302 static const intel_limit_t intel_limits_ironlake_dac = {
303         .dot = { .min = 25000, .max = 350000 },
304         .vco = { .min = 1760000, .max = 3510000 },
305         .n = { .min = 1, .max = 5 },
306         .m = { .min = 79, .max = 127 },
307         .m1 = { .min = 12, .max = 22 },
308         .m2 = { .min = 5, .max = 9 },
309         .p = { .min = 5, .max = 80 },
310         .p1 = { .min = 1, .max = 8 },
311         .p2 = { .dot_limit = 225000,
312                 .p2_slow = 10, .p2_fast = 5 },
313 };
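/*
 * Worked example of the (register_value + 2) encoding described above: an
 * .m1 range of 12..22 in these tables corresponds to effective M1 dividers
 * of 14..24 once the +2 is applied (see i9xx_clock(), which does the same
 * for N and M2).
 */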
314
315 static const intel_limit_t intel_limits_ironlake_single_lvds = {
316         .dot = { .min = 25000, .max = 350000 },
317         .vco = { .min = 1760000, .max = 3510000 },
318         .n = { .min = 1, .max = 3 },
319         .m = { .min = 79, .max = 118 },
320         .m1 = { .min = 12, .max = 22 },
321         .m2 = { .min = 5, .max = 9 },
322         .p = { .min = 28, .max = 112 },
323         .p1 = { .min = 2, .max = 8 },
324         .p2 = { .dot_limit = 225000,
325                 .p2_slow = 14, .p2_fast = 14 },
326 };
327
328 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
329         .dot = { .min = 25000, .max = 350000 },
330         .vco = { .min = 1760000, .max = 3510000 },
331         .n = { .min = 1, .max = 3 },
332         .m = { .min = 79, .max = 127 },
333         .m1 = { .min = 12, .max = 22 },
334         .m2 = { .min = 5, .max = 9 },
335         .p = { .min = 14, .max = 56 },
336         .p1 = { .min = 2, .max = 8 },
337         .p2 = { .dot_limit = 225000,
338                 .p2_slow = 7, .p2_fast = 7 },
339 };
340
341 /* LVDS 100 MHz refclk limits. */
342 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
343         .dot = { .min = 25000, .max = 350000 },
344         .vco = { .min = 1760000, .max = 3510000 },
345         .n = { .min = 1, .max = 2 },
346         .m = { .min = 79, .max = 126 },
347         .m1 = { .min = 12, .max = 22 },
348         .m2 = { .min = 5, .max = 9 },
349         .p = { .min = 28, .max = 112 },
350         .p1 = { .min = 2, .max = 8 },
351         .p2 = { .dot_limit = 225000,
352                 .p2_slow = 14, .p2_fast = 14 },
353 };
354
355 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
356         .dot = { .min = 25000, .max = 350000 },
357         .vco = { .min = 1760000, .max = 3510000 },
358         .n = { .min = 1, .max = 3 },
359         .m = { .min = 79, .max = 126 },
360         .m1 = { .min = 12, .max = 22 },
361         .m2 = { .min = 5, .max = 9 },
362         .p = { .min = 14, .max = 42 },
363         .p1 = { .min = 2, .max = 6 },
364         .p2 = { .dot_limit = 225000,
365                 .p2_slow = 7, .p2_fast = 7 },
366 };
367
368 static const intel_limit_t intel_limits_vlv = {
369          /*
370           * These are the data rate limits (measured in fast clocks)
371           * since those are the strictest limits we have. The fast
372           * clock and actual rate limits are more relaxed, so checking
373           * them would make no difference.
374           */
375         .dot = { .min = 25000 * 5, .max = 270000 * 5 },
376         .vco = { .min = 4000000, .max = 6000000 },
377         .n = { .min = 1, .max = 7 },
378         .m1 = { .min = 2, .max = 3 },
379         .m2 = { .min = 11, .max = 156 },
380         .p1 = { .min = 2, .max = 3 },
381         .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
382 };
383
384 static const intel_limit_t intel_limits_chv = {
385         /*
386          * These are the data rate limits (measured in fast clocks)
387          * since those are the strictest limits we have.  The fast
388          * clock and actual rate limits are more relaxed, so checking
389          * them would make no difference.
390          */
391         .dot = { .min = 25000 * 5, .max = 540000 * 5},
392         .vco = { .min = 4860000, .max = 6700000 },
393         .n = { .min = 1, .max = 1 },
394         .m1 = { .min = 2, .max = 2 },
395         .m2 = { .min = 24 << 22, .max = 175 << 22 },
396         .p1 = { .min = 2, .max = 4 },
397         .p2 = { .p2_slow = 1, .p2_fast = 14 },
398 };
399
400 static void vlv_clock(int refclk, intel_clock_t *clock)
401 {
402         clock->m = clock->m1 * clock->m2;
403         clock->p = clock->p1 * clock->p2;
404         if (WARN_ON(clock->n == 0 || clock->p == 0))
405                 return;
406         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
407         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
408 }
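/*
 * Worked example for vlv_clock() (illustrative values only): with
 * refclk = 100000 kHz, n = 2, m1 = 2, m2 = 50, p1 = 3, p2 = 4 we get
 * m = 100, p = 12, vco = 100000 * 100 / 2 = 5000000 kHz and
 * dot = 5000000 / 12 ≈ 416667 kHz.  Per the limit table above, this "dot"
 * is the fast (5x) clock, not the pixel clock itself.
 */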
409
410 /**
411  * Returns whether any output on the specified pipe is of the specified type
412  */
413 bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
414 {
415         struct drm_device *dev = crtc->base.dev;
416         struct intel_encoder *encoder;
417
418         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
419                 if (encoder->type == type)
420                         return true;
421
422         return false;
423 }
424
425 /**
426  * Returns whether any output on the specified pipe will have the specified
427  * type after a staged modeset is complete, i.e., the same as
428  * intel_pipe_has_type() but looking at encoder->new_crtc instead of
429  * encoder->crtc.
430  */
431 static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
432 {
433         struct drm_device *dev = crtc->base.dev;
434         struct intel_encoder *encoder;
435
436         for_each_intel_encoder(dev, encoder)
437                 if (encoder->new_crtc == crtc && encoder->type == type)
438                         return true;
439
440         return false;
441 }
442
443 static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
444                                                 int refclk)
445 {
446         struct drm_device *dev = crtc->base.dev;
447         const intel_limit_t *limit;
448
449         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
450                 if (intel_is_dual_link_lvds(dev)) {
451                         if (refclk == 100000)
452                                 limit = &intel_limits_ironlake_dual_lvds_100m;
453                         else
454                                 limit = &intel_limits_ironlake_dual_lvds;
455                 } else {
456                         if (refclk == 100000)
457                                 limit = &intel_limits_ironlake_single_lvds_100m;
458                         else
459                                 limit = &intel_limits_ironlake_single_lvds;
460                 }
461         } else
462                 limit = &intel_limits_ironlake_dac;
463
464         return limit;
465 }
466
467 static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
468 {
469         struct drm_device *dev = crtc->base.dev;
470         const intel_limit_t *limit;
471
472         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
473                 if (intel_is_dual_link_lvds(dev))
474                         limit = &intel_limits_g4x_dual_channel_lvds;
475                 else
476                         limit = &intel_limits_g4x_single_channel_lvds;
477         } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
478                    intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
479                 limit = &intel_limits_g4x_hdmi;
480         } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
481                 limit = &intel_limits_g4x_sdvo;
482         } else /* The option is for other outputs */
483                 limit = &intel_limits_i9xx_sdvo;
484
485         return limit;
486 }
487
488 static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
489 {
490         struct drm_device *dev = crtc->base.dev;
491         const intel_limit_t *limit;
492
493         if (HAS_PCH_SPLIT(dev))
494                 limit = intel_ironlake_limit(crtc, refclk);
495         else if (IS_G4X(dev)) {
496                 limit = intel_g4x_limit(crtc);
497         } else if (IS_PINEVIEW(dev)) {
498                 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
499                         limit = &intel_limits_pineview_lvds;
500                 else
501                         limit = &intel_limits_pineview_sdvo;
502         } else if (IS_CHERRYVIEW(dev)) {
503                 limit = &intel_limits_chv;
504         } else if (IS_VALLEYVIEW(dev)) {
505                 limit = &intel_limits_vlv;
506         } else if (!IS_GEN2(dev)) {
507                 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
508                         limit = &intel_limits_i9xx_lvds;
509                 else
510                         limit = &intel_limits_i9xx_sdvo;
511         } else {
512                 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
513                         limit = &intel_limits_i8xx_lvds;
514                 else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
515                         limit = &intel_limits_i8xx_dvo;
516                 else
517                         limit = &intel_limits_i8xx_dac;
518         }
519         return limit;
520 }
521
522 /* m1 is reserved as 0 in Pineview, n is a ring counter */
523 static void pineview_clock(int refclk, intel_clock_t *clock)
524 {
525         clock->m = clock->m2 + 2;
526         clock->p = clock->p1 * clock->p2;
527         if (WARN_ON(clock->n == 0 || clock->p == 0))
528                 return;
529         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
530         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
531 }
532
533 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
534 {
535         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
536 }
537
538 static void i9xx_clock(int refclk, intel_clock_t *clock)
539 {
540         clock->m = i9xx_dpll_compute_m(clock);
541         clock->p = clock->p1 * clock->p2;
542         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
543                 return;
544         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
545         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
546 }
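/*
 * Worked example for i9xx_clock() (illustrative values only): m1 = 12 and
 * m2 = 5 give m = 5 * (12 + 2) + (5 + 2) = 77; with refclk = 96000 kHz and
 * n = 3 the VCO then runs at 96000 * 77 / (3 + 2) = 1478400 kHz.
 */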
547
548 static void chv_clock(int refclk, intel_clock_t *clock)
549 {
550         clock->m = clock->m1 * clock->m2;
551         clock->p = clock->p1 * clock->p2;
552         if (WARN_ON(clock->n == 0 || clock->p == 0))
553                 return;
554         clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
555                         clock->n << 22);
556         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
557 }
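/*
 * On CHV, m2 is carried as a fixed-point value with 22 fractional bits: the
 * limit table above stores .m2 as (integer << 22) and chv_find_best_dpll()
 * computes m2 the same way, which is why chv_clock() divides by (n << 22)
 * to cancel the scaling.
 */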
558
559 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
560 /**
561  * Returns whether the given set of divisors is valid for a given refclk with
562  * the given connectors.
563  */
564
565 static bool intel_PLL_is_valid(struct drm_device *dev,
566                                const intel_limit_t *limit,
567                                const intel_clock_t *clock)
568 {
569         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
570                 INTELPllInvalid("n out of range\n");
571         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
572                 INTELPllInvalid("p1 out of range\n");
573         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
574                 INTELPllInvalid("m2 out of range\n");
575         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
576                 INTELPllInvalid("m1 out of range\n");
577
578         if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
579                 if (clock->m1 <= clock->m2)
580                         INTELPllInvalid("m1 <= m2\n");
581
582         if (!IS_VALLEYVIEW(dev)) {
583                 if (clock->p < limit->p.min || limit->p.max < clock->p)
584                         INTELPllInvalid("p out of range\n");
585                 if (clock->m < limit->m.min || limit->m.max < clock->m)
586                         INTELPllInvalid("m out of range\n");
587         }
588
589         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
590                 INTELPllInvalid("vco out of range\n");
591         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
592          * connector, etc., rather than just a single range.
593          */
594         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
595                 INTELPllInvalid("dot out of range\n");
596
597         return true;
598 }
599
600 static bool
601 i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
602                     int target, int refclk, intel_clock_t *match_clock,
603                     intel_clock_t *best_clock)
604 {
605         struct drm_device *dev = crtc->base.dev;
606         intel_clock_t clock;
607         int err = target;
608
609         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
610                 /*
611                  * For LVDS just rely on its current settings for dual-channel.
612                  * We haven't figured out how to reliably set up different
613                  * single/dual channel state, if we even can.
614                  */
615                 if (intel_is_dual_link_lvds(dev))
616                         clock.p2 = limit->p2.p2_fast;
617                 else
618                         clock.p2 = limit->p2.p2_slow;
619         } else {
620                 if (target < limit->p2.dot_limit)
621                         clock.p2 = limit->p2.p2_slow;
622                 else
623                         clock.p2 = limit->p2.p2_fast;
624         }
625
626         memset(best_clock, 0, sizeof(*best_clock));
627
628         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
629              clock.m1++) {
630                 for (clock.m2 = limit->m2.min;
631                      clock.m2 <= limit->m2.max; clock.m2++) {
632                         if (clock.m2 >= clock.m1)
633                                 break;
634                         for (clock.n = limit->n.min;
635                              clock.n <= limit->n.max; clock.n++) {
636                                 for (clock.p1 = limit->p1.min;
637                                         clock.p1 <= limit->p1.max; clock.p1++) {
638                                         int this_err;
639
640                                         i9xx_clock(refclk, &clock);
641                                         if (!intel_PLL_is_valid(dev, limit,
642                                                                 &clock))
643                                                 continue;
644                                         if (match_clock &&
645                                             clock.p != match_clock->p)
646                                                 continue;
647
648                                         this_err = abs(clock.dot - target);
649                                         if (this_err < err) {
650                                                 *best_clock = clock;
651                                                 err = this_err;
652                                         }
653                                 }
654                         }
655                 }
656         }
657
658         return (err != target);
659 }
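/*
 * Illustrative usage sketch for the *_find_best_dpll() helpers (not a call
 * site in this excerpt):
 *
 *     const intel_limit_t *limit = intel_limit(crtc, refclk);
 *     intel_clock_t clock;
 *
 *     if (!i9xx_find_best_dpll(limit, crtc, target_khz, refclk, NULL, &clock))
 *             DRM_DEBUG_KMS("couldn't find PLL settings for target\n");
 *
 * On success, "clock" holds the chosen divisors plus the derived vco/dot
 * values; passing a NULL match_clock means any p value is acceptable.
 */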
660
661 static bool
662 pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
663                    int target, int refclk, intel_clock_t *match_clock,
664                    intel_clock_t *best_clock)
665 {
666         struct drm_device *dev = crtc->base.dev;
667         intel_clock_t clock;
668         int err = target;
669
670         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
671                 /*
672                  * For LVDS just rely on its current settings for dual-channel.
673                  * We haven't figured out how to reliably set up different
674                  * single/dual channel state, if we even can.
675                  */
676                 if (intel_is_dual_link_lvds(dev))
677                         clock.p2 = limit->p2.p2_fast;
678                 else
679                         clock.p2 = limit->p2.p2_slow;
680         } else {
681                 if (target < limit->p2.dot_limit)
682                         clock.p2 = limit->p2.p2_slow;
683                 else
684                         clock.p2 = limit->p2.p2_fast;
685         }
686
687         memset(best_clock, 0, sizeof(*best_clock));
688
689         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
690              clock.m1++) {
691                 for (clock.m2 = limit->m2.min;
692                      clock.m2 <= limit->m2.max; clock.m2++) {
693                         for (clock.n = limit->n.min;
694                              clock.n <= limit->n.max; clock.n++) {
695                                 for (clock.p1 = limit->p1.min;
696                                         clock.p1 <= limit->p1.max; clock.p1++) {
697                                         int this_err;
698
699                                         pineview_clock(refclk, &clock);
700                                         if (!intel_PLL_is_valid(dev, limit,
701                                                                 &clock))
702                                                 continue;
703                                         if (match_clock &&
704                                             clock.p != match_clock->p)
705                                                 continue;
706
707                                         this_err = abs(clock.dot - target);
708                                         if (this_err < err) {
709                                                 *best_clock = clock;
710                                                 err = this_err;
711                                         }
712                                 }
713                         }
714                 }
715         }
716
717         return (err != target);
718 }
719
720 static bool
721 g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
722                    int target, int refclk, intel_clock_t *match_clock,
723                    intel_clock_t *best_clock)
724 {
725         struct drm_device *dev = crtc->base.dev;
726         intel_clock_t clock;
727         int max_n;
728         bool found;
729         /* approximately equals target * 0.00585 */
730         int err_most = (target >> 8) + (target >> 9);
731         found = false;
732
733         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
734                 if (intel_is_dual_link_lvds(dev))
735                         clock.p2 = limit->p2.p2_fast;
736                 else
737                         clock.p2 = limit->p2.p2_slow;
738         } else {
739                 if (target < limit->p2.dot_limit)
740                         clock.p2 = limit->p2.p2_slow;
741                 else
742                         clock.p2 = limit->p2.p2_fast;
743         }
744
745         memset(best_clock, 0, sizeof(*best_clock));
746         max_n = limit->n.max;
747         /* based on hardware requirement, prefer smaller n for better precision */
748         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
749                 /* based on hardware requirement, prefer larger m1, m2 */
750                 for (clock.m1 = limit->m1.max;
751                      clock.m1 >= limit->m1.min; clock.m1--) {
752                         for (clock.m2 = limit->m2.max;
753                              clock.m2 >= limit->m2.min; clock.m2--) {
754                                 for (clock.p1 = limit->p1.max;
755                                      clock.p1 >= limit->p1.min; clock.p1--) {
756                                         int this_err;
757
758                                         i9xx_clock(refclk, &clock);
759                                         if (!intel_PLL_is_valid(dev, limit,
760                                                                 &clock))
761                                                 continue;
762
763                                         this_err = abs(clock.dot - target);
764                                         if (this_err < err_most) {
765                                                 *best_clock = clock;
766                                                 err_most = this_err;
767                                                 max_n = clock.n;
768                                                 found = true;
769                                         }
770                                 }
771                         }
772                 }
773         }
774         return found;
775 }
776
777 static bool
778 vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
779                    int target, int refclk, intel_clock_t *match_clock,
780                    intel_clock_t *best_clock)
781 {
782         struct drm_device *dev = crtc->base.dev;
783         intel_clock_t clock;
784         unsigned int bestppm = 1000000;
785         /* min update 19.2 MHz */
786         int max_n = min(limit->n.max, refclk / 19200);
787         bool found = false;
788
789         target *= 5; /* fast clock */
790
791         memset(best_clock, 0, sizeof(*best_clock));
792
793         /* based on hardware requirement, prefer smaller n for better precision */
794         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
795                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
796                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
797                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
798                                 clock.p = clock.p1 * clock.p2;
799                                 /* based on hardware requirement, prefer larger m1, m2 values */
800                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
801                                         unsigned int ppm, diff;
802
803                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
804                                                                      refclk * clock.m1);
805
806                                         vlv_clock(refclk, &clock);
807
808                                         if (!intel_PLL_is_valid(dev, limit,
809                                                                 &clock))
810                                                 continue;
811
812                                         diff = abs(clock.dot - target);
813                                         ppm = div_u64(1000000ULL * diff, target);
814
815                                         if (ppm < 100 && clock.p > best_clock->p) {
816                                                 bestppm = 0;
817                                                 *best_clock = clock;
818                                                 found = true;
819                                         }
820
821                                         if (bestppm >= 10 && ppm < bestppm - 10) {
822                                                 bestppm = ppm;
823                                                 *best_clock = clock;
824                                                 found = true;
825                                         }
826                                 }
827                         }
828                 }
829         }
830
831         return found;
832 }
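/*
 * Candidate selection above: once a result is within 100 ppm of the target,
 * the largest p (p1 * p2) wins; before that, the lowest-error candidate is
 * kept, and a new one must improve on it by more than 10 ppm to replace it.
 */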
833
834 static bool
835 chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
836                    int target, int refclk, intel_clock_t *match_clock,
837                    intel_clock_t *best_clock)
838 {
839         struct drm_device *dev = crtc->base.dev;
840         intel_clock_t clock;
841         uint64_t m2;
842         bool found = false;
843
844         memset(best_clock, 0, sizeof(*best_clock));
845
846         /*
847          * Per the hardware documentation, n is always set to 1 and m1 is
848          * always set to 2.  If we ever need to support a 200 MHz refclk, we
849          * will have to revisit this because n may no longer be 1.
850          */
851         clock.n = 1, clock.m1 = 2;
852         target *= 5;    /* fast clock */
853
854         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
855                 for (clock.p2 = limit->p2.p2_fast;
856                                 clock.p2 >= limit->p2.p2_slow;
857                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
858
859                         clock.p = clock.p1 * clock.p2;
860
861                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
862                                         clock.n) << 22, refclk * clock.m1);
863
864                         if (m2 > INT_MAX/clock.m1)
865                                 continue;
866
867                         clock.m2 = m2;
868
869                         chv_clock(refclk, &clock);
870
871                         if (!intel_PLL_is_valid(dev, limit, &clock))
872                                 continue;
873
874                         /* based on hardware requirement, prefer bigger p
875                          */
876                         if (clock.p > best_clock->p) {
877                                 *best_clock = clock;
878                                 found = true;
879                         }
880                 }
881         }
882
883         return found;
884 }
885
886 bool intel_crtc_active(struct drm_crtc *crtc)
887 {
888         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
889
890         /* Be paranoid as we can arrive here with only partial
891          * state retrieved from the hardware during setup.
892          *
893          * We can ditch the adjusted_mode.crtc_clock check as soon
894          * as Haswell has gained clock readout/fastboot support.
895          *
896          * We can ditch the crtc->primary->fb check as soon as we can
897          * properly reconstruct framebuffers.
898          */
899         return intel_crtc->active && crtc->primary->fb &&
900                 intel_crtc->config->base.adjusted_mode.crtc_clock;
901 }
902
903 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
904                                              enum pipe pipe)
905 {
906         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
907         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
908
909         return intel_crtc->config->cpu_transcoder;
910 }
911
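/*
 * pipe_dsl_stopped() samples the pipe's display scanline register twice,
 * 5 ms apart; an unchanged value means scanout on that pipe has stopped.
 */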
912 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
913 {
914         struct drm_i915_private *dev_priv = dev->dev_private;
915         u32 reg = PIPEDSL(pipe);
916         u32 line1, line2;
917         u32 line_mask;
918
919         if (IS_GEN2(dev))
920                 line_mask = DSL_LINEMASK_GEN2;
921         else
922                 line_mask = DSL_LINEMASK_GEN3;
923
924         line1 = I915_READ(reg) & line_mask;
925         mdelay(5);
926         line2 = I915_READ(reg) & line_mask;
927
928         return line1 == line2;
929 }
930
931 /*
932  * intel_wait_for_pipe_off - wait for pipe to turn off
933  * @crtc: crtc whose pipe to wait for
934  *
935  * After disabling a pipe, we can't wait for vblank in the usual way,
936  * spinning on the vblank interrupt status bit, since we won't actually
937  * see an interrupt when the pipe is disabled.
938  *
939  * On Gen4 and above:
940  *   wait for the pipe register state bit to turn off
941  *
942  * Otherwise:
943  *   wait for the display line value to settle (it usually
944  *   ends up stopping at the start of the next frame).
945  *
946  */
947 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
948 {
949         struct drm_device *dev = crtc->base.dev;
950         struct drm_i915_private *dev_priv = dev->dev_private;
951         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
952         enum pipe pipe = crtc->pipe;
953
954         if (INTEL_INFO(dev)->gen >= 4) {
955                 int reg = PIPECONF(cpu_transcoder);
956
957                 /* Wait for the Pipe State to go off */
958                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
959                              100))
960                         WARN(1, "pipe_off wait timed out\n");
961         } else {
962                 /* Wait for the display line to settle */
963                 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
964                         WARN(1, "pipe_off wait timed out\n");
965         }
966 }
967
968 /*
969  * ibx_digital_port_connected - is the specified port connected?
970  * @dev_priv: i915 private structure
971  * @port: the port to test
972  *
973  * Returns true if @port is connected, false otherwise.
974  */
975 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
976                                 struct intel_digital_port *port)
977 {
978         u32 bit;
979
980         if (HAS_PCH_IBX(dev_priv->dev)) {
981                 switch (port->port) {
982                 case PORT_B:
983                         bit = SDE_PORTB_HOTPLUG;
984                         break;
985                 case PORT_C:
986                         bit = SDE_PORTC_HOTPLUG;
987                         break;
988                 case PORT_D:
989                         bit = SDE_PORTD_HOTPLUG;
990                         break;
991                 default:
992                         return true;
993                 }
994         } else {
995                 switch (port->port) {
996                 case PORT_B:
997                         bit = SDE_PORTB_HOTPLUG_CPT;
998                         break;
999                 case PORT_C:
1000                         bit = SDE_PORTC_HOTPLUG_CPT;
1001                         break;
1002                 case PORT_D:
1003                         bit = SDE_PORTD_HOTPLUG_CPT;
1004                         break;
1005                 default:
1006                         return true;
1007                 }
1008         }
1009
1010         return I915_READ(SDEISR) & bit;
1011 }
1012
1013 static const char *state_string(bool enabled)
1014 {
1015         return enabled ? "on" : "off";
1016 }
1017
1018 /* Only for pre-ILK configs */
1019 void assert_pll(struct drm_i915_private *dev_priv,
1020                 enum pipe pipe, bool state)
1021 {
1022         int reg;
1023         u32 val;
1024         bool cur_state;
1025
1026         reg = DPLL(pipe);
1027         val = I915_READ(reg);
1028         cur_state = !!(val & DPLL_VCO_ENABLE);
1029         I915_STATE_WARN(cur_state != state,
1030              "PLL state assertion failure (expected %s, current %s)\n",
1031              state_string(state), state_string(cur_state));
1032 }
1033
1034 /* XXX: the dsi pll is shared between MIPI DSI ports */
1035 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1036 {
1037         u32 val;
1038         bool cur_state;
1039
1040         mutex_lock(&dev_priv->dpio_lock);
1041         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1042         mutex_unlock(&dev_priv->dpio_lock);
1043
1044         cur_state = val & DSI_PLL_VCO_EN;
1045         I915_STATE_WARN(cur_state != state,
1046              "DSI PLL state assertion failure (expected %s, current %s)\n",
1047              state_string(state), state_string(cur_state));
1048 }
1049 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1050 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1051
1052 struct intel_shared_dpll *
1053 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1054 {
1055         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1056
1057         if (crtc->config->shared_dpll < 0)
1058                 return NULL;
1059
1060         return &dev_priv->shared_dplls[crtc->config->shared_dpll];
1061 }
1062
1063 /* For ILK+ */
1064 void assert_shared_dpll(struct drm_i915_private *dev_priv,
1065                         struct intel_shared_dpll *pll,
1066                         bool state)
1067 {
1068         bool cur_state;
1069         struct intel_dpll_hw_state hw_state;
1070
1071         if (WARN(!pll,
1072                   "asserting DPLL %s with no DPLL\n", state_string(state)))
1073                 return;
1074
1075         cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1076         I915_STATE_WARN(cur_state != state,
1077              "%s assertion failure (expected %s, current %s)\n",
1078              pll->name, state_string(state), state_string(cur_state));
1079 }
1080
1081 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1082                           enum pipe pipe, bool state)
1083 {
1084         int reg;
1085         u32 val;
1086         bool cur_state;
1087         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1088                                                                       pipe);
1089
1090         if (HAS_DDI(dev_priv->dev)) {
1091                 /* DDI does not have a specific FDI_TX register */
1092                 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1093                 val = I915_READ(reg);
1094                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1095         } else {
1096                 reg = FDI_TX_CTL(pipe);
1097                 val = I915_READ(reg);
1098                 cur_state = !!(val & FDI_TX_ENABLE);
1099         }
1100         I915_STATE_WARN(cur_state != state,
1101              "FDI TX state assertion failure (expected %s, current %s)\n",
1102              state_string(state), state_string(cur_state));
1103 }
1104 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1105 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1106
1107 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1108                           enum pipe pipe, bool state)
1109 {
1110         int reg;
1111         u32 val;
1112         bool cur_state;
1113
1114         reg = FDI_RX_CTL(pipe);
1115         val = I915_READ(reg);
1116         cur_state = !!(val & FDI_RX_ENABLE);
1117         I915_STATE_WARN(cur_state != state,
1118              "FDI RX state assertion failure (expected %s, current %s)\n",
1119              state_string(state), state_string(cur_state));
1120 }
1121 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1122 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1123
1124 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1125                                       enum pipe pipe)
1126 {
1127         int reg;
1128         u32 val;
1129
1130         /* ILK FDI PLL is always enabled */
1131         if (INTEL_INFO(dev_priv->dev)->gen == 5)
1132                 return;
1133
1134         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1135         if (HAS_DDI(dev_priv->dev))
1136                 return;
1137
1138         reg = FDI_TX_CTL(pipe);
1139         val = I915_READ(reg);
1140         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1141 }
1142
1143 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1144                        enum pipe pipe, bool state)
1145 {
1146         int reg;
1147         u32 val;
1148         bool cur_state;
1149
1150         reg = FDI_RX_CTL(pipe);
1151         val = I915_READ(reg);
1152         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1153         I915_STATE_WARN(cur_state != state,
1154              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1155              state_string(state), state_string(cur_state));
1156 }
1157
1158 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1159                            enum pipe pipe)
1160 {
1161         struct drm_device *dev = dev_priv->dev;
1162         int pp_reg;
1163         u32 val;
1164         enum pipe panel_pipe = PIPE_A;
1165         bool locked = true;
1166
1167         if (WARN_ON(HAS_DDI(dev)))
1168                 return;
1169
1170         if (HAS_PCH_SPLIT(dev)) {
1171                 u32 port_sel;
1172
1173                 pp_reg = PCH_PP_CONTROL;
1174                 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1175
1176                 if (port_sel == PANEL_PORT_SELECT_LVDS &&
1177                     I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1178                         panel_pipe = PIPE_B;
1179                 /* XXX: else fix for eDP */
1180         } else if (IS_VALLEYVIEW(dev)) {
1181                 /* presumably write lock depends on pipe, not port select */
1182                 pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1183                 panel_pipe = pipe;
1184         } else {
1185                 pp_reg = PP_CONTROL;
1186                 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1187                         panel_pipe = PIPE_B;
1188         }
1189
1190         val = I915_READ(pp_reg);
1191         if (!(val & PANEL_POWER_ON) ||
1192             ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1193                 locked = false;
1194
1195         I915_STATE_WARN(panel_pipe == pipe && locked,
1196              "panel assertion failure, pipe %c regs locked\n",
1197              pipe_name(pipe));
1198 }
1199
1200 static void assert_cursor(struct drm_i915_private *dev_priv,
1201                           enum pipe pipe, bool state)
1202 {
1203         struct drm_device *dev = dev_priv->dev;
1204         bool cur_state;
1205
1206         if (IS_845G(dev) || IS_I865G(dev))
1207                 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1208         else
1209                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1210
1211         I915_STATE_WARN(cur_state != state,
1212              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1213              pipe_name(pipe), state_string(state), state_string(cur_state));
1214 }
1215 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1216 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1217
1218 void assert_pipe(struct drm_i915_private *dev_priv,
1219                  enum pipe pipe, bool state)
1220 {
1221         int reg;
1222         u32 val;
1223         bool cur_state;
1224         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1225                                                                       pipe);
1226
1227         /* if we need the pipe quirk, it must always be on */
1228         if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1229             (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1230                 state = true;
1231
1232         if (!intel_display_power_is_enabled(dev_priv,
1233                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1234                 cur_state = false;
1235         } else {
1236                 reg = PIPECONF(cpu_transcoder);
1237                 val = I915_READ(reg);
1238                 cur_state = !!(val & PIPECONF_ENABLE);
1239         }
1240
1241         I915_STATE_WARN(cur_state != state,
1242              "pipe %c assertion failure (expected %s, current %s)\n",
1243              pipe_name(pipe), state_string(state), state_string(cur_state));
1244 }
1245
1246 static void assert_plane(struct drm_i915_private *dev_priv,
1247                          enum plane plane, bool state)
1248 {
1249         int reg;
1250         u32 val;
1251         bool cur_state;
1252
1253         reg = DSPCNTR(plane);
1254         val = I915_READ(reg);
1255         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1256         I915_STATE_WARN(cur_state != state,
1257              "plane %c assertion failure (expected %s, current %s)\n",
1258              plane_name(plane), state_string(state), state_string(cur_state));
1259 }
1260
1261 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1262 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1263
1264 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1265                                    enum pipe pipe)
1266 {
1267         struct drm_device *dev = dev_priv->dev;
1268         int reg, i;
1269         u32 val;
1270         int cur_pipe;
1271
1272         /* Primary planes are fixed to pipes on gen4+ */
1273         if (INTEL_INFO(dev)->gen >= 4) {
1274                 reg = DSPCNTR(pipe);
1275                 val = I915_READ(reg);
1276                 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
1277                      "plane %c assertion failure, should be disabled but is not\n",
1278                      plane_name(pipe));
1279                 return;
1280         }
1281
1282         /* Need to check both planes against the pipe */
1283         for_each_pipe(dev_priv, i) {
1284                 reg = DSPCNTR(i);
1285                 val = I915_READ(reg);
1286                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1287                         DISPPLANE_SEL_PIPE_SHIFT;
1288                 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1289                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1290                      plane_name(i), pipe_name(pipe));
1291         }
1292 }
1293
1294 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1295                                     enum pipe pipe)
1296 {
1297         struct drm_device *dev = dev_priv->dev;
1298         int reg, sprite;
1299         u32 val;
1300
1301         if (INTEL_INFO(dev)->gen >= 9) {
1302                 for_each_sprite(pipe, sprite) {
1303                         val = I915_READ(PLANE_CTL(pipe, sprite));
1304                         I915_STATE_WARN(val & PLANE_CTL_ENABLE,
1305                              "plane %d assertion failure, should be off on pipe %c but is still active\n",
1306                              sprite, pipe_name(pipe));
1307                 }
1308         } else if (IS_VALLEYVIEW(dev)) {
1309                 for_each_sprite(pipe, sprite) {
1310                         reg = SPCNTR(pipe, sprite);
1311                         val = I915_READ(reg);
1312                         I915_STATE_WARN(val & SP_ENABLE,
1313                              "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1314                              sprite_name(pipe, sprite), pipe_name(pipe));
1315                 }
1316         } else if (INTEL_INFO(dev)->gen >= 7) {
1317                 reg = SPRCTL(pipe);
1318                 val = I915_READ(reg);
1319                 I915_STATE_WARN(val & SPRITE_ENABLE,
1320                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1321                      plane_name(pipe), pipe_name(pipe));
1322         } else if (INTEL_INFO(dev)->gen >= 5) {
1323                 reg = DVSCNTR(pipe);
1324                 val = I915_READ(reg);
1325                 I915_STATE_WARN(val & DVS_ENABLE,
1326                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1327                      plane_name(pipe), pipe_name(pipe));
1328         }
1329 }
1330
1331 static void assert_vblank_disabled(struct drm_crtc *crtc)
1332 {
1333         if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1334                 drm_crtc_vblank_put(crtc);
1335 }
1336
1337 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1338 {
1339         u32 val;
1340         bool enabled;
1341
1342         I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1343
1344         val = I915_READ(PCH_DREF_CONTROL);
1345         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1346                             DREF_SUPERSPREAD_SOURCE_MASK));
1347         I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1348 }
1349
1350 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1351                                            enum pipe pipe)
1352 {
1353         int reg;
1354         u32 val;
1355         bool enabled;
1356
1357         reg = PCH_TRANSCONF(pipe);
1358         val = I915_READ(reg);
1359         enabled = !!(val & TRANS_ENABLE);
1360         I915_STATE_WARN(enabled,
1361              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1362              pipe_name(pipe));
1363 }
1364
1365 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1366                             enum pipe pipe, u32 port_sel, u32 val)
1367 {
1368         if ((val & DP_PORT_EN) == 0)
1369                 return false;
1370
1371         if (HAS_PCH_CPT(dev_priv->dev)) {
1372                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1373                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1374                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1375                         return false;
1376         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1377                 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1378                         return false;
1379         } else {
1380                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1381                         return false;
1382         }
1383         return true;
1384 }
1385
1386 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1387                               enum pipe pipe, u32 val)
1388 {
1389         if ((val & SDVO_ENABLE) == 0)
1390                 return false;
1391
1392         if (HAS_PCH_CPT(dev_priv->dev)) {
1393                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1394                         return false;
1395         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1396                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1397                         return false;
1398         } else {
1399                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1400                         return false;
1401         }
1402         return true;
1403 }
1404
1405 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1406                               enum pipe pipe, u32 val)
1407 {
1408         if ((val & LVDS_PORT_EN) == 0)
1409                 return false;
1410
1411         if (HAS_PCH_CPT(dev_priv->dev)) {
1412                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1413                         return false;
1414         } else {
1415                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1416                         return false;
1417         }
1418         return true;
1419 }
1420
1421 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1422                               enum pipe pipe, u32 val)
1423 {
1424         if ((val & ADPA_DAC_ENABLE) == 0)
1425                 return false;
1426         if (HAS_PCH_CPT(dev_priv->dev)) {
1427                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1428                         return false;
1429         } else {
1430                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1431                         return false;
1432         }
1433         return true;
1434 }
1435
1436 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1437                                    enum pipe pipe, int reg, u32 port_sel)
1438 {
1439         u32 val = I915_READ(reg);
1440         I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1441              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1442              reg, pipe_name(pipe));
1443
1444         I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1445              && (val & DP_PIPEB_SELECT),
1446              "IBX PCH dp port still using transcoder B\n");
1447 }
1448
1449 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1450                                      enum pipe pipe, int reg)
1451 {
1452         u32 val = I915_READ(reg);
1453         I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1454              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1455              reg, pipe_name(pipe));
1456
1457         I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1458              && (val & SDVO_PIPE_B_SELECT),
1459              "IBX PCH hdmi port still using transcoder B\n");
1460 }
1461
1462 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1463                                       enum pipe pipe)
1464 {
1465         int reg;
1466         u32 val;
1467
1468         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1469         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1470         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1471
1472         reg = PCH_ADPA;
1473         val = I915_READ(reg);
1474         I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1475              "PCH VGA enabled on transcoder %c, should be disabled\n",
1476              pipe_name(pipe));
1477
1478         reg = PCH_LVDS;
1479         val = I915_READ(reg);
1480         I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1481              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1482              pipe_name(pipe));
1483
1484         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1485         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1486         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1487 }
1488
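/* Map each DPIO PHY to its IOSF sideband port on VLV/CHV; no-op elsewhere. */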
1489 static void intel_init_dpio(struct drm_device *dev)
1490 {
1491         struct drm_i915_private *dev_priv = dev->dev_private;
1492
1493         if (!IS_VALLEYVIEW(dev))
1494                 return;
1495
1496         /*
1497          * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1498          * CHV x1 PHY (DP/HDMI D)
1499          * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1500          */
1501         if (IS_CHERRYVIEW(dev)) {
1502                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1503                 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1504         } else {
1505                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1506         }
1507 }
1508
1509 static void vlv_enable_pll(struct intel_crtc *crtc,
1510                            const struct intel_crtc_state *pipe_config)
1511 {
1512         struct drm_device *dev = crtc->base.dev;
1513         struct drm_i915_private *dev_priv = dev->dev_private;
1514         int reg = DPLL(crtc->pipe);
1515         u32 dpll = pipe_config->dpll_hw_state.dpll;
1516
1517         assert_pipe_disabled(dev_priv, crtc->pipe);
1518
1519         /* No really, not for ILK+ */
1520         BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1521
1522         /* PLL is protected by panel, make sure we can write it */
1523         if (IS_MOBILE(dev_priv->dev))
1524                 assert_panel_unlocked(dev_priv, crtc->pipe);
1525
1526         I915_WRITE(reg, dpll);
1527         POSTING_READ(reg);
1528         udelay(150);
1529
1530         if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1531                 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1532
1533         I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
1534         POSTING_READ(DPLL_MD(crtc->pipe));
1535
1536         /* We do this three times for luck */
1537         I915_WRITE(reg, dpll);
1538         POSTING_READ(reg);
1539         udelay(150); /* wait for warmup */
1540         I915_WRITE(reg, dpll);
1541         POSTING_READ(reg);
1542         udelay(150); /* wait for warmup */
1543         I915_WRITE(reg, dpll);
1544         POSTING_READ(reg);
1545         udelay(150); /* wait for warmup */
1546 }
1547
1548 static void chv_enable_pll(struct intel_crtc *crtc,
1549                            const struct intel_crtc_state *pipe_config)
1550 {
1551         struct drm_device *dev = crtc->base.dev;
1552         struct drm_i915_private *dev_priv = dev->dev_private;
1553         int pipe = crtc->pipe;
1554         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1555         u32 tmp;
1556
1557         assert_pipe_disabled(dev_priv, crtc->pipe);
1558
1559         BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1560
1561         mutex_lock(&dev_priv->dpio_lock);
1562
1563         /* Enable the 10-bit clock to the display controller again */
1564         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1565         tmp |= DPIO_DCLKP_EN;
1566         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1567
1568         /*
1569          * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1570          */
1571         udelay(1);
1572
1573         /* Enable PLL */
1574         I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1575
1576         /* Check PLL is locked */
1577         if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1578                 DRM_ERROR("PLL %d failed to lock\n", pipe);
1579
1580         /* not sure when this should be written */
1581         I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1582         POSTING_READ(DPLL_MD(pipe));
1583
1584         mutex_unlock(&dev_priv->dpio_lock);
1585 }
1586
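/* Count the active pipes that are driving a DVO output. */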
1587 static int intel_num_dvo_pipes(struct drm_device *dev)
1588 {
1589         struct intel_crtc *crtc;
1590         int count = 0;
1591
1592         for_each_intel_crtc(dev, crtc)
1593                 count += crtc->active &&
1594                         intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1595
1596         return count;
1597 }
1598
1599 static void i9xx_enable_pll(struct intel_crtc *crtc)
1600 {
1601         struct drm_device *dev = crtc->base.dev;
1602         struct drm_i915_private *dev_priv = dev->dev_private;
1603         int reg = DPLL(crtc->pipe);
1604         u32 dpll = crtc->config->dpll_hw_state.dpll;
1605
1606         assert_pipe_disabled(dev_priv, crtc->pipe);
1607
1608         /* No really, not for ILK+ */
1609         BUG_ON(INTEL_INFO(dev)->gen >= 5);
1610
1611         /* PLL is protected by panel, make sure we can write it */
1612         if (IS_MOBILE(dev) && !IS_I830(dev))
1613                 assert_panel_unlocked(dev_priv, crtc->pipe);
1614
1615         /* Enable DVO 2x clock on both PLLs if necessary */
1616         if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1617                 /*
1618                  * It appears to be important that we don't enable this
1619                  * for the current pipe before otherwise configuring the
1620                  * PLL. No idea how this should be handled if multiple
1621                  * DVO outputs are enabled simultaneously.
1622                  */
1623                 dpll |= DPLL_DVO_2X_MODE;
1624                 I915_WRITE(DPLL(!crtc->pipe),
1625                            I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1626         }
1627
1628         /* Wait for the clocks to stabilize. */
1629         POSTING_READ(reg);
1630         udelay(150);
1631
1632         if (INTEL_INFO(dev)->gen >= 4) {
1633                 I915_WRITE(DPLL_MD(crtc->pipe),
1634                            crtc->config->dpll_hw_state.dpll_md);
1635         } else {
1636                 /* The pixel multiplier can only be updated once the
1637                  * DPLL is enabled and the clocks are stable.
1638                  *
1639                  * So write it again.
1640                  */
1641                 I915_WRITE(reg, dpll);
1642         }
1643
1644         /* We do this three times for luck */
1645         I915_WRITE(reg, dpll);
1646         POSTING_READ(reg);
1647         udelay(150); /* wait for warmup */
1648         I915_WRITE(reg, dpll);
1649         POSTING_READ(reg);
1650         udelay(150); /* wait for warmup */
1651         I915_WRITE(reg, dpll);
1652         POSTING_READ(reg);
1653         udelay(150); /* wait for warmup */
1654 }
1655
1656 /**
1657  * i9xx_disable_pll - disable a PLL
1658  * @crtc: crtc whose PLL is to be disabled
1659  *
1660  * Disable the PLL for @crtc's pipe, making sure the pipe is off
1661  * first.
1662  *
1663  * Note!  This is for pre-ILK only.
1664  */
1665 static void i9xx_disable_pll(struct intel_crtc *crtc)
1666 {
1667         struct drm_device *dev = crtc->base.dev;
1668         struct drm_i915_private *dev_priv = dev->dev_private;
1669         enum pipe pipe = crtc->pipe;
1670
1671         /* Disable DVO 2x clock on both PLLs if necessary */
1672         if (IS_I830(dev) &&
1673             intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1674             intel_num_dvo_pipes(dev) == 1) {
1675                 I915_WRITE(DPLL(PIPE_B),
1676                            I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1677                 I915_WRITE(DPLL(PIPE_A),
1678                            I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1679         }
1680
1681         /* Don't disable pipe or pipe PLLs if needed */
1682         if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1683             (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1684                 return;
1685
1686         /* Make sure the pipe isn't still relying on us */
1687         assert_pipe_disabled(dev_priv, pipe);
1688
1689         I915_WRITE(DPLL(pipe), 0);
1690         POSTING_READ(DPLL(pipe));
1691 }
1692
1693 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1694 {
1695         u32 val = 0;
1696
1697         /* Make sure the pipe isn't still relying on us */
1698         assert_pipe_disabled(dev_priv, pipe);
1699
1700         /*
1701          * Leave integrated clock source and reference clock enabled for pipe B.
1702          * The latter is needed for VGA hotplug / manual detection.
1703          */
1704         if (pipe == PIPE_B)
1705                 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1706         I915_WRITE(DPLL(pipe), val);
1707         POSTING_READ(DPLL(pipe));
1708
1709 }
1710
1711 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1712 {
1713         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1714         u32 val;
1715
1716         /* Make sure the pipe isn't still relying on us */
1717         assert_pipe_disabled(dev_priv, pipe);
1718
1719         /* Set PLL en = 0 */
1720         val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
1721         if (pipe != PIPE_A)
1722                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1723         I915_WRITE(DPLL(pipe), val);
1724         POSTING_READ(DPLL(pipe));
1725
1726         mutex_lock(&dev_priv->dpio_lock);
1727
1728         /* Disable 10bit clock to display controller */
1729         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1730         val &= ~DPIO_DCLKP_EN;
1731         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1732
1733         /* disable left/right clock distribution */
1734         if (pipe != PIPE_B) {
1735                 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1736                 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1737                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1738         } else {
1739                 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1740                 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1741                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1742         }
1743
1744         mutex_unlock(&dev_priv->dpio_lock);
1745 }
1746
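/*
 * Wait until the PHY reports the given digital port as ready (port ready
 * bits in the DPLL/PHY status registers).
 */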
1747 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1748                 struct intel_digital_port *dport)
1749 {
1750         u32 port_mask;
1751         int dpll_reg;
1752
1753         switch (dport->port) {
1754         case PORT_B:
1755                 port_mask = DPLL_PORTB_READY_MASK;
1756                 dpll_reg = DPLL(0);
1757                 break;
1758         case PORT_C:
1759                 port_mask = DPLL_PORTC_READY_MASK;
1760                 dpll_reg = DPLL(0);
1761                 break;
1762         case PORT_D:
1763                 port_mask = DPLL_PORTD_READY_MASK;
1764                 dpll_reg = DPIO_PHY_STATUS;
1765                 break;
1766         default:
1767                 BUG();
1768         }
1769
1770         if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
1771                 WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1772                      port_name(dport->port), I915_READ(dpll_reg));
1773 }
1774
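/*
 * Program (mode_set) the crtc's shared DPLL while it is still off, i.e.
 * before the first crtc using it gets enabled.
 */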
1775 static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1776 {
1777         struct drm_device *dev = crtc->base.dev;
1778         struct drm_i915_private *dev_priv = dev->dev_private;
1779         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1780
1781         if (WARN_ON(pll == NULL))
1782                 return;
1783
1784         WARN_ON(!pll->config.crtc_mask);
1785         if (pll->active == 0) {
1786                 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1787                 WARN_ON(pll->on);
1788                 assert_shared_dpll_disabled(dev_priv, pll);
1789
1790                 pll->mode_set(dev_priv, pll);
1791         }
1792 }
1793
1794 /**
1795  * intel_enable_shared_dpll - enable a crtc's shared DPLL
1796  * @crtc: crtc whose shared DPLL should be enabled
1797  *
1798  * The shared (PCH) DPLL needs to be enabled before the PCH transcoder,
1799  * since it drives the transcoder clock; the enable is reference counted
1800  * across all crtcs sharing the PLL.
1801  */
1802 static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1803 {
1804         struct drm_device *dev = crtc->base.dev;
1805         struct drm_i915_private *dev_priv = dev->dev_private;
1806         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1807
1808         if (WARN_ON(pll == NULL))
1809                 return;
1810
1811         if (WARN_ON(pll->config.crtc_mask == 0))
1812                 return;
1813
1814         DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1815                       pll->name, pll->active, pll->on,
1816                       crtc->base.base.id);
1817
1818         if (pll->active++) {
1819                 WARN_ON(!pll->on);
1820                 assert_shared_dpll_enabled(dev_priv, pll);
1821                 return;
1822         }
1823         WARN_ON(pll->on);
1824
1825         intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1826
1827         DRM_DEBUG_KMS("enabling %s\n", pll->name);
1828         pll->enable(dev_priv, pll);
1829         pll->on = true;
1830 }
1831
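/*
 * Drop the crtc's reference on its shared DPLL and turn the PLL off once no
 * other crtc is using it.
 */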
1832 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1833 {
1834         struct drm_device *dev = crtc->base.dev;
1835         struct drm_i915_private *dev_priv = dev->dev_private;
1836         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1837
1838         /* PCH only available on ILK+ */
1839         BUG_ON(INTEL_INFO(dev)->gen < 5);
1840         if (WARN_ON(pll == NULL))
1841                return;
1842
1843         if (WARN_ON(pll->config.crtc_mask == 0))
1844                 return;
1845
1846         DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1847                       pll->name, pll->active, pll->on,
1848                       crtc->base.base.id);
1849
1850         if (WARN_ON(pll->active == 0)) {
1851                 assert_shared_dpll_disabled(dev_priv, pll);
1852                 return;
1853         }
1854
1855         assert_shared_dpll_enabled(dev_priv, pll);
1856         WARN_ON(!pll->on);
1857         if (--pll->active)
1858                 return;
1859
1860         DRM_DEBUG_KMS("disabling %s\n", pll->name);
1861         pll->disable(dev_priv, pll);
1862         pll->on = false;
1863
1864         intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1865 }
1866
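/*
 * Enable the PCH transcoder for @pipe; the BPC (on IBX) and interlace mode
 * are copied over from the CPU pipe's PIPECONF.
 */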
1867 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1868                                            enum pipe pipe)
1869 {
1870         struct drm_device *dev = dev_priv->dev;
1871         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1872         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1873         uint32_t reg, val, pipeconf_val;
1874
1875         /* PCH only available on ILK+ */
1876         BUG_ON(!HAS_PCH_SPLIT(dev));
1877
1878         /* Make sure PCH DPLL is enabled */
1879         assert_shared_dpll_enabled(dev_priv,
1880                                    intel_crtc_to_shared_dpll(intel_crtc));
1881
1882         /* FDI must be feeding us bits for PCH ports */
1883         assert_fdi_tx_enabled(dev_priv, pipe);
1884         assert_fdi_rx_enabled(dev_priv, pipe);
1885
1886         if (HAS_PCH_CPT(dev)) {
1887                 /* Workaround: Set the timing override bit before enabling the
1888                  * pch transcoder. */
1889                 reg = TRANS_CHICKEN2(pipe);
1890                 val = I915_READ(reg);
1891                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1892                 I915_WRITE(reg, val);
1893         }
1894
1895         reg = PCH_TRANSCONF(pipe);
1896         val = I915_READ(reg);
1897         pipeconf_val = I915_READ(PIPECONF(pipe));
1898
1899         if (HAS_PCH_IBX(dev_priv->dev)) {
1900                 /*
1901                  * Make the BPC in the transcoder consistent with
1902                  * that in the pipeconf register.
1903                  */
1904                 val &= ~PIPECONF_BPC_MASK;
1905                 val |= pipeconf_val & PIPECONF_BPC_MASK;
1906         }
1907
1908         val &= ~TRANS_INTERLACE_MASK;
1909         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1910                 if (HAS_PCH_IBX(dev_priv->dev) &&
1911                     intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
1912                         val |= TRANS_LEGACY_INTERLACED_ILK;
1913                 else
1914                         val |= TRANS_INTERLACED;
1915         else
1916                 val |= TRANS_PROGRESSIVE;
1917
1918         I915_WRITE(reg, val | TRANS_ENABLE);
1919         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1920                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1921 }
1922
1923 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1924                                       enum transcoder cpu_transcoder)
1925 {
1926         u32 val, pipeconf_val;
1927
1928         /* PCH only available on ILK+ */
1929         BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
1930
1931         /* FDI must be feeding us bits for PCH ports */
1932         assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1933         assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1934
1935         /* Workaround: set timing override bit. */
1936         val = I915_READ(_TRANSA_CHICKEN2);
1937         val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1938         I915_WRITE(_TRANSA_CHICKEN2, val);
1939
1940         val = TRANS_ENABLE;
1941         pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1942
1943         if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1944             PIPECONF_INTERLACED_ILK)
1945                 val |= TRANS_INTERLACED;
1946         else
1947                 val |= TRANS_PROGRESSIVE;
1948
1949         I915_WRITE(LPT_TRANSCONF, val);
1950         if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1951                 DRM_ERROR("Failed to enable PCH transcoder\n");
1952 }
1953
1954 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1955                                             enum pipe pipe)
1956 {
1957         struct drm_device *dev = dev_priv->dev;
1958         uint32_t reg, val;
1959
1960         /* FDI relies on the transcoder */
1961         assert_fdi_tx_disabled(dev_priv, pipe);
1962         assert_fdi_rx_disabled(dev_priv, pipe);
1963
1964         /* Ports must be off as well */
1965         assert_pch_ports_disabled(dev_priv, pipe);
1966
1967         reg = PCH_TRANSCONF(pipe);
1968         val = I915_READ(reg);
1969         val &= ~TRANS_ENABLE;
1970         I915_WRITE(reg, val);
1971         /* wait for PCH transcoder off, transcoder state */
1972         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1973                 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1974
1975         if (!HAS_PCH_IBX(dev)) {
1976                 /* Workaround: Clear the timing override chicken bit again. */
1977                 reg = TRANS_CHICKEN2(pipe);
1978                 val = I915_READ(reg);
1979                 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1980                 I915_WRITE(reg, val);
1981         }
1982 }
1983
1984 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1985 {
1986         u32 val;
1987
1988         val = I915_READ(LPT_TRANSCONF);
1989         val &= ~TRANS_ENABLE;
1990         I915_WRITE(LPT_TRANSCONF, val);
1991         /* wait for PCH transcoder off, transcoder state */
1992         if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1993                 DRM_ERROR("Failed to disable PCH transcoder\n");
1994
1995         /* Workaround: clear timing override bit. */
1996         val = I915_READ(_TRANSA_CHICKEN2);
1997         val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1998         I915_WRITE(_TRANSA_CHICKEN2, val);
1999 }
2000
2001 /**
2002  * intel_enable_pipe - enable a pipe, asserting requirements
2003  * @crtc: crtc responsible for the pipe
2004  *
2005  * Enable @crtc's pipe, making sure that various hardware specific requirements
2006  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2007  */
2008 static void intel_enable_pipe(struct intel_crtc *crtc)
2009 {
2010         struct drm_device *dev = crtc->base.dev;
2011         struct drm_i915_private *dev_priv = dev->dev_private;
2012         enum pipe pipe = crtc->pipe;
2013         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2014                                                                       pipe);
2015         enum pipe pch_transcoder;
2016         int reg;
2017         u32 val;
2018
2019         assert_planes_disabled(dev_priv, pipe);
2020         assert_cursor_disabled(dev_priv, pipe);
2021         assert_sprites_disabled(dev_priv, pipe);
2022
2023         if (HAS_PCH_LPT(dev_priv->dev))
2024                 pch_transcoder = TRANSCODER_A;
2025         else
2026                 pch_transcoder = pipe;
2027
2028         /*
2029          * A pipe without a PLL won't actually be able to drive bits from
2030          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2031          * need the check.
2032          */
2033         if (!HAS_PCH_SPLIT(dev_priv->dev))
2034                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
2035                         assert_dsi_pll_enabled(dev_priv);
2036                 else
2037                         assert_pll_enabled(dev_priv, pipe);
2038         else {
2039                 if (crtc->config->has_pch_encoder) {
2040                         /* if driving the PCH, we need FDI enabled */
2041                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2042                         assert_fdi_tx_pll_enabled(dev_priv,
2043                                                   (enum pipe) cpu_transcoder);
2044                 }
2045                 /* FIXME: assert CPU port conditions for SNB+ */
2046         }
2047
2048         reg = PIPECONF(cpu_transcoder);
2049         val = I915_READ(reg);
2050         if (val & PIPECONF_ENABLE) {
2051                 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2052                           (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2053                 return;
2054         }
2055
2056         I915_WRITE(reg, val | PIPECONF_ENABLE);
2057         POSTING_READ(reg);
2058 }
2059
2060 /**
2061  * intel_disable_pipe - disable a pipe, asserting requirements
2062  * @crtc: crtc whose pipe is to be disabled
2063  *
2064  * Disable the pipe of @crtc, making sure that various hardware
2065  * specific requirements are met, if applicable, e.g. plane
2066  * disabled, panel fitter off, etc.
2067  *
2068  * Will wait until the pipe has shut down before returning.
2069  */
2070 static void intel_disable_pipe(struct intel_crtc *crtc)
2071 {
2072         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2073         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2074         enum pipe pipe = crtc->pipe;
2075         int reg;
2076         u32 val;
2077
2078         /*
2079          * Make sure planes won't keep trying to pump pixels to us,
2080          * or we might hang the display.
2081          */
2082         assert_planes_disabled(dev_priv, pipe);
2083         assert_cursor_disabled(dev_priv, pipe);
2084         assert_sprites_disabled(dev_priv, pipe);
2085
2086         reg = PIPECONF(cpu_transcoder);
2087         val = I915_READ(reg);
2088         if ((val & PIPECONF_ENABLE) == 0)
2089                 return;
2090
2091         /*
2092          * Double wide has implications for planes
2093          * so best keep it disabled when not needed.
2094          */
2095         if (crtc->config->double_wide)
2096                 val &= ~PIPECONF_DOUBLE_WIDE;
2097
2098         /* Don't disable pipe or pipe PLLs if needed */
2099         if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2100             !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2101                 val &= ~PIPECONF_ENABLE;
2102
2103         I915_WRITE(reg, val);
2104         if ((val & PIPECONF_ENABLE) == 0)
2105                 intel_wait_for_pipe_off(crtc);
2106 }
2107
2108 /*
2109  * Plane regs are double buffered, going from enabled->disabled needs a
2110  * trigger in order to latch.  The display address reg provides this.
2111  */
2112 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2113                                enum plane plane)
2114 {
2115         struct drm_device *dev = dev_priv->dev;
2116         u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
2117
2118         I915_WRITE(reg, I915_READ(reg));
2119         POSTING_READ(reg);
2120 }
2121
2122 /**
2123  * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
2124  * @plane:  plane to be enabled
2125  * @crtc: crtc for the plane
2126  *
2127  * Enable @plane on @crtc, making sure that the pipe is running first.
2128  */
2129 static void intel_enable_primary_hw_plane(struct drm_plane *plane,
2130                                           struct drm_crtc *crtc)
2131 {
2132         struct drm_device *dev = plane->dev;
2133         struct drm_i915_private *dev_priv = dev->dev_private;
2134         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2135
2136         /* If the pipe isn't enabled, we can't pump pixels and may hang */
2137         assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2138
2139         if (intel_crtc->primary_enabled)
2140                 return;
2141
2142         intel_crtc->primary_enabled = true;
2143
2144         dev_priv->display.update_primary_plane(crtc, plane->fb,
2145                                                crtc->x, crtc->y);
2146
2147         /*
2148          * BDW signals flip done immediately if the plane
2149          * is disabled, even if the plane enable is already
2150          * armed to occur at the next vblank :(
2151          */
2152         if (IS_BROADWELL(dev))
2153                 intel_wait_for_vblank(dev, intel_crtc->pipe);
2154 }
2155
2156 /**
2157  * intel_disable_primary_hw_plane - disable the primary hardware plane
2158  * @plane: plane to be disabled
2159  * @crtc: crtc for the plane
2160  *
2161  * Disable @plane on @crtc, making sure that the crtc is active first.
2162  */
2163 static void intel_disable_primary_hw_plane(struct drm_plane *plane,
2164                                            struct drm_crtc *crtc)
2165 {
2166         struct drm_device *dev = plane->dev;
2167         struct drm_i915_private *dev_priv = dev->dev_private;
2168         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2169
2170         if (WARN_ON(!intel_crtc->active))
2171                 return;
2172
2173         if (!intel_crtc->primary_enabled)
2174                 return;
2175
2176         intel_crtc->primary_enabled = false;
2177
2178         dev_priv->display.update_primary_plane(crtc, plane->fb,
2179                                                crtc->x, crtc->y);
2180 }
2181
2182 static bool need_vtd_wa(struct drm_device *dev)
2183 {
2184 #ifdef CONFIG_INTEL_IOMMU
2185         if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2186                 return true;
2187 #endif
2188         return false;
2189 }
2190
2191 int
2192 intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling)
2193 {
2194         int tile_height;
2195
2196         tile_height = tiling ? (IS_GEN2(dev) ? 16 : 8) : 1;
2197         return ALIGN(height, tile_height);
2198 }
2199
2200 int
2201 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2202                            struct drm_framebuffer *fb,
2203                            struct intel_engine_cs *pipelined)
2204 {
2205         struct drm_device *dev = fb->dev;
2206         struct drm_i915_private *dev_priv = dev->dev_private;
2207         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2208         u32 alignment;
2209         int ret;
2210
2211         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2212
2213         switch (obj->tiling_mode) {
2214         case I915_TILING_NONE:
2215                 if (INTEL_INFO(dev)->gen >= 9)
2216                         alignment = 256 * 1024;
2217                 else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2218                         alignment = 128 * 1024;
2219                 else if (INTEL_INFO(dev)->gen >= 4)
2220                         alignment = 4 * 1024;
2221                 else
2222                         alignment = 64 * 1024;
2223                 break;
2224         case I915_TILING_X:
2225                 if (INTEL_INFO(dev)->gen >= 9)
2226                         alignment = 256 * 1024;
2227                 else {
2228                         /* pin() will align the object as required by fence */
2229                         alignment = 0;
2230                 }
2231                 break;
2232         case I915_TILING_Y:
2233                 WARN(1, "Y tiled bo slipped through, driver bug!\n");
2234                 return -EINVAL;
2235         default:
2236                 BUG();
2237         }
2238
2239         /* Note that the w/a also requires 64 PTE of padding following the
2240          * bo. We currently fill all unused PTE with the shadow page and so
2241          * we should always have valid PTE following the scanout preventing
2242          * the VT-d warning.
2243          */
2244         if (need_vtd_wa(dev) && alignment < 256 * 1024)
2245                 alignment = 256 * 1024;
2246
2247         /*
2248          * Global GTT PTE registers are special registers which actually forward
2249          * writes to a chunk of system memory, which means that there is no risk
2250          * that the register values disappear as soon as we call
2251          * intel_runtime_pm_put(), so it is correct to wrap only the
2252          * pin/unpin/fence and not more.
2253          */
2254         intel_runtime_pm_get(dev_priv);
2255
2256         dev_priv->mm.interruptible = false;
2257         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2258         if (ret)
2259                 goto err_interruptible;
2260
2261         /* Install a fence for tiled scan-out. Pre-i965 always needs a
2262          * fence, whereas 965+ only requires a fence if using
2263          * framebuffer compression.  For simplicity, we always install
2264          * a fence as the cost is not that onerous.
2265          */
2266         ret = i915_gem_object_get_fence(obj);
2267         if (ret)
2268                 goto err_unpin;
2269
2270         i915_gem_object_pin_fence(obj);
2271
2272         dev_priv->mm.interruptible = true;
2273         intel_runtime_pm_put(dev_priv);
2274         return 0;
2275
2276 err_unpin:
2277         i915_gem_object_unpin_from_display_plane(obj);
2278 err_interruptible:
2279         dev_priv->mm.interruptible = true;
2280         intel_runtime_pm_put(dev_priv);
2281         return ret;
2282 }
2283
2284 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2285 {
2286         WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2287
2288         i915_gem_object_unpin_fence(obj);
2289         i915_gem_object_unpin_from_display_plane(obj);
2290 }
2291
2292 /* Computes the linear offset to the base tile and adjusts x, y. The bytes
2293  * per pixel value is assumed to be a power of two. */
2294 unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2295                                              unsigned int tiling_mode,
2296                                              unsigned int cpp,
2297                                              unsigned int pitch)
2298 {
2299         if (tiling_mode != I915_TILING_NONE) {
2300                 unsigned int tile_rows, tiles;
2301
2302                 tile_rows = *y / 8;
2303                 *y %= 8;
2304
2305                 tiles = *x / (512/cpp);
2306                 *x %= 512/cpp;
2307
2308                 return tile_rows * pitch * 8 + tiles * 4096;
2309         } else {
2310                 unsigned int offset;
2311
2312                 offset = *y * pitch + *x * cpp;
2313                 *y = 0;
2314                 *x = (offset & 4095) / cpp;
2315                 return offset & -4096;
2316         }
2317 }
2318
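/* Translate the DISPPLANE_* pixel format field back into a DRM fourcc. */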
2319 static int i9xx_format_to_fourcc(int format)
2320 {
2321         switch (format) {
2322         case DISPPLANE_8BPP:
2323                 return DRM_FORMAT_C8;
2324         case DISPPLANE_BGRX555:
2325                 return DRM_FORMAT_XRGB1555;
2326         case DISPPLANE_BGRX565:
2327                 return DRM_FORMAT_RGB565;
2328         default:
2329         case DISPPLANE_BGRX888:
2330                 return DRM_FORMAT_XRGB8888;
2331         case DISPPLANE_RGBX888:
2332                 return DRM_FORMAT_XBGR8888;
2333         case DISPPLANE_BGRX101010:
2334                 return DRM_FORMAT_XRGB2101010;
2335         case DISPPLANE_RGBX101010:
2336                 return DRM_FORMAT_XBGR2101010;
2337         }
2338 }
2339
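/* Same as above, but for the SKL+ PLANE_CTL format, ordering and alpha bits. */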
2340 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2341 {
2342         switch (format) {
2343         case PLANE_CTL_FORMAT_RGB_565:
2344                 return DRM_FORMAT_RGB565;
2345         default:
2346         case PLANE_CTL_FORMAT_XRGB_8888:
2347                 if (rgb_order) {
2348                         if (alpha)
2349                                 return DRM_FORMAT_ABGR8888;
2350                         else
2351                                 return DRM_FORMAT_XBGR8888;
2352                 } else {
2353                         if (alpha)
2354                                 return DRM_FORMAT_ARGB8888;
2355                         else
2356                                 return DRM_FORMAT_XRGB8888;
2357                 }
2358         case PLANE_CTL_FORMAT_XRGB_2101010:
2359                 if (rgb_order)
2360                         return DRM_FORMAT_XBGR2101010;
2361                 else
2362                         return DRM_FORMAT_XRGB2101010;
2363         }
2364 }
2365
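/*
 * Wrap the scanout buffer left behind by the BIOS/firmware (described by
 * @plane_config) in a stolen-memory GEM object and initialize the crtc's
 * primary framebuffer around it.
 */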
2366 static bool
2367 intel_alloc_plane_obj(struct intel_crtc *crtc,
2368                       struct intel_initial_plane_config *plane_config)
2369 {
2370         struct drm_device *dev = crtc->base.dev;
2371         struct drm_i915_gem_object *obj = NULL;
2372         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2373         u32 base = plane_config->base;
2374
2375         if (plane_config->size == 0)
2376                 return false;
2377
2378         obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
2379                                                              plane_config->size);
2380         if (!obj)
2381                 return false;
2382
2383         obj->tiling_mode = plane_config->tiling;
2384         if (obj->tiling_mode == I915_TILING_X)
2385                 obj->stride = crtc->base.primary->fb->pitches[0];
2386
2387         mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
2388         mode_cmd.width = crtc->base.primary->fb->width;
2389         mode_cmd.height = crtc->base.primary->fb->height;
2390         mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
2391
2392         mutex_lock(&dev->struct_mutex);
2393
2394         if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
2395                                    &mode_cmd, obj)) {
2396                 DRM_DEBUG_KMS("intel fb init failed\n");
2397                 goto out_unref_obj;
2398         }
2399
2400         obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
2401         mutex_unlock(&dev->struct_mutex);
2402
2403         DRM_DEBUG_KMS("plane fb obj %p\n", obj);
2404         return true;
2405
2406 out_unref_obj:
2407         drm_gem_object_unreference(&obj->base);
2408         mutex_unlock(&dev->struct_mutex);
2409         return false;
2410 }
2411
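/*
 * Take over the firmware framebuffer for this crtc or, failing that, share a
 * framebuffer already used by another active crtc that scans out from the
 * same GGTT address.
 */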
2412 static void
2413 intel_find_plane_obj(struct intel_crtc *intel_crtc,
2414                      struct intel_initial_plane_config *plane_config)
2415 {
2416         struct drm_device *dev = intel_crtc->base.dev;
2417         struct drm_i915_private *dev_priv = dev->dev_private;
2418         struct drm_crtc *c;
2419         struct intel_crtc *i;
2420         struct drm_i915_gem_object *obj;
2421
2422         if (!intel_crtc->base.primary->fb)
2423                 return;
2424
2425         if (intel_alloc_plane_obj(intel_crtc, plane_config))
2426                 return;
2427
2428         kfree(intel_crtc->base.primary->fb);
2429         intel_crtc->base.primary->fb = NULL;
2430
2431         /*
2432          * Failed to alloc the obj, check to see if we should share
2433          * an fb with another CRTC instead
2434          */
2435         for_each_crtc(dev, c) {
2436                 i = to_intel_crtc(c);
2437
2438                 if (c == &intel_crtc->base)
2439                         continue;
2440
2441                 if (!i->active)
2442                         continue;
2443
2444                 obj = intel_fb_obj(c->primary->fb);
2445                 if (obj == NULL)
2446                         continue;
2447
2448                 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2449                         if (obj->tiling_mode != I915_TILING_NONE)
2450                                 dev_priv->preserve_bios_swizzle = true;
2451
2452                         drm_framebuffer_reference(c->primary->fb);
2453                         intel_crtc->base.primary->fb = c->primary->fb;
2454                         obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2455                         break;
2456                 }
2457         }
2458 }
2459
2460 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2461                                       struct drm_framebuffer *fb,
2462                                       int x, int y)
2463 {
2464         struct drm_device *dev = crtc->dev;
2465         struct drm_i915_private *dev_priv = dev->dev_private;
2466         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2467         struct drm_i915_gem_object *obj;
2468         int plane = intel_crtc->plane;
2469         unsigned long linear_offset;
2470         u32 dspcntr;
2471         u32 reg = DSPCNTR(plane);
2472         int pixel_size;
2473
2474         if (!intel_crtc->primary_enabled) {
2475                 I915_WRITE(reg, 0);
2476                 if (INTEL_INFO(dev)->gen >= 4)
2477                         I915_WRITE(DSPSURF(plane), 0);
2478                 else
2479                         I915_WRITE(DSPADDR(plane), 0);
2480                 POSTING_READ(reg);
2481                 return;
2482         }
2483
2484         obj = intel_fb_obj(fb);
2485         if (WARN_ON(obj == NULL))
2486                 return;
2487
2488         pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2489
2490         dspcntr = DISPPLANE_GAMMA_ENABLE;
2491
2492         dspcntr |= DISPLAY_PLANE_ENABLE;
2493
2494         if (INTEL_INFO(dev)->gen < 4) {
2495                 if (intel_crtc->pipe == PIPE_B)
2496                         dspcntr |= DISPPLANE_SEL_PIPE_B;
2497
2498                 /* pipesrc and dspsize control the size that is scaled from,
2499                  * which should always be the user's requested size.
2500                  */
2501                 I915_WRITE(DSPSIZE(plane),
2502                            ((intel_crtc->config->pipe_src_h - 1) << 16) |
2503                            (intel_crtc->config->pipe_src_w - 1));
2504                 I915_WRITE(DSPPOS(plane), 0);
2505         } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2506                 I915_WRITE(PRIMSIZE(plane),
2507                            ((intel_crtc->config->pipe_src_h - 1) << 16) |
2508                            (intel_crtc->config->pipe_src_w - 1));
2509                 I915_WRITE(PRIMPOS(plane), 0);
2510                 I915_WRITE(PRIMCNSTALPHA(plane), 0);
2511         }
2512
2513         switch (fb->pixel_format) {
2514         case DRM_FORMAT_C8:
2515                 dspcntr |= DISPPLANE_8BPP;
2516                 break;
2517         case DRM_FORMAT_XRGB1555:
2518         case DRM_FORMAT_ARGB1555:
2519                 dspcntr |= DISPPLANE_BGRX555;
2520                 break;
2521         case DRM_FORMAT_RGB565:
2522                 dspcntr |= DISPPLANE_BGRX565;
2523                 break;
2524         case DRM_FORMAT_XRGB8888:
2525         case DRM_FORMAT_ARGB8888:
2526                 dspcntr |= DISPPLANE_BGRX888;
2527                 break;
2528         case DRM_FORMAT_XBGR8888:
2529         case DRM_FORMAT_ABGR8888:
2530                 dspcntr |= DISPPLANE_RGBX888;
2531                 break;
2532         case DRM_FORMAT_XRGB2101010:
2533         case DRM_FORMAT_ARGB2101010:
2534                 dspcntr |= DISPPLANE_BGRX101010;
2535                 break;
2536         case DRM_FORMAT_XBGR2101010:
2537         case DRM_FORMAT_ABGR2101010:
2538                 dspcntr |= DISPPLANE_RGBX101010;
2539                 break;
2540         default:
2541                 BUG();
2542         }
2543
2544         if (INTEL_INFO(dev)->gen >= 4 &&
2545             obj->tiling_mode != I915_TILING_NONE)
2546                 dspcntr |= DISPPLANE_TILED;
2547
2548         if (IS_G4X(dev))
2549                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2550
2551         linear_offset = y * fb->pitches[0] + x * pixel_size;
2552
2553         if (INTEL_INFO(dev)->gen >= 4) {
2554                 intel_crtc->dspaddr_offset =
2555                         intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2556                                                        pixel_size,
2557                                                        fb->pitches[0]);
2558                 linear_offset -= intel_crtc->dspaddr_offset;
2559         } else {
2560                 intel_crtc->dspaddr_offset = linear_offset;
2561         }
2562
2563         if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2564                 dspcntr |= DISPPLANE_ROTATE_180;
2565
2566                 x += (intel_crtc->config->pipe_src_w - 1);
2567                 y += (intel_crtc->config->pipe_src_h - 1);
2568
2569                 /* Find the last pixel of the last line of the display
2570                    data and add it to linear_offset. */
2571                 linear_offset +=
2572                         (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2573                         (intel_crtc->config->pipe_src_w - 1) * pixel_size;
2574         }
2575
2576         I915_WRITE(reg, dspcntr);
2577
2578         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2579                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2580                       fb->pitches[0]);
2581         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2582         if (INTEL_INFO(dev)->gen >= 4) {
2583                 I915_WRITE(DSPSURF(plane),
2584                            i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2585                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2586                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2587         } else
2588                 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2589         POSTING_READ(reg);
2590 }
2591
2592 static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2593                                           struct drm_framebuffer *fb,
2594                                           int x, int y)
2595 {
2596         struct drm_device *dev = crtc->dev;
2597         struct drm_i915_private *dev_priv = dev->dev_private;
2598         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2599         struct drm_i915_gem_object *obj;
2600         int plane = intel_crtc->plane;
2601         unsigned long linear_offset;
2602         u32 dspcntr;
2603         u32 reg = DSPCNTR(plane);
2604         int pixel_size;
2605
2606         if (!intel_crtc->primary_enabled) {
2607                 I915_WRITE(reg, 0);
2608                 I915_WRITE(DSPSURF(plane), 0);
2609                 POSTING_READ(reg);
2610                 return;
2611         }
2612
2613         obj = intel_fb_obj(fb);
2614         if (WARN_ON(obj == NULL))
2615                 return;
2616
2617         pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2618
2619         dspcntr = DISPPLANE_GAMMA_ENABLE;
2620
2621         dspcntr |= DISPLAY_PLANE_ENABLE;
2622
2623         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2624                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2625
2626         switch (fb->pixel_format) {
2627         case DRM_FORMAT_C8:
2628                 dspcntr |= DISPPLANE_8BPP;
2629                 break;
2630         case DRM_FORMAT_RGB565:
2631                 dspcntr |= DISPPLANE_BGRX565;
2632                 break;
2633         case DRM_FORMAT_XRGB8888:
2634         case DRM_FORMAT_ARGB8888:
2635                 dspcntr |= DISPPLANE_BGRX888;
2636                 break;
2637         case DRM_FORMAT_XBGR8888:
2638         case DRM_FORMAT_ABGR8888:
2639                 dspcntr |= DISPPLANE_RGBX888;
2640                 break;
2641         case DRM_FORMAT_XRGB2101010:
2642         case DRM_FORMAT_ARGB2101010:
2643                 dspcntr |= DISPPLANE_BGRX101010;
2644                 break;
2645         case DRM_FORMAT_XBGR2101010:
2646         case DRM_FORMAT_ABGR2101010:
2647                 dspcntr |= DISPPLANE_RGBX101010;
2648                 break;
2649         default:
2650                 BUG();
2651         }
2652
2653         if (obj->tiling_mode != I915_TILING_NONE)
2654                 dspcntr |= DISPPLANE_TILED;
2655
2656         if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2657                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2658
2659         linear_offset = y * fb->pitches[0] + x * pixel_size;
2660         intel_crtc->dspaddr_offset =
2661                 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2662                                                pixel_size,
2663                                                fb->pitches[0]);
2664         linear_offset -= intel_crtc->dspaddr_offset;
2665         if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2666                 dspcntr |= DISPPLANE_ROTATE_180;
2667
2668                 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2669                         x += (intel_crtc->config->pipe_src_w - 1);
2670                         y += (intel_crtc->config->pipe_src_h - 1);
2671
2672                         /* Find the last pixel of the last line of the display
2673                            data and add it to linear_offset. */
2674                         linear_offset +=
2675                                 (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2676                                 (intel_crtc->config->pipe_src_w - 1) * pixel_size;
2677                 }
2678         }
2679
2680         I915_WRITE(reg, dspcntr);
2681
2682         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2683                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2684                       fb->pitches[0]);
2685         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2686         I915_WRITE(DSPSURF(plane),
2687                    i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2688         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2689                 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2690         } else {
2691                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2692                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2693         }
2694         POSTING_READ(reg);
2695 }
2696
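/* Program the gen9+ universal plane registers for the primary plane. */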
2697 static void skylake_update_primary_plane(struct drm_crtc *crtc,
2698                                          struct drm_framebuffer *fb,
2699                                          int x, int y)
2700 {
2701         struct drm_device *dev = crtc->dev;
2702         struct drm_i915_private *dev_priv = dev->dev_private;
2703         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2704         struct intel_framebuffer *intel_fb;
2705         struct drm_i915_gem_object *obj;
2706         int pipe = intel_crtc->pipe;
2707         u32 plane_ctl, stride;
2708
2709         if (!intel_crtc->primary_enabled) {
2710                 I915_WRITE(PLANE_CTL(pipe, 0), 0);
2711                 I915_WRITE(PLANE_SURF(pipe, 0), 0);
2712                 POSTING_READ(PLANE_CTL(pipe, 0));
2713                 return;
2714         }
2715
2716         plane_ctl = PLANE_CTL_ENABLE |
2717                     PLANE_CTL_PIPE_GAMMA_ENABLE |
2718                     PLANE_CTL_PIPE_CSC_ENABLE;
2719
2720         switch (fb->pixel_format) {
2721         case DRM_FORMAT_RGB565:
2722                 plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
2723                 break;
2724         case DRM_FORMAT_XRGB8888:
2725                 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2726                 break;
2727         case DRM_FORMAT_XBGR8888:
2728                 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2729                 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2730                 break;
2731         case DRM_FORMAT_XRGB2101010:
2732                 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2733                 break;
2734         case DRM_FORMAT_XBGR2101010:
2735                 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2736                 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2737                 break;
2738         default:
2739                 BUG();
2740         }
2741
2742         intel_fb = to_intel_framebuffer(fb);
2743         obj = intel_fb->obj;
2744
2745         /*
2746          * The stride is expressed either in 64 byte chunks for linear
2747          * buffers or in number of tiles for tiled buffers.
2748          */
2749         switch (obj->tiling_mode) {
2750         case I915_TILING_NONE:
2751                 stride = fb->pitches[0] >> 6;
2752                 break;
2753         case I915_TILING_X:
2754                 plane_ctl |= PLANE_CTL_TILED_X;
2755                 stride = fb->pitches[0] >> 9;
2756                 break;
2757         default:
2758                 BUG();
2759         }
2760
2761         plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
2762         if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
2763                 plane_ctl |= PLANE_CTL_ROTATE_180;
2764
2765         I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
2766
2767         DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
2768                       i915_gem_obj_ggtt_offset(obj),
2769                       x, y, fb->width, fb->height,
2770                       fb->pitches[0]);
2771
2772         I915_WRITE(PLANE_POS(pipe, 0), 0);
2773         I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
2774         I915_WRITE(PLANE_SIZE(pipe, 0),
2775                    (intel_crtc->config->pipe_src_h - 1) << 16 |
2776                    (intel_crtc->config->pipe_src_w - 1));
2777         I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
2778         I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
2779
2780         POSTING_READ(PLANE_SURF(pipe, 0));
2781 }
2782
2783 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2784 static int
2785 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2786                            int x, int y, enum mode_set_atomic state)
2787 {
2788         struct drm_device *dev = crtc->dev;
2789         struct drm_i915_private *dev_priv = dev->dev_private;
2790
2791         if (dev_priv->display.disable_fbc)
2792                 dev_priv->display.disable_fbc(dev);
2793
2794         dev_priv->display.update_primary_plane(crtc, fb, x, y);
2795
2796         return 0;
2797 }
2798
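/*
 * Force-complete any page flips still pending on each crtc so that userspace
 * receives its flip events (used when a GPU reset nukes flips in the rings).
 */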
2799 static void intel_complete_page_flips(struct drm_device *dev)
2800 {
2801         struct drm_crtc *crtc;
2802
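             /* Complete any outstanding flip on each pipe as if the flip-done
              * interrupt had fired, so userspace gets its events.
              */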
2803         for_each_crtc(dev, crtc) {
2804                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2805                 enum plane plane = intel_crtc->plane;
2806
2807                 intel_prepare_page_flip(dev, plane);
2808                 intel_finish_page_flip_plane(dev, plane);
2809         }
2810 }
2811
2812 static void intel_update_primary_planes(struct drm_device *dev)
2813 {
2814         struct drm_i915_private *dev_priv = dev->dev_private;
2815         struct drm_crtc *crtc;
2816
2817         for_each_crtc(dev, crtc) {
2818                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2819
2820                 drm_modeset_lock(&crtc->mutex, NULL);
2821                 /*
2822                  * FIXME: Once we have proper support for primary planes (and
2823                  * disabling them without disabling the entire crtc) allow again
2824                  * a NULL crtc->primary->fb.
2825                  */
2826                 if (intel_crtc->active && crtc->primary->fb)
2827                         dev_priv->display.update_primary_plane(crtc,
2828                                                                crtc->primary->fb,
2829                                                                crtc->x,
2830                                                                crtc->y);
2831                 drm_modeset_unlock(&crtc->mutex);
2832         }
2833 }
2834
2835 void intel_prepare_reset(struct drm_device *dev)
2836 {
2837         struct drm_i915_private *dev_priv = to_i915(dev);
2838         struct intel_crtc *crtc;
2839
2840         /* no reset support for gen2 */
2841         if (IS_GEN2(dev))
2842                 return;
2843
2844         /* reset doesn't touch the display */
2845         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
2846                 return;
2847
2848         drm_modeset_lock_all(dev);
2849
2850         /*
2851          * Disabling the crtcs gracefully seems nicer. Also the
2852          * g33 docs say we should at least disable all the planes.
2853          */
2854         for_each_intel_crtc(dev, crtc) {
2855                 if (crtc->active)
2856                         dev_priv->display.crtc_disable(&crtc->base);
2857         }
2858 }
2859
2860 void intel_finish_reset(struct drm_device *dev)
2861 {
2862         struct drm_i915_private *dev_priv = to_i915(dev);
2863
2864         /*
2865          * Flips in the rings will be nuked by the reset,
2866          * so complete all pending flips so that user space
2867          * will get its events and not get stuck.
2868          */
2869         intel_complete_page_flips(dev);
2870
2871         /* no reset support for gen2 */
2872         if (IS_GEN2(dev))
2873                 return;
2874
2875         /* reset doesn't touch the display */
2876         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
2877                 /*
2878                  * Flips in the rings have been nuked by the reset,
2879                  * so update the base address of all primary
2880                  * planes to the last fb to make sure we're
2881                  * showing the correct fb after a reset.
2882                  */
2883                 intel_update_primary_planes(dev);
2884                 return;
2885         }
2886
2887         /*
2888          * The display has been reset as well,
2889          * so we need a full re-initialization.
2890          */
2891         intel_runtime_pm_disable_interrupts(dev_priv);
2892         intel_runtime_pm_enable_interrupts(dev_priv);
2893
2894         intel_modeset_init_hw(dev);
2895
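             /* hpd_irq_setup must be called with irq_lock held to serialize
              * against the interrupt handler.
              */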
2896         spin_lock_irq(&dev_priv->irq_lock);
2897         if (dev_priv->display.hpd_irq_setup)
2898                 dev_priv->display.hpd_irq_setup(dev);
2899         spin_unlock_irq(&dev_priv->irq_lock);
2900
2901         intel_modeset_setup_hw_state(dev, true);
2902
2903         intel_hpd_init(dev_priv);
2904
2905         drm_modeset_unlock_all(dev);
2906 }
2907
2908 static int
2909 intel_finish_fb(struct drm_framebuffer *old_fb)
2910 {
2911         struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
2912         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2913         bool was_interruptible = dev_priv->mm.interruptible;
2914         int ret;
2915
2916         /* Big Hammer, we also need to ensure that any pending
2917          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2918          * current scanout is retired before unpinning the old
2919          * framebuffer.
2920          *
2921          * This should only fail upon a hung GPU, in which case we
2922          * can safely continue.
2923          */
2924         dev_priv->mm.interruptible = false;
2925         ret = i915_gem_object_finish_gpu(obj);
2926         dev_priv->mm.interruptible = was_interruptible;
2927
2928         return ret;
2929 }
2930
2931 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2932 {
2933         struct drm_device *dev = crtc->dev;
2934         struct drm_i915_private *dev_priv = dev->dev_private;
2935         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2936         bool pending;
2937
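             /* If a GPU reset is in progress, or one has happened since the
              * flip was queued, the flip will never complete; don't report it
              * as pending.
              */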
2938         if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2939             intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2940                 return false;
2941
2942         spin_lock_irq(&dev->event_lock);
2943         pending = to_intel_crtc(crtc)->unpin_work != NULL;
2944         spin_unlock_irq(&dev->event_lock);
2945
2946         return pending;
2947 }
2948
2949 static void intel_update_pipe_size(struct intel_crtc *crtc)
2950 {
2951         struct drm_device *dev = crtc->base.dev;
2952         struct drm_i915_private *dev_priv = dev->dev_private;
2953         const struct drm_display_mode *adjusted_mode;
2954
2955         if (!i915.fastboot)
2956                 return;
2957
2958         /*
2959          * Update pipe size and adjust fitter if needed: the reason for this is
2960          * that in compute_mode_changes we check the native mode (not the pfit
2961          * mode) to see if we can flip rather than do a full mode set. In the
2962          * fastboot case, we'll flip, but if we don't update the pipesrc and
2963          * pfit state, we'll end up with a big fb scanned out into the wrong
2964          * sized surface.
2965          *
2966          * To fix this properly, we need to hoist the checks up into
2967          * compute_mode_changes (or above), check the actual pfit state and
2968          * whether the platform allows pfit disable with pipe active, and only
2969          * then update the pipesrc and pfit state, even on the flip path.
2970          */
2971
2972         adjusted_mode = &crtc->config->base.adjusted_mode;
2973
2974         I915_WRITE(PIPESRC(crtc->pipe),
2975                    ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2976                    (adjusted_mode->crtc_vdisplay - 1));
2977         if (!crtc->config->pch_pfit.enabled &&
2978             (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2979              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2980                 I915_WRITE(PF_CTL(crtc->pipe), 0);
2981                 I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
2982                 I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
2983         }
2984         crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
2985         crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
2986 }
2987
2988 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2989 {
2990         struct drm_device *dev = crtc->dev;
2991         struct drm_i915_private *dev_priv = dev->dev_private;
2992         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2993         int pipe = intel_crtc->pipe;
2994         u32 reg, temp;
2995
2996         /* enable normal train */
2997         reg = FDI_TX_CTL(pipe);
2998         temp = I915_READ(reg);
2999         if (IS_IVYBRIDGE(dev)) {
3000                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3001                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3002         } else {
3003                 temp &= ~FDI_LINK_TRAIN_NONE;
3004                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3005         }
3006         I915_WRITE(reg, temp);
3007
3008         reg = FDI_RX_CTL(pipe);
3009         temp = I915_READ(reg);
3010         if (HAS_PCH_CPT(dev)) {
3011                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3012                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3013         } else {
3014                 temp &= ~FDI_LINK_TRAIN_NONE;
3015                 temp |= FDI_LINK_TRAIN_NONE;
3016         }
3017         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3018
3019         /* wait one idle pattern time */
3020         POSTING_READ(reg);
3021         udelay(1000);
3022
3023         /* IVB wants error correction enabled */
3024         if (IS_IVYBRIDGE(dev))
3025                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3026                            FDI_FE_ERRC_ENABLE);
3027 }
3028
3029 static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
3030 {
3031         return crtc->base.enabled && crtc->active &&
3032                 crtc->config->has_pch_encoder;
3033 }
3034
3035 static void ivb_modeset_global_resources(struct drm_device *dev)
3036 {
3037         struct drm_i915_private *dev_priv = dev->dev_private;
3038         struct intel_crtc *pipe_B_crtc =
3039                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
3040         struct intel_crtc *pipe_C_crtc =
3041                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
3042         uint32_t temp;
3043
3044         /*
3045          * When everything is off, disable fdi C so that we can enable fdi B
3046          * with all lanes. Note that we don't care about enabled pipes without
3047          * an enabled pch encoder.
3048          */
3049         if (!pipe_has_enabled_pch(pipe_B_crtc) &&
3050             !pipe_has_enabled_pch(pipe_C_crtc)) {
3051                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3052                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3053
3054                 temp = I915_READ(SOUTH_CHICKEN1);
3055                 temp &= ~FDI_BC_BIFURCATION_SELECT;
3056                 DRM_DEBUG_KMS("disabling fdi C rx\n");
3057                 I915_WRITE(SOUTH_CHICKEN1, temp);
3058         }
3059 }
3060
3061 /* The FDI link training functions for ILK/Ibexpeak. */
3062 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3063 {
3064         struct drm_device *dev = crtc->dev;
3065         struct drm_i915_private *dev_priv = dev->dev_private;
3066         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3067         int pipe = intel_crtc->pipe;
3068         u32 reg, temp, tries;
3069
3070         /* FDI needs bits from pipe first */
3071         assert_pipe_enabled(dev_priv, pipe);
3072
3073         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3074            for the train result */
3075         reg = FDI_RX_IMR(pipe);
3076         temp = I915_READ(reg);
3077         temp &= ~FDI_RX_SYMBOL_LOCK;
3078         temp &= ~FDI_RX_BIT_LOCK;
3079         I915_WRITE(reg, temp);
3080         I915_READ(reg);
3081         udelay(150);
3082
3083         /* enable CPU FDI TX and PCH FDI RX */
3084         reg = FDI_TX_CTL(pipe);
3085         temp = I915_READ(reg);
3086         temp &= ~FDI_DP_PORT_WIDTH_MASK;
3087         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3088         temp &= ~FDI_LINK_TRAIN_NONE;
3089         temp |= FDI_LINK_TRAIN_PATTERN_1;
3090         I915_WRITE(reg, temp | FDI_TX_ENABLE);
3091
3092         reg = FDI_RX_CTL(pipe);
3093         temp = I915_READ(reg);
3094         temp &= ~FDI_LINK_TRAIN_NONE;
3095         temp |= FDI_LINK_TRAIN_PATTERN_1;
3096         I915_WRITE(reg, temp | FDI_RX_ENABLE);
3097
3098         POSTING_READ(reg);
3099         udelay(150);
3100
3101         /* Ironlake workaround, enable clock pointer after FDI enable */
3102         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3103         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3104                    FDI_RX_PHASE_SYNC_POINTER_EN);
3105
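             /* Poll FDI_RX_IIR for bit lock to confirm training pattern 1. */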
3106         reg = FDI_RX_IIR(pipe);
3107         for (tries = 0; tries < 5; tries++) {
3108                 temp = I915_READ(reg);
3109                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3110
3111                 if ((temp & FDI_RX_BIT_LOCK)) {
3112                         DRM_DEBUG_KMS("FDI train 1 done.\n");
3113                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3114                         break;
3115                 }
3116         }
3117         if (tries == 5)
3118                 DRM_ERROR("FDI train 1 fail!\n");
3119
3120         /* Train 2 */
3121         reg = FDI_TX_CTL(pipe);
3122         temp = I915_READ(reg);
3123         temp &= ~FDI_LINK_TRAIN_NONE;
3124         temp |= FDI_LINK_TRAIN_PATTERN_2;
3125         I915_WRITE(reg, temp);
3126
3127         reg = FDI_RX_CTL(pipe);
3128         temp = I915_READ(reg);
3129         temp &= ~FDI_LINK_TRAIN_NONE;
3130         temp |= FDI_LINK_TRAIN_PATTERN_2;
3131         I915_WRITE(reg, temp);
3132
3133         POSTING_READ(reg);
3134         udelay(150);
3135
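             /* Poll FDI_RX_IIR for symbol lock to confirm training pattern 2. */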
3136         reg = FDI_RX_IIR(pipe);
3137         for (tries = 0; tries < 5; tries++) {
3138                 temp = I915_READ(reg);
3139                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3140
3141                 if (temp & FDI_RX_SYMBOL_LOCK) {
3142                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3143                         DRM_DEBUG_KMS("FDI train 2 done.\n");
3144                         break;
3145                 }
3146         }
3147         if (tries == 5)
3148                 DRM_ERROR("FDI train 2 fail!\n");
3149
3150         DRM_DEBUG_KMS("FDI train done\n");
3151
3152 }
3153
3154 static const int snb_b_fdi_train_param[] = {
3155         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3156         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3157         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3158         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3159 };
3160
3161 /* The FDI link training functions for SNB/Cougarpoint. */
3162 static void gen6_fdi_link_train(struct drm_crtc *crtc)
3163 {
3164         struct drm_device *dev = crtc->dev;
3165         struct drm_i915_private *dev_priv = dev->dev_private;
3166         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3167         int pipe = intel_crtc->pipe;
3168         u32 reg, temp, i, retry;
3169
3170         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3171            for the train result */
3172         reg = FDI_RX_IMR(pipe);
3173         temp = I915_READ(reg);
3174         temp &= ~FDI_RX_SYMBOL_LOCK;
3175         temp &= ~FDI_RX_BIT_LOCK;
3176         I915_WRITE(reg, temp);
3177
3178         POSTING_READ(reg);
3179         udelay(150);
3180
3181         /* enable CPU FDI TX and PCH FDI RX */
3182         reg = FDI_TX_CTL(pipe);
3183         temp = I915_READ(reg);
3184         temp &= ~FDI_DP_PORT_WIDTH_MASK;
3185         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3186         temp &= ~FDI_LINK_TRAIN_NONE;
3187         temp |= FDI_LINK_TRAIN_PATTERN_1;
3188         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3189         /* SNB-B */
3190         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3191         I915_WRITE(reg, temp | FDI_TX_ENABLE);
3192
3193         I915_WRITE(FDI_RX_MISC(pipe),
3194                    FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3195
3196         reg = FDI_RX_CTL(pipe);
3197         temp = I915_READ(reg);
3198         if (HAS_PCH_CPT(dev)) {
3199                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3200                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3201         } else {
3202                 temp &= ~FDI_LINK_TRAIN_NONE;
3203                 temp |= FDI_LINK_TRAIN_PATTERN_1;
3204         }
3205         I915_WRITE(reg, temp | FDI_RX_ENABLE);
3206
3207         POSTING_READ(reg);
3208         udelay(150);
3209
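             /* Walk the four SNB-B voltage swing / pre-emphasis settings until
              * the receiver reports bit lock.
              */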
3210         for (i = 0; i < 4; i++) {
3211                 reg = FDI_TX_CTL(pipe);
3212                 temp = I915_READ(reg);
3213                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3214                 temp |= snb_b_fdi_train_param[i];
3215                 I915_WRITE(reg, temp);
3216
3217                 POSTING_READ(reg);
3218                 udelay(500);
3219
3220                 for (retry = 0; retry < 5; retry++) {
3221                         reg = FDI_RX_IIR(pipe);
3222                         temp = I915_READ(reg);
3223                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3224                         if (temp & FDI_RX_BIT_LOCK) {
3225                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3226                                 DRM_DEBUG_KMS("FDI train 1 done.\n");
3227                                 break;
3228                         }
3229                         udelay(50);
3230                 }