muen/linux.git: drivers/gpu/drm/i915/intel_display.c (commit 30d99017f6cca0e65421083ae31d771f8c4e4325)
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_crtc_helper.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_rect.h>
44 #include <linux/dma_remapping.h>
45
46 /* Primary plane formats supported by all gen */
47 #define COMMON_PRIMARY_FORMATS \
48         DRM_FORMAT_C8, \
49         DRM_FORMAT_RGB565, \
50         DRM_FORMAT_XRGB8888, \
51         DRM_FORMAT_ARGB8888
52
53 /* Primary plane formats for gen <= 3 */
54 static const uint32_t intel_primary_formats_gen2[] = {
55         COMMON_PRIMARY_FORMATS,
56         DRM_FORMAT_XRGB1555,
57         DRM_FORMAT_ARGB1555,
58 };
59
60 /* Primary plane formats for gen >= 4 */
61 static const uint32_t intel_primary_formats_gen4[] = {
62         COMMON_PRIMARY_FORMATS,
63         DRM_FORMAT_XBGR8888,
64         DRM_FORMAT_ABGR8888,
65         DRM_FORMAT_XRGB2101010,
66         DRM_FORMAT_ARGB2101010,
67         DRM_FORMAT_XBGR2101010,
68         DRM_FORMAT_ABGR2101010,
69 };
70
71 /* Cursor formats */
72 static const uint32_t intel_cursor_formats[] = {
73         DRM_FORMAT_ARGB8888,
74 };
75
76 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
77
78 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
79                                 struct intel_crtc_state *pipe_config);
80 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
81                                    struct intel_crtc_state *pipe_config);
82
83 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
84                           int x, int y, struct drm_framebuffer *old_fb);
85 static int intel_framebuffer_init(struct drm_device *dev,
86                                   struct intel_framebuffer *ifb,
87                                   struct drm_mode_fb_cmd2 *mode_cmd,
88                                   struct drm_i915_gem_object *obj);
89 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
90 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
91 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
92                                          struct intel_link_m_n *m_n,
93                                          struct intel_link_m_n *m2_n2);
94 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
95 static void haswell_set_pipeconf(struct drm_crtc *crtc);
96 static void intel_set_pipe_csc(struct drm_crtc *crtc);
97 static void vlv_prepare_pll(struct intel_crtc *crtc,
98                             const struct intel_crtc_state *pipe_config);
99 static void chv_prepare_pll(struct intel_crtc *crtc,
100                             const struct intel_crtc_state *pipe_config);
101 static void intel_begin_crtc_commit(struct drm_crtc *crtc);
102 static void intel_finish_crtc_commit(struct drm_crtc *crtc);
103
104 static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
105 {
106         if (!connector->mst_port)
107                 return connector->encoder;
108         else
109                 return &connector->mst_port->mst_encoders[pipe]->base;
110 }
111
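/*
 * The limit tables below give, per platform and output type, the legal
 * ranges for the DPLL dividers (n, m1, m2, p1, p2) and for the derived
 * m, p, VCO and dot clock values.  As a rough sketch of the relationship
 * computed by the *_clock() helpers further down (the exact encodings
 * vary per platform):
 *
 *   m   = f(m1, m2)        e.g. m1 * m2, or 5 * (m1 + 2) + (m2 + 2)
 *   vco = refclk * m / n
 *   dot = vco / (p1 * p2)
 *
 * Frequencies here are in kHz.
 */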
112 typedef struct {
113         int     min, max;
114 } intel_range_t;
115
116 typedef struct {
117         int     dot_limit;
118         int     p2_slow, p2_fast;
119 } intel_p2_t;
120
121 typedef struct intel_limit intel_limit_t;
122 struct intel_limit {
123         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
124         intel_p2_t          p2;
125 };
126
127 int
128 intel_pch_rawclk(struct drm_device *dev)
129 {
130         struct drm_i915_private *dev_priv = dev->dev_private;
131
132         WARN_ON(!HAS_PCH_SPLIT(dev));
133
134         return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
135 }
136
137 static inline u32 /* units of 100MHz */
138 intel_fdi_link_freq(struct drm_device *dev)
139 {
140         if (IS_GEN5(dev)) {
141                 struct drm_i915_private *dev_priv = dev->dev_private;
142                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
143         } else
144                 return 27;
145 }
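/*
 * The value returned above is in units of 100 MHz, so the non-GEN5
 * fallback of 27 corresponds to a 2.7 GHz FDI link clock.
 */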
146
147 static const intel_limit_t intel_limits_i8xx_dac = {
148         .dot = { .min = 25000, .max = 350000 },
149         .vco = { .min = 908000, .max = 1512000 },
150         .n = { .min = 2, .max = 16 },
151         .m = { .min = 96, .max = 140 },
152         .m1 = { .min = 18, .max = 26 },
153         .m2 = { .min = 6, .max = 16 },
154         .p = { .min = 4, .max = 128 },
155         .p1 = { .min = 2, .max = 33 },
156         .p2 = { .dot_limit = 165000,
157                 .p2_slow = 4, .p2_fast = 2 },
158 };
159
160 static const intel_limit_t intel_limits_i8xx_dvo = {
161         .dot = { .min = 25000, .max = 350000 },
162         .vco = { .min = 908000, .max = 1512000 },
163         .n = { .min = 2, .max = 16 },
164         .m = { .min = 96, .max = 140 },
165         .m1 = { .min = 18, .max = 26 },
166         .m2 = { .min = 6, .max = 16 },
167         .p = { .min = 4, .max = 128 },
168         .p1 = { .min = 2, .max = 33 },
169         .p2 = { .dot_limit = 165000,
170                 .p2_slow = 4, .p2_fast = 4 },
171 };
172
173 static const intel_limit_t intel_limits_i8xx_lvds = {
174         .dot = { .min = 25000, .max = 350000 },
175         .vco = { .min = 908000, .max = 1512000 },
176         .n = { .min = 2, .max = 16 },
177         .m = { .min = 96, .max = 140 },
178         .m1 = { .min = 18, .max = 26 },
179         .m2 = { .min = 6, .max = 16 },
180         .p = { .min = 4, .max = 128 },
181         .p1 = { .min = 1, .max = 6 },
182         .p2 = { .dot_limit = 165000,
183                 .p2_slow = 14, .p2_fast = 7 },
184 };
185
186 static const intel_limit_t intel_limits_i9xx_sdvo = {
187         .dot = { .min = 20000, .max = 400000 },
188         .vco = { .min = 1400000, .max = 2800000 },
189         .n = { .min = 1, .max = 6 },
190         .m = { .min = 70, .max = 120 },
191         .m1 = { .min = 8, .max = 18 },
192         .m2 = { .min = 3, .max = 7 },
193         .p = { .min = 5, .max = 80 },
194         .p1 = { .min = 1, .max = 8 },
195         .p2 = { .dot_limit = 200000,
196                 .p2_slow = 10, .p2_fast = 5 },
197 };
198
199 static const intel_limit_t intel_limits_i9xx_lvds = {
200         .dot = { .min = 20000, .max = 400000 },
201         .vco = { .min = 1400000, .max = 2800000 },
202         .n = { .min = 1, .max = 6 },
203         .m = { .min = 70, .max = 120 },
204         .m1 = { .min = 8, .max = 18 },
205         .m2 = { .min = 3, .max = 7 },
206         .p = { .min = 7, .max = 98 },
207         .p1 = { .min = 1, .max = 8 },
208         .p2 = { .dot_limit = 112000,
209                 .p2_slow = 14, .p2_fast = 7 },
210 };
211
212
213 static const intel_limit_t intel_limits_g4x_sdvo = {
214         .dot = { .min = 25000, .max = 270000 },
215         .vco = { .min = 1750000, .max = 3500000},
216         .n = { .min = 1, .max = 4 },
217         .m = { .min = 104, .max = 138 },
218         .m1 = { .min = 17, .max = 23 },
219         .m2 = { .min = 5, .max = 11 },
220         .p = { .min = 10, .max = 30 },
221         .p1 = { .min = 1, .max = 3},
222         .p2 = { .dot_limit = 270000,
223                 .p2_slow = 10,
224                 .p2_fast = 10
225         },
226 };
227
228 static const intel_limit_t intel_limits_g4x_hdmi = {
229         .dot = { .min = 22000, .max = 400000 },
230         .vco = { .min = 1750000, .max = 3500000},
231         .n = { .min = 1, .max = 4 },
232         .m = { .min = 104, .max = 138 },
233         .m1 = { .min = 16, .max = 23 },
234         .m2 = { .min = 5, .max = 11 },
235         .p = { .min = 5, .max = 80 },
236         .p1 = { .min = 1, .max = 8},
237         .p2 = { .dot_limit = 165000,
238                 .p2_slow = 10, .p2_fast = 5 },
239 };
240
241 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
242         .dot = { .min = 20000, .max = 115000 },
243         .vco = { .min = 1750000, .max = 3500000 },
244         .n = { .min = 1, .max = 3 },
245         .m = { .min = 104, .max = 138 },
246         .m1 = { .min = 17, .max = 23 },
247         .m2 = { .min = 5, .max = 11 },
248         .p = { .min = 28, .max = 112 },
249         .p1 = { .min = 2, .max = 8 },
250         .p2 = { .dot_limit = 0,
251                 .p2_slow = 14, .p2_fast = 14
252         },
253 };
254
255 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
256         .dot = { .min = 80000, .max = 224000 },
257         .vco = { .min = 1750000, .max = 3500000 },
258         .n = { .min = 1, .max = 3 },
259         .m = { .min = 104, .max = 138 },
260         .m1 = { .min = 17, .max = 23 },
261         .m2 = { .min = 5, .max = 11 },
262         .p = { .min = 14, .max = 42 },
263         .p1 = { .min = 2, .max = 6 },
264         .p2 = { .dot_limit = 0,
265                 .p2_slow = 7, .p2_fast = 7
266         },
267 };
268
269 static const intel_limit_t intel_limits_pineview_sdvo = {
270         .dot = { .min = 20000, .max = 400000},
271         .vco = { .min = 1700000, .max = 3500000 },
272         /* Pineview's N counter is a ring counter */
273         .n = { .min = 3, .max = 6 },
274         .m = { .min = 2, .max = 256 },
275         /* Pineview only has one combined m divider, which we treat as m2. */
276         .m1 = { .min = 0, .max = 0 },
277         .m2 = { .min = 0, .max = 254 },
278         .p = { .min = 5, .max = 80 },
279         .p1 = { .min = 1, .max = 8 },
280         .p2 = { .dot_limit = 200000,
281                 .p2_slow = 10, .p2_fast = 5 },
282 };
283
284 static const intel_limit_t intel_limits_pineview_lvds = {
285         .dot = { .min = 20000, .max = 400000 },
286         .vco = { .min = 1700000, .max = 3500000 },
287         .n = { .min = 3, .max = 6 },
288         .m = { .min = 2, .max = 256 },
289         .m1 = { .min = 0, .max = 0 },
290         .m2 = { .min = 0, .max = 254 },
291         .p = { .min = 7, .max = 112 },
292         .p1 = { .min = 1, .max = 8 },
293         .p2 = { .dot_limit = 112000,
294                 .p2_slow = 14, .p2_fast = 14 },
295 };
296
297 /* Ironlake / Sandybridge
298  *
299  * We calculate clock using (register_value + 2) for N/M1/M2, so here
300  * the range value for them is (actual_value - 2).
301  */
302 static const intel_limit_t intel_limits_ironlake_dac = {
303         .dot = { .min = 25000, .max = 350000 },
304         .vco = { .min = 1760000, .max = 3510000 },
305         .n = { .min = 1, .max = 5 },
306         .m = { .min = 79, .max = 127 },
307         .m1 = { .min = 12, .max = 22 },
308         .m2 = { .min = 5, .max = 9 },
309         .p = { .min = 5, .max = 80 },
310         .p1 = { .min = 1, .max = 8 },
311         .p2 = { .dot_limit = 225000,
312                 .p2_slow = 10, .p2_fast = 5 },
313 };
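/*
 * For example (per the comment above), an m1 limit value of 12 stands for
 * an actual divider of 14, and the n range 1..5 covers actual dividers
 * 3..7; i9xx_clock() below adds the +2 back when computing the VCO and
 * dot clock.
 */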
314
315 static const intel_limit_t intel_limits_ironlake_single_lvds = {
316         .dot = { .min = 25000, .max = 350000 },
317         .vco = { .min = 1760000, .max = 3510000 },
318         .n = { .min = 1, .max = 3 },
319         .m = { .min = 79, .max = 118 },
320         .m1 = { .min = 12, .max = 22 },
321         .m2 = { .min = 5, .max = 9 },
322         .p = { .min = 28, .max = 112 },
323         .p1 = { .min = 2, .max = 8 },
324         .p2 = { .dot_limit = 225000,
325                 .p2_slow = 14, .p2_fast = 14 },
326 };
327
328 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
329         .dot = { .min = 25000, .max = 350000 },
330         .vco = { .min = 1760000, .max = 3510000 },
331         .n = { .min = 1, .max = 3 },
332         .m = { .min = 79, .max = 127 },
333         .m1 = { .min = 12, .max = 22 },
334         .m2 = { .min = 5, .max = 9 },
335         .p = { .min = 14, .max = 56 },
336         .p1 = { .min = 2, .max = 8 },
337         .p2 = { .dot_limit = 225000,
338                 .p2_slow = 7, .p2_fast = 7 },
339 };
340
341 /* LVDS 100MHz refclk limits. */
342 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
343         .dot = { .min = 25000, .max = 350000 },
344         .vco = { .min = 1760000, .max = 3510000 },
345         .n = { .min = 1, .max = 2 },
346         .m = { .min = 79, .max = 126 },
347         .m1 = { .min = 12, .max = 22 },
348         .m2 = { .min = 5, .max = 9 },
349         .p = { .min = 28, .max = 112 },
350         .p1 = { .min = 2, .max = 8 },
351         .p2 = { .dot_limit = 225000,
352                 .p2_slow = 14, .p2_fast = 14 },
353 };
354
355 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
356         .dot = { .min = 25000, .max = 350000 },
357         .vco = { .min = 1760000, .max = 3510000 },
358         .n = { .min = 1, .max = 3 },
359         .m = { .min = 79, .max = 126 },
360         .m1 = { .min = 12, .max = 22 },
361         .m2 = { .min = 5, .max = 9 },
362         .p = { .min = 14, .max = 42 },
363         .p1 = { .min = 2, .max = 6 },
364         .p2 = { .dot_limit = 225000,
365                 .p2_slow = 7, .p2_fast = 7 },
366 };
367
368 static const intel_limit_t intel_limits_vlv = {
369          /*
370           * These are the data rate limits (measured in fast clocks)
371           * since those are the strictest limits we have. The fast
372           * clock and actual rate limits are more relaxed, so checking
373           * them would make no difference.
374           */
375         .dot = { .min = 25000 * 5, .max = 270000 * 5 },
376         .vco = { .min = 4000000, .max = 6000000 },
377         .n = { .min = 1, .max = 7 },
378         .m1 = { .min = 2, .max = 3 },
379         .m2 = { .min = 11, .max = 156 },
380         .p1 = { .min = 2, .max = 3 },
381         .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
382 };
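/*
 * Example of the *5 scaling above: a 270000 kHz (270 MHz) pixel clock
 * corresponds to a 1350000 kHz fast clock, which is the unit the .dot
 * range and vlv_find_best_dpll() (which multiplies its target by 5)
 * operate in.
 */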
383
384 static const intel_limit_t intel_limits_chv = {
385         /*
386          * These are the data rate limits (measured in fast clocks)
387          * since those are the strictest limits we have.  The fast
388          * clock and actual rate limits are more relaxed, so checking
389          * them would make no difference.
390          */
391         .dot = { .min = 25000 * 5, .max = 540000 * 5},
392         .vco = { .min = 4860000, .max = 6700000 },
393         .n = { .min = 1, .max = 1 },
394         .m1 = { .min = 2, .max = 2 },
395         .m2 = { .min = 24 << 22, .max = 175 << 22 },
396         .p1 = { .min = 2, .max = 4 },
397         .p2 = { .p2_slow = 1, .p2_fast = 14 },
398 };
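/*
 * Note that the CHV .m2 limits are scaled by 2^22 (22.22 fixed point),
 * matching chv_clock() below, which divides by n << 22, and
 * chv_find_best_dpll(), which computes m2 with a << 22 scale factor.
 */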
399
400 static void vlv_clock(int refclk, intel_clock_t *clock)
401 {
402         clock->m = clock->m1 * clock->m2;
403         clock->p = clock->p1 * clock->p2;
404         if (WARN_ON(clock->n == 0 || clock->p == 0))
405                 return;
406         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
407         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
408 }
409
410 /**
411  * Returns whether any output on the specified pipe is of the specified type
412  */
413 bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
414 {
415         struct drm_device *dev = crtc->base.dev;
416         struct intel_encoder *encoder;
417
418         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
419                 if (encoder->type == type)
420                         return true;
421
422         return false;
423 }
424
425 /**
426  * Returns whether any output on the specified pipe will have the specified
427  * type after a staged modeset is complete, i.e., the same as
428  * intel_pipe_has_type() but looking at encoder->new_crtc instead of
429  * encoder->crtc.
430  */
431 static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
432 {
433         struct drm_device *dev = crtc->base.dev;
434         struct intel_encoder *encoder;
435
436         for_each_intel_encoder(dev, encoder)
437                 if (encoder->new_crtc == crtc && encoder->type == type)
438                         return true;
439
440         return false;
441 }
442
443 static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
444                                                 int refclk)
445 {
446         struct drm_device *dev = crtc->base.dev;
447         const intel_limit_t *limit;
448
449         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
450                 if (intel_is_dual_link_lvds(dev)) {
451                         if (refclk == 100000)
452                                 limit = &intel_limits_ironlake_dual_lvds_100m;
453                         else
454                                 limit = &intel_limits_ironlake_dual_lvds;
455                 } else {
456                         if (refclk == 100000)
457                                 limit = &intel_limits_ironlake_single_lvds_100m;
458                         else
459                                 limit = &intel_limits_ironlake_single_lvds;
460                 }
461         } else
462                 limit = &intel_limits_ironlake_dac;
463
464         return limit;
465 }
466
467 static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
468 {
469         struct drm_device *dev = crtc->base.dev;
470         const intel_limit_t *limit;
471
472         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
473                 if (intel_is_dual_link_lvds(dev))
474                         limit = &intel_limits_g4x_dual_channel_lvds;
475                 else
476                         limit = &intel_limits_g4x_single_channel_lvds;
477         } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
478                    intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
479                 limit = &intel_limits_g4x_hdmi;
480         } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
481                 limit = &intel_limits_g4x_sdvo;
482         } else /* The option is for other outputs */
483                 limit = &intel_limits_i9xx_sdvo;
484
485         return limit;
486 }
487
488 static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
489 {
490         struct drm_device *dev = crtc->base.dev;
491         const intel_limit_t *limit;
492
493         if (HAS_PCH_SPLIT(dev))
494                 limit = intel_ironlake_limit(crtc, refclk);
495         else if (IS_G4X(dev)) {
496                 limit = intel_g4x_limit(crtc);
497         } else if (IS_PINEVIEW(dev)) {
498                 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
499                         limit = &intel_limits_pineview_lvds;
500                 else
501                         limit = &intel_limits_pineview_sdvo;
502         } else if (IS_CHERRYVIEW(dev)) {
503                 limit = &intel_limits_chv;
504         } else if (IS_VALLEYVIEW(dev)) {
505                 limit = &intel_limits_vlv;
506         } else if (!IS_GEN2(dev)) {
507                 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
508                         limit = &intel_limits_i9xx_lvds;
509                 else
510                         limit = &intel_limits_i9xx_sdvo;
511         } else {
512                 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
513                         limit = &intel_limits_i8xx_lvds;
514                 else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
515                         limit = &intel_limits_i8xx_dvo;
516                 else
517                         limit = &intel_limits_i8xx_dac;
518         }
519         return limit;
520 }
521
522 /* m1 is reserved as 0 in Pineview, n is a ring counter */
523 static void pineview_clock(int refclk, intel_clock_t *clock)
524 {
525         clock->m = clock->m2 + 2;
526         clock->p = clock->p1 * clock->p2;
527         if (WARN_ON(clock->n == 0 || clock->p == 0))
528                 return;
529         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
530         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
531 }
532
533 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
534 {
535         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
536 }
537
538 static void i9xx_clock(int refclk, intel_clock_t *clock)
539 {
540         clock->m = i9xx_dpll_compute_m(clock);
541         clock->p = clock->p1 * clock->p2;
542         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
543                 return;
544         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
545         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
546 }
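/*
 * Rough worked example for i9xx_clock(), assuming a 96000 kHz (96 MHz)
 * reference clock and dividers within intel_limits_i9xx_sdvo:
 *
 *   m1 = 12, m2 = 5  ->  m   = 5 * (12 + 2) + (5 + 2)  = 77
 *   n  = 2           ->  vco = 96000 * 77 / (2 + 2)    = 1848000 kHz
 *   p1 = 1, p2 = 10  ->  dot = 1848000 / (1 * 10)      = 184800 kHz
 */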
547
548 static void chv_clock(int refclk, intel_clock_t *clock)
549 {
550         clock->m = clock->m1 * clock->m2;
551         clock->p = clock->p1 * clock->p2;
552         if (WARN_ON(clock->n == 0 || clock->p == 0))
553                 return;
554         clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
555                         clock->n << 22);
556         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
557 }
558
559 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
560 /**
561  * Returns whether the given set of divisors is valid for a given refclk with
562  * the given connectors.
563  */
564
565 static bool intel_PLL_is_valid(struct drm_device *dev,
566                                const intel_limit_t *limit,
567                                const intel_clock_t *clock)
568 {
569         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
570                 INTELPllInvalid("n out of range\n");
571         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
572                 INTELPllInvalid("p1 out of range\n");
573         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
574                 INTELPllInvalid("m2 out of range\n");
575         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
576                 INTELPllInvalid("m1 out of range\n");
577
578         if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
579                 if (clock->m1 <= clock->m2)
580                         INTELPllInvalid("m1 <= m2\n");
581
582         if (!IS_VALLEYVIEW(dev)) {
583                 if (clock->p < limit->p.min || limit->p.max < clock->p)
584                         INTELPllInvalid("p out of range\n");
585                 if (clock->m < limit->m.min || limit->m.max < clock->m)
586                         INTELPllInvalid("m out of range\n");
587         }
588
589         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
590                 INTELPllInvalid("vco out of range\n");
591         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
592          * connector, etc., rather than just a single range.
593          */
594         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
595                 INTELPllInvalid("dot out of range\n");
596
597         return true;
598 }
599
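/*
 * Brute-force divider search for i8xx/i9xx style PLLs: p2 is picked from
 * the output type and dot clock target, then m1 > m2, n and p1 are walked
 * within the given limits, keeping the combination whose dot clock comes
 * closest to the target.  Returns true if a suitable combination was found.
 */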
600 static bool
601 i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
602                     int target, int refclk, intel_clock_t *match_clock,
603                     intel_clock_t *best_clock)
604 {
605         struct drm_device *dev = crtc->base.dev;
606         intel_clock_t clock;
607         int err = target;
608
609         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
610                 /*
611                  * For LVDS just rely on its current settings for dual-channel.
612                  * We haven't figured out how to reliably set up different
613                  * single/dual channel state, if we even can.
614                  */
615                 if (intel_is_dual_link_lvds(dev))
616                         clock.p2 = limit->p2.p2_fast;
617                 else
618                         clock.p2 = limit->p2.p2_slow;
619         } else {
620                 if (target < limit->p2.dot_limit)
621                         clock.p2 = limit->p2.p2_slow;
622                 else
623                         clock.p2 = limit->p2.p2_fast;
624         }
625
626         memset(best_clock, 0, sizeof(*best_clock));
627
628         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
629              clock.m1++) {
630                 for (clock.m2 = limit->m2.min;
631                      clock.m2 <= limit->m2.max; clock.m2++) {
632                         if (clock.m2 >= clock.m1)
633                                 break;
634                         for (clock.n = limit->n.min;
635                              clock.n <= limit->n.max; clock.n++) {
636                                 for (clock.p1 = limit->p1.min;
637                                         clock.p1 <= limit->p1.max; clock.p1++) {
638                                         int this_err;
639
640                                         i9xx_clock(refclk, &clock);
641                                         if (!intel_PLL_is_valid(dev, limit,
642                                                                 &clock))
643                                                 continue;
644                                         if (match_clock &&
645                                             clock.p != match_clock->p)
646                                                 continue;
647
648                                         this_err = abs(clock.dot - target);
649                                         if (this_err < err) {
650                                                 *best_clock = clock;
651                                                 err = this_err;
652                                         }
653                                 }
654                         }
655                 }
656         }
657
658         return (err != target);
659 }
660
661 static bool
662 pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
663                    int target, int refclk, intel_clock_t *match_clock,
664                    intel_clock_t *best_clock)
665 {
666         struct drm_device *dev = crtc->base.dev;
667         intel_clock_t clock;
668         int err = target;
669
670         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
671                 /*
672                  * For LVDS just rely on its current settings for dual-channel.
673                  * We haven't figured out how to reliably set up different
674                  * single/dual channel state, if we even can.
675                  */
676                 if (intel_is_dual_link_lvds(dev))
677                         clock.p2 = limit->p2.p2_fast;
678                 else
679                         clock.p2 = limit->p2.p2_slow;
680         } else {
681                 if (target < limit->p2.dot_limit)
682                         clock.p2 = limit->p2.p2_slow;
683                 else
684                         clock.p2 = limit->p2.p2_fast;
685         }
686
687         memset(best_clock, 0, sizeof(*best_clock));
688
689         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
690              clock.m1++) {
691                 for (clock.m2 = limit->m2.min;
692                      clock.m2 <= limit->m2.max; clock.m2++) {
693                         for (clock.n = limit->n.min;
694                              clock.n <= limit->n.max; clock.n++) {
695                                 for (clock.p1 = limit->p1.min;
696                                         clock.p1 <= limit->p1.max; clock.p1++) {
697                                         int this_err;
698
699                                         pineview_clock(refclk, &clock);
700                                         if (!intel_PLL_is_valid(dev, limit,
701                                                                 &clock))
702                                                 continue;
703                                         if (match_clock &&
704                                             clock.p != match_clock->p)
705                                                 continue;
706
707                                         this_err = abs(clock.dot - target);
708                                         if (this_err < err) {
709                                                 *best_clock = clock;
710                                                 err = this_err;
711                                         }
712                                 }
713                         }
714                 }
715         }
716
717         return (err != target);
718 }
719
720 static bool
721 g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
722                    int target, int refclk, intel_clock_t *match_clock,
723                    intel_clock_t *best_clock)
724 {
725         struct drm_device *dev = crtc->base.dev;
726         intel_clock_t clock;
727         int max_n;
728         bool found;
729         /* approximately equals target * 0.00585 (target/256 + target/512) */
730         int err_most = (target >> 8) + (target >> 9);
731         found = false;
732
733         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
734                 if (intel_is_dual_link_lvds(dev))
735                         clock.p2 = limit->p2.p2_fast;
736                 else
737                         clock.p2 = limit->p2.p2_slow;
738         } else {
739                 if (target < limit->p2.dot_limit)
740                         clock.p2 = limit->p2.p2_slow;
741                 else
742                         clock.p2 = limit->p2.p2_fast;
743         }
744
745         memset(best_clock, 0, sizeof(*best_clock));
746         max_n = limit->n.max;
747         /* based on hardware requirement, prefer smaller n for precision */
748         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
749                 /* based on hardware requirement, prefer larger m1, m2 */
750                 for (clock.m1 = limit->m1.max;
751                      clock.m1 >= limit->m1.min; clock.m1--) {
752                         for (clock.m2 = limit->m2.max;
753                              clock.m2 >= limit->m2.min; clock.m2--) {
754                                 for (clock.p1 = limit->p1.max;
755                                      clock.p1 >= limit->p1.min; clock.p1--) {
756                                         int this_err;
757
758                                         i9xx_clock(refclk, &clock);
759                                         if (!intel_PLL_is_valid(dev, limit,
760                                                                 &clock))
761                                                 continue;
762
763                                         this_err = abs(clock.dot - target);
764                                         if (this_err < err_most) {
765                                                 *best_clock = clock;
766                                                 err_most = this_err;
767                                                 max_n = clock.n;
768                                                 found = true;
769                                         }
770                                 }
771                         }
772                 }
773         }
774         return found;
775 }
776
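/*
 * VLV divider search: works on the fast clock (5x the pixel clock),
 * computes m2 directly from the other dividers and scores candidates by
 * their deviation from the target in ppm.  Candidates within 100 ppm
 * prefer a larger post divider p; otherwise a candidate is only taken if
 * it improves on the best ppm seen so far by more than 10.
 */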
777 static bool
778 vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
779                    int target, int refclk, intel_clock_t *match_clock,
780                    intel_clock_t *best_clock)
781 {
782         struct drm_device *dev = crtc->base.dev;
783         intel_clock_t clock;
784         unsigned int bestppm = 1000000;
785         /* min update 19.2 MHz */
786         int max_n = min(limit->n.max, refclk / 19200);
787         bool found = false;
788
789         target *= 5; /* fast clock */
790
791         memset(best_clock, 0, sizeof(*best_clock));
792
793         /* based on hardware requirement, prefer smaller n for precision */
794         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
795                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
796                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
797                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
798                                 clock.p = clock.p1 * clock.p2;
799                                 /* based on hardware requirement, prefer bigger m1,m2 values */
800                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
801                                         unsigned int ppm, diff;
802
803                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
804                                                                      refclk * clock.m1);
805
806                                         vlv_clock(refclk, &clock);
807
808                                         if (!intel_PLL_is_valid(dev, limit,
809                                                                 &clock))
810                                                 continue;
811
812                                         diff = abs(clock.dot - target);
813                                         ppm = div_u64(1000000ULL * diff, target);
814
815                                         if (ppm < 100 && clock.p > best_clock->p) {
816                                                 bestppm = 0;
817                                                 *best_clock = clock;
818                                                 found = true;
819                                         }
820
821                                         if (bestppm >= 10 && ppm < bestppm - 10) {
822                                                 bestppm = ppm;
823                                                 *best_clock = clock;
824                                                 found = true;
825                                         }
826                                 }
827                         }
828                 }
829         }
830
831         return found;
832 }
833
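/*
 * CHV divider search: n and m1 are fixed (1 and 2, per the comment in the
 * function), so only p1/p2 are iterated and m2 is computed directly in
 * 22.22 fixed point; among the valid results the largest post divider p
 * wins.
 */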
834 static bool
835 chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
836                    int target, int refclk, intel_clock_t *match_clock,
837                    intel_clock_t *best_clock)
838 {
839         struct drm_device *dev = crtc->base.dev;
840         intel_clock_t clock;
841         uint64_t m2;
842         bool found = false;
843
844         memset(best_clock, 0, sizeof(*best_clock));
845
846         /*
847          * Based on the hardware doc, n is always set to 1 and m1 is always
848          * set to 2.  If we ever need to support a 200 MHz refclk, this needs
849          * to be revisited because n may no longer be 1.
850          */
851         clock.n = 1, clock.m1 = 2;
852         target *= 5;    /* fast clock */
853
854         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
855                 for (clock.p2 = limit->p2.p2_fast;
856                                 clock.p2 >= limit->p2.p2_slow;
857                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
858
859                         clock.p = clock.p1 * clock.p2;
860
861                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
862                                         clock.n) << 22, refclk * clock.m1);
863
864                         if (m2 > INT_MAX/clock.m1)
865                                 continue;
866
867                         clock.m2 = m2;
868
869                         chv_clock(refclk, &clock);
870
871                         if (!intel_PLL_is_valid(dev, limit, &clock))
872                                 continue;
873
874                         /* based on hardware requirement, prefer bigger p
875                          */
876                         if (clock.p > best_clock->p) {
877                                 *best_clock = clock;
878                                 found = true;
879                         }
880                 }
881         }
882
883         return found;
884 }
885
886 bool intel_crtc_active(struct drm_crtc *crtc)
887 {
888         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
889
890         /* Be paranoid as we can arrive here with only partial
891          * state retrieved from the hardware during setup.
892          *
893          * We can ditch the adjusted_mode.crtc_clock check as soon
894          * as Haswell has gained clock readout/fastboot support.
895          *
896          * We can ditch the crtc->primary->fb check as soon as we can
897          * properly reconstruct framebuffers.
898          */
899         return intel_crtc->active && crtc->primary->fb &&
900                 intel_crtc->config->base.adjusted_mode.crtc_clock;
901 }
902
903 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
904                                              enum pipe pipe)
905 {
906         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
907         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
908
909         return intel_crtc->config->cpu_transcoder;
910 }
911
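/*
 * Sample the pipe's display scanline register twice, 5 ms apart, and
 * report whether the scanline has stopped advancing (i.e. the pipe is no
 * longer scanning out).
 */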
912 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
913 {
914         struct drm_i915_private *dev_priv = dev->dev_private;
915         u32 reg = PIPEDSL(pipe);
916         u32 line1, line2;
917         u32 line_mask;
918
919         if (IS_GEN2(dev))
920                 line_mask = DSL_LINEMASK_GEN2;
921         else
922                 line_mask = DSL_LINEMASK_GEN3;
923
924         line1 = I915_READ(reg) & line_mask;
925         mdelay(5);
926         line2 = I915_READ(reg) & line_mask;
927
928         return line1 == line2;
929 }
930
931 /*
932  * intel_wait_for_pipe_off - wait for pipe to turn off
933  * @crtc: crtc whose pipe to wait for
934  *
935  * After disabling a pipe, we can't wait for vblank in the usual way,
936  * spinning on the vblank interrupt status bit, since we won't actually
937  * see an interrupt when the pipe is disabled.
938  *
939  * On Gen4 and above:
940  *   wait for the pipe register state bit to turn off
941  *
942  * Otherwise:
943  *   wait for the display line value to settle (it usually
944  *   ends up stopping at the start of the next frame).
945  *
946  */
947 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
948 {
949         struct drm_device *dev = crtc->base.dev;
950         struct drm_i915_private *dev_priv = dev->dev_private;
951         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
952         enum pipe pipe = crtc->pipe;
953
954         if (INTEL_INFO(dev)->gen >= 4) {
955                 int reg = PIPECONF(cpu_transcoder);
956
957                 /* Wait for the Pipe State to go off */
958                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
959                              100))
960                         WARN(1, "pipe_off wait timed out\n");
961         } else {
962                 /* Wait for the display line to settle */
963                 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
964                         WARN(1, "pipe_off wait timed out\n");
965         }
966 }
967
968 /*
969  * ibx_digital_port_connected - is the specified port connected?
970  * @dev_priv: i915 private structure
971  * @port: the port to test
972  *
973  * Returns true if @port is connected, false otherwise.
974  */
975 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
976                                 struct intel_digital_port *port)
977 {
978         u32 bit;
979
980         if (HAS_PCH_IBX(dev_priv->dev)) {
981                 switch (port->port) {
982                 case PORT_B:
983                         bit = SDE_PORTB_HOTPLUG;
984                         break;
985                 case PORT_C:
986                         bit = SDE_PORTC_HOTPLUG;
987                         break;
988                 case PORT_D:
989                         bit = SDE_PORTD_HOTPLUG;
990                         break;
991                 default:
992                         return true;
993                 }
994         } else {
995                 switch (port->port) {
996                 case PORT_B:
997                         bit = SDE_PORTB_HOTPLUG_CPT;
998                         break;
999                 case PORT_C:
1000                         bit = SDE_PORTC_HOTPLUG_CPT;
1001                         break;
1002                 case PORT_D:
1003                         bit = SDE_PORTD_HOTPLUG_CPT;
1004                         break;
1005                 default:
1006                         return true;
1007                 }
1008         }
1009
1010         return I915_READ(SDEISR) & bit;
1011 }
1012
1013 static const char *state_string(bool enabled)
1014 {
1015         return enabled ? "on" : "off";
1016 }
1017
1018 /* Only for pre-ILK configs */
1019 void assert_pll(struct drm_i915_private *dev_priv,
1020                 enum pipe pipe, bool state)
1021 {
1022         int reg;
1023         u32 val;
1024         bool cur_state;
1025
1026         reg = DPLL(pipe);
1027         val = I915_READ(reg);
1028         cur_state = !!(val & DPLL_VCO_ENABLE);
1029         I915_STATE_WARN(cur_state != state,
1030              "PLL state assertion failure (expected %s, current %s)\n",
1031              state_string(state), state_string(cur_state));
1032 }
1033
1034 /* XXX: the dsi pll is shared between MIPI DSI ports */
1035 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1036 {
1037         u32 val;
1038         bool cur_state;
1039
1040         mutex_lock(&dev_priv->dpio_lock);
1041         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1042         mutex_unlock(&dev_priv->dpio_lock);
1043
1044         cur_state = val & DSI_PLL_VCO_EN;
1045         I915_STATE_WARN(cur_state != state,
1046              "DSI PLL state assertion failure (expected %s, current %s)\n",
1047              state_string(state), state_string(cur_state));
1048 }
1049 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1050 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1051
1052 struct intel_shared_dpll *
1053 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1054 {
1055         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1056
1057         if (crtc->config->shared_dpll < 0)
1058                 return NULL;
1059
1060         return &dev_priv->shared_dplls[crtc->config->shared_dpll];
1061 }
1062
1063 /* For ILK+ */
1064 void assert_shared_dpll(struct drm_i915_private *dev_priv,
1065                         struct intel_shared_dpll *pll,
1066                         bool state)
1067 {
1068         bool cur_state;
1069         struct intel_dpll_hw_state hw_state;
1070
1071         if (WARN(!pll,
1072                   "asserting DPLL %s with no DPLL\n", state_string(state)))
1073                 return;
1074
1075         cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1076         I915_STATE_WARN(cur_state != state,
1077              "%s assertion failure (expected %s, current %s)\n",
1078              pll->name, state_string(state), state_string(cur_state));
1079 }
1080
1081 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1082                           enum pipe pipe, bool state)
1083 {
1084         int reg;
1085         u32 val;
1086         bool cur_state;
1087         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1088                                                                       pipe);
1089
1090         if (HAS_DDI(dev_priv->dev)) {
1091                 /* DDI does not have a specific FDI_TX register */
1092                 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1093                 val = I915_READ(reg);
1094                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1095         } else {
1096                 reg = FDI_TX_CTL(pipe);
1097                 val = I915_READ(reg);
1098                 cur_state = !!(val & FDI_TX_ENABLE);
1099         }
1100         I915_STATE_WARN(cur_state != state,
1101              "FDI TX state assertion failure (expected %s, current %s)\n",
1102              state_string(state), state_string(cur_state));
1103 }
1104 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1105 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1106
1107 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1108                           enum pipe pipe, bool state)
1109 {
1110         int reg;
1111         u32 val;
1112         bool cur_state;
1113
1114         reg = FDI_RX_CTL(pipe);
1115         val = I915_READ(reg);
1116         cur_state = !!(val & FDI_RX_ENABLE);
1117         I915_STATE_WARN(cur_state != state,
1118              "FDI RX state assertion failure (expected %s, current %s)\n",
1119              state_string(state), state_string(cur_state));
1120 }
1121 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1122 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1123
1124 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1125                                       enum pipe pipe)
1126 {
1127         int reg;
1128         u32 val;
1129
1130         /* ILK FDI PLL is always enabled */
1131         if (INTEL_INFO(dev_priv->dev)->gen == 5)
1132                 return;
1133
1134         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1135         if (HAS_DDI(dev_priv->dev))
1136                 return;
1137
1138         reg = FDI_TX_CTL(pipe);
1139         val = I915_READ(reg);
1140         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1141 }
1142
1143 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1144                        enum pipe pipe, bool state)
1145 {
1146         int reg;
1147         u32 val;
1148         bool cur_state;
1149
1150         reg = FDI_RX_CTL(pipe);
1151         val = I915_READ(reg);
1152         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1153         I915_STATE_WARN(cur_state != state,
1154              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1155              state_string(state), state_string(cur_state));
1156 }
1157
1158 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1159                            enum pipe pipe)
1160 {
1161         struct drm_device *dev = dev_priv->dev;
1162         int pp_reg;
1163         u32 val;
1164         enum pipe panel_pipe = PIPE_A;
1165         bool locked = true;
1166
1167         if (WARN_ON(HAS_DDI(dev)))
1168                 return;
1169
1170         if (HAS_PCH_SPLIT(dev)) {
1171                 u32 port_sel;
1172
1173                 pp_reg = PCH_PP_CONTROL;
1174                 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1175
1176                 if (port_sel == PANEL_PORT_SELECT_LVDS &&
1177                     I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1178                         panel_pipe = PIPE_B;
1179                 /* XXX: else fix for eDP */
1180         } else if (IS_VALLEYVIEW(dev)) {
1181                 /* presumably write lock depends on pipe, not port select */
1182                 pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1183                 panel_pipe = pipe;
1184         } else {
1185                 pp_reg = PP_CONTROL;
1186                 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1187                         panel_pipe = PIPE_B;
1188         }
1189
1190         val = I915_READ(pp_reg);
1191         if (!(val & PANEL_POWER_ON) ||
1192             ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1193                 locked = false;
1194
1195         I915_STATE_WARN(panel_pipe == pipe && locked,
1196              "panel assertion failure, pipe %c regs locked\n",
1197              pipe_name(pipe));
1198 }
1199
1200 static void assert_cursor(struct drm_i915_private *dev_priv,
1201                           enum pipe pipe, bool state)
1202 {
1203         struct drm_device *dev = dev_priv->dev;
1204         bool cur_state;
1205
1206         if (IS_845G(dev) || IS_I865G(dev))
1207                 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1208         else
1209                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1210
1211         I915_STATE_WARN(cur_state != state,
1212              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1213              pipe_name(pipe), state_string(state), state_string(cur_state));
1214 }
1215 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1216 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1217
1218 void assert_pipe(struct drm_i915_private *dev_priv,
1219                  enum pipe pipe, bool state)
1220 {
1221         int reg;
1222         u32 val;
1223         bool cur_state;
1224         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1225                                                                       pipe);
1226
1227         /* if we need the pipe quirk it must be always on */
1228         if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1229             (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1230                 state = true;
1231
1232         if (!intel_display_power_is_enabled(dev_priv,
1233                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1234                 cur_state = false;
1235         } else {
1236                 reg = PIPECONF(cpu_transcoder);
1237                 val = I915_READ(reg);
1238                 cur_state = !!(val & PIPECONF_ENABLE);
1239         }
1240
1241         I915_STATE_WARN(cur_state != state,
1242              "pipe %c assertion failure (expected %s, current %s)\n",
1243              pipe_name(pipe), state_string(state), state_string(cur_state));
1244 }
1245
1246 static void assert_plane(struct drm_i915_private *dev_priv,
1247                          enum plane plane, bool state)
1248 {
1249         int reg;
1250         u32 val;
1251         bool cur_state;
1252
1253         reg = DSPCNTR(plane);
1254         val = I915_READ(reg);
1255         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1256         I915_STATE_WARN(cur_state != state,
1257              "plane %c assertion failure (expected %s, current %s)\n",
1258              plane_name(plane), state_string(state), state_string(cur_state));
1259 }
1260
1261 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1262 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1263
1264 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1265                                    enum pipe pipe)
1266 {
1267         struct drm_device *dev = dev_priv->dev;
1268         int reg, i;
1269         u32 val;
1270         int cur_pipe;
1271
1272         /* Primary planes are fixed to pipes on gen4+ */
1273         if (INTEL_INFO(dev)->gen >= 4) {
1274                 reg = DSPCNTR(pipe);
1275                 val = I915_READ(reg);
1276                 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
1277                      "plane %c assertion failure, should be disabled but is not\n",
1278                      plane_name(pipe));
1279                 return;
1280         }
1281
1282         /* Need to check both planes against the pipe */
1283         for_each_pipe(dev_priv, i) {
1284                 reg = DSPCNTR(i);
1285                 val = I915_READ(reg);
1286                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1287                         DISPPLANE_SEL_PIPE_SHIFT;
1288                 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1289                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1290                      plane_name(i), pipe_name(pipe));
1291         }
1292 }
1293
1294 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1295                                     enum pipe pipe)
1296 {
1297         struct drm_device *dev = dev_priv->dev;
1298         int reg, sprite;
1299         u32 val;
1300
1301         if (INTEL_INFO(dev)->gen >= 9) {
1302                 for_each_sprite(pipe, sprite) {
1303                         val = I915_READ(PLANE_CTL(pipe, sprite));
1304                         I915_STATE_WARN(val & PLANE_CTL_ENABLE,
1305                              "plane %d assertion failure, should be off on pipe %c but is still active\n",
1306                              sprite, pipe_name(pipe));
1307                 }
1308         } else if (IS_VALLEYVIEW(dev)) {
1309                 for_each_sprite(pipe, sprite) {
1310                         reg = SPCNTR(pipe, sprite);
1311                         val = I915_READ(reg);
1312                         I915_STATE_WARN(val & SP_ENABLE,
1313                              "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1314                              sprite_name(pipe, sprite), pipe_name(pipe));
1315                 }
1316         } else if (INTEL_INFO(dev)->gen >= 7) {
1317                 reg = SPRCTL(pipe);
1318                 val = I915_READ(reg);
1319                 I915_STATE_WARN(val & SPRITE_ENABLE,
1320                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1321                      plane_name(pipe), pipe_name(pipe));
1322         } else if (INTEL_INFO(dev)->gen >= 5) {
1323                 reg = DVSCNTR(pipe);
1324                 val = I915_READ(reg);
1325                 I915_STATE_WARN(val & DVS_ENABLE,
1326                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1327                      plane_name(pipe), pipe_name(pipe));
1328         }
1329 }
1330
1331 static void assert_vblank_disabled(struct drm_crtc *crtc)
1332 {
1333         if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1334                 drm_crtc_vblank_put(crtc);
1335 }
1336
1337 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1338 {
1339         u32 val;
1340         bool enabled;
1341
1342         I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1343
1344         val = I915_READ(PCH_DREF_CONTROL);
1345         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1346                             DREF_SUPERSPREAD_SOURCE_MASK));
1347         I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1348 }
1349
1350 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1351                                            enum pipe pipe)
1352 {
1353         int reg;
1354         u32 val;
1355         bool enabled;
1356
1357         reg = PCH_TRANSCONF(pipe);
1358         val = I915_READ(reg);
1359         enabled = !!(val & TRANS_ENABLE);
1360         I915_STATE_WARN(enabled,
1361              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1362              pipe_name(pipe));
1363 }
1364
1365 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1366                             enum pipe pipe, u32 port_sel, u32 val)
1367 {
1368         if ((val & DP_PORT_EN) == 0)
1369                 return false;
1370
1371         if (HAS_PCH_CPT(dev_priv->dev)) {
1372                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1373                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1374                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1375                         return false;
1376         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1377                 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1378                         return false;
1379         } else {
1380                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1381                         return false;
1382         }
1383         return true;
1384 }
1385
1386 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1387                               enum pipe pipe, u32 val)
1388 {
1389         if ((val & SDVO_ENABLE) == 0)
1390                 return false;
1391
1392         if (HAS_PCH_CPT(dev_priv->dev)) {
1393                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1394                         return false;
1395         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1396                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1397                         return false;
1398         } else {
1399                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1400                         return false;
1401         }
1402         return true;
1403 }
1404
1405 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1406                               enum pipe pipe, u32 val)
1407 {
1408         if ((val & LVDS_PORT_EN) == 0)
1409                 return false;
1410
1411         if (HAS_PCH_CPT(dev_priv->dev)) {
1412                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1413                         return false;
1414         } else {
1415                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1416                         return false;
1417         }
1418         return true;
1419 }
1420
1421 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1422                               enum pipe pipe, u32 val)
1423 {
1424         if ((val & ADPA_DAC_ENABLE) == 0)
1425                 return false;
1426         if (HAS_PCH_CPT(dev_priv->dev)) {
1427                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1428                         return false;
1429         } else {
1430                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1431                         return false;
1432         }
1433         return true;
1434 }
1435
1436 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1437                                    enum pipe pipe, int reg, u32 port_sel)
1438 {
1439         u32 val = I915_READ(reg);
1440         I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1441              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1442              reg, pipe_name(pipe));
1443
1444         I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1445              && (val & DP_PIPEB_SELECT),
1446              "IBX PCH dp port still using transcoder B\n");
1447 }
1448
1449 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1450                                      enum pipe pipe, int reg)
1451 {
1452         u32 val = I915_READ(reg);
1453         I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1454              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1455              reg, pipe_name(pipe));
1456
1457         I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1458              && (val & SDVO_PIPE_B_SELECT),
1459              "IBX PCH hdmi port still using transcoder B\n");
1460 }
1461
1462 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1463                                       enum pipe pipe)
1464 {
1465         int reg;
1466         u32 val;
1467
1468         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1469         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1470         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1471
1472         reg = PCH_ADPA;
1473         val = I915_READ(reg);
1474         I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1475              "PCH VGA enabled on transcoder %c, should be disabled\n",
1476              pipe_name(pipe));
1477
1478         reg = PCH_LVDS;
1479         val = I915_READ(reg);
1480         I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1481              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1482              pipe_name(pipe));
1483
1484         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1485         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1486         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1487 }
1488
1489 static void intel_init_dpio(struct drm_device *dev)
1490 {
1491         struct drm_i915_private *dev_priv = dev->dev_private;
1492
1493         if (!IS_VALLEYVIEW(dev))
1494                 return;
1495
1496         /*
1497          * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1498          * CHV x1 PHY (DP/HDMI D)
1499          * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1500          */
1501         if (IS_CHERRYVIEW(dev)) {
1502                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1503                 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1504         } else {
1505                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1506         }
1507 }
1508
1509 static void vlv_enable_pll(struct intel_crtc *crtc,
1510                            const struct intel_crtc_state *pipe_config)
1511 {
1512         struct drm_device *dev = crtc->base.dev;
1513         struct drm_i915_private *dev_priv = dev->dev_private;
1514         int reg = DPLL(crtc->pipe);
1515         u32 dpll = pipe_config->dpll_hw_state.dpll;
1516
1517         assert_pipe_disabled(dev_priv, crtc->pipe);
1518
1519         /* No really, not for ILK+ */
1520         BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1521
1522         /* PLL is protected by panel, make sure we can write it */
1523         if (IS_MOBILE(dev_priv->dev))
1524                 assert_panel_unlocked(dev_priv, crtc->pipe);
1525
1526         I915_WRITE(reg, dpll);
1527         POSTING_READ(reg);
1528         udelay(150);
1529
1530         if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1531                 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1532
1533         I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
1534         POSTING_READ(DPLL_MD(crtc->pipe));
1535
1536         /* We do this three times for luck */
1537         I915_WRITE(reg, dpll);
1538         POSTING_READ(reg);
1539         udelay(150); /* wait for warmup */
1540         I915_WRITE(reg, dpll);
1541         POSTING_READ(reg);
1542         udelay(150); /* wait for warmup */
1543         I915_WRITE(reg, dpll);
1544         POSTING_READ(reg);
1545         udelay(150); /* wait for warmup */
1546 }
1547
1548 static void chv_enable_pll(struct intel_crtc *crtc,
1549                            const struct intel_crtc_state *pipe_config)
1550 {
1551         struct drm_device *dev = crtc->base.dev;
1552         struct drm_i915_private *dev_priv = dev->dev_private;
1553         int pipe = crtc->pipe;
1554         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1555         u32 tmp;
1556
1557         assert_pipe_disabled(dev_priv, crtc->pipe);
1558
1559         BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1560
1561         mutex_lock(&dev_priv->dpio_lock);
1562
1563         /* Re-enable the 10bit clock to the display controller */
1564         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1565         tmp |= DPIO_DCLKP_EN;
1566         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1567
1568         /*
1569          * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1570          */
1571         udelay(1);
1572
1573         /* Enable PLL */
1574         I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1575
1576         /* Check PLL is locked */
1577         if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1578                 DRM_ERROR("PLL %d failed to lock\n", pipe);
1579
1580         /* not sure when this should be written */
1581         I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1582         POSTING_READ(DPLL_MD(pipe));
1583
1584         mutex_unlock(&dev_priv->dpio_lock);
1585 }
1586
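/* Number of active pipes currently driving a DVO output. */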
1587 static int intel_num_dvo_pipes(struct drm_device *dev)
1588 {
1589         struct intel_crtc *crtc;
1590         int count = 0;
1591
1592         for_each_intel_crtc(dev, crtc)
1593                 count += crtc->active &&
1594                         intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1595
1596         return count;
1597 }
1598
1599 static void i9xx_enable_pll(struct intel_crtc *crtc)
1600 {
1601         struct drm_device *dev = crtc->base.dev;
1602         struct drm_i915_private *dev_priv = dev->dev_private;
1603         int reg = DPLL(crtc->pipe);
1604         u32 dpll = crtc->config->dpll_hw_state.dpll;
1605
1606         assert_pipe_disabled(dev_priv, crtc->pipe);
1607
1608         /* No really, not for ILK+ */
1609         BUG_ON(INTEL_INFO(dev)->gen >= 5);
1610
1611         /* PLL is protected by panel, make sure we can write it */
1612         if (IS_MOBILE(dev) && !IS_I830(dev))
1613                 assert_panel_unlocked(dev_priv, crtc->pipe);
1614
1615         /* Enable DVO 2x clock on both PLLs if necessary */
1616         if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1617                 /*
1618                  * It appears to be important that we don't enable this
1619                  * for the current pipe before otherwise configuring the
1620                  * PLL. No idea how this should be handled if multiple
1621                  * DVO outputs are enabled simultaneously.
1622                  */
1623                 dpll |= DPLL_DVO_2X_MODE;
1624                 I915_WRITE(DPLL(!crtc->pipe),
1625                            I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1626         }
1627
1628         /* Wait for the clocks to stabilize. */
1629         POSTING_READ(reg);
1630         udelay(150);
1631
1632         if (INTEL_INFO(dev)->gen >= 4) {
1633                 I915_WRITE(DPLL_MD(crtc->pipe),
1634                            crtc->config->dpll_hw_state.dpll_md);
1635         } else {
1636                 /* The pixel multiplier can only be updated once the
1637                  * DPLL is enabled and the clocks are stable.
1638                  *
1639                  * So write it again.
1640                  */
1641                 I915_WRITE(reg, dpll);
1642         }
1643
1644         /* We do this three times for luck */
1645         I915_WRITE(reg, dpll);
1646         POSTING_READ(reg);
1647         udelay(150); /* wait for warmup */
1648         I915_WRITE(reg, dpll);
1649         POSTING_READ(reg);
1650         udelay(150); /* wait for warmup */
1651         I915_WRITE(reg, dpll);
1652         POSTING_READ(reg);
1653         udelay(150); /* wait for warmup */
1654 }
1655
1656 /**
1657  * i9xx_disable_pll - disable a PLL
1658  * @crtc: crtc whose PLL should be disabled
1659  *
1660  * Disable the PLL for @crtc's pipe, making sure the pipe is off
1661  * first.
1662  *
1663  * Note!  This is for pre-ILK only.
1664  */
1665 static void i9xx_disable_pll(struct intel_crtc *crtc)
1666 {
1667         struct drm_device *dev = crtc->base.dev;
1668         struct drm_i915_private *dev_priv = dev->dev_private;
1669         enum pipe pipe = crtc->pipe;
1670
1671         /* Disable DVO 2x clock on both PLLs if necessary */
1672         if (IS_I830(dev) &&
1673             intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1674             intel_num_dvo_pipes(dev) == 1) {
1675                 I915_WRITE(DPLL(PIPE_B),
1676                            I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1677                 I915_WRITE(DPLL(PIPE_A),
1678                            I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1679         }
1680
1681         /* Don't disable pipe or pipe PLLs if needed */
1682         if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1683             (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1684                 return;
1685
1686         /* Make sure the pipe isn't still relying on us */
1687         assert_pipe_disabled(dev_priv, pipe);
1688
1689         I915_WRITE(DPLL(pipe), 0);
1690         POSTING_READ(DPLL(pipe));
1691 }
1692
1693 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1694 {
1695         u32 val = 0;
1696
1697         /* Make sure the pipe isn't still relying on us */
1698         assert_pipe_disabled(dev_priv, pipe);
1699
1700         /*
1701          * Leave integrated clock source and reference clock enabled for pipe B.
1702          * The latter is needed for VGA hotplug / manual detection.
1703          */
1704         if (pipe == PIPE_B)
1705                 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1706         I915_WRITE(DPLL(pipe), val);
1707         POSTING_READ(DPLL(pipe));
1708
1709 }
1710
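/*
 * CHV: disable the DPLL for @pipe and shut down the PHY-side clocks it
 * was using (10bit dclkp and the left/right clock distribution).
 */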
1711 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1712 {
1713         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1714         u32 val;
1715
1716         /* Make sure the pipe isn't still relying on us */
1717         assert_pipe_disabled(dev_priv, pipe);
1718
1719         /* Set PLL en = 0 */
1720         val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
1721         if (pipe != PIPE_A)
1722                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1723         I915_WRITE(DPLL(pipe), val);
1724         POSTING_READ(DPLL(pipe));
1725
1726         mutex_lock(&dev_priv->dpio_lock);
1727
1728         /* Disable 10bit clock to display controller */
1729         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1730         val &= ~DPIO_DCLKP_EN;
1731         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1732
1733         /* disable left/right clock distribution */
1734         if (pipe != PIPE_B) {
1735                 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1736                 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1737                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1738         } else {
1739                 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1740                 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1741                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1742         }
1743
1744         mutex_unlock(&dev_priv->dpio_lock);
1745 }
1746
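/*
 * Poll the DPLL/PHY status register until the port's READY_MASK bits
 * read back as zero (up to 1000 ms) and warn if that never happens.
 */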
1747 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1748                 struct intel_digital_port *dport)
1749 {
1750         u32 port_mask;
1751         int dpll_reg;
1752
1753         switch (dport->port) {
1754         case PORT_B:
1755                 port_mask = DPLL_PORTB_READY_MASK;
1756                 dpll_reg = DPLL(0);
1757                 break;
1758         case PORT_C:
1759                 port_mask = DPLL_PORTC_READY_MASK;
1760                 dpll_reg = DPLL(0);
1761                 break;
1762         case PORT_D:
1763                 port_mask = DPLL_PORTD_READY_MASK;
1764                 dpll_reg = DPIO_PHY_STATUS;
1765                 break;
1766         default:
1767                 BUG();
1768         }
1769
1770         if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
1771                 WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1772                      port_name(dport->port), I915_READ(dpll_reg));
1773 }
1774
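/*
 * If this crtc is the first user of its shared DPLL, run the PLL's
 * mode_set hook so it is programmed before being enabled.
 */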
1775 static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1776 {
1777         struct drm_device *dev = crtc->base.dev;
1778         struct drm_i915_private *dev_priv = dev->dev_private;
1779         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1780
1781         if (WARN_ON(pll == NULL))
1782                 return;
1783
1784         WARN_ON(!pll->config.crtc_mask);
1785         if (pll->active == 0) {
1786                 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1787                 WARN_ON(pll->on);
1788                 assert_shared_dpll_disabled(dev_priv, pll);
1789
1790                 pll->mode_set(dev_priv, pll);
1791         }
1792 }
1793
1794 /**
1795  * intel_enable_shared_dpll - enable PCH PLL
1796  * @crtc: crtc whose shared DPLL should be enabled; enables are
1797  *        reference counted via pll->active
1798  *
1799  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1800  * drives the transcoder clock.
1801  */
1802 static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1803 {
1804         struct drm_device *dev = crtc->base.dev;
1805         struct drm_i915_private *dev_priv = dev->dev_private;
1806         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1807
1808         if (WARN_ON(pll == NULL))
1809                 return;
1810
1811         if (WARN_ON(pll->config.crtc_mask == 0))
1812                 return;
1813
1814         DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1815                       pll->name, pll->active, pll->on,
1816                       crtc->base.base.id);
1817
1818         if (pll->active++) {
1819                 WARN_ON(!pll->on);
1820                 assert_shared_dpll_enabled(dev_priv, pll);
1821                 return;
1822         }
1823         WARN_ON(pll->on);
1824
1825         intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1826
1827         DRM_DEBUG_KMS("enabling %s\n", pll->name);
1828         pll->enable(dev_priv, pll);
1829         pll->on = true;
1830 }
1831
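/*
 * Drop this crtc's reference on its shared DPLL; the PLL is actually
 * turned off (and the PLLS power domain released) only once the last
 * user is gone.
 */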
1832 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1833 {
1834         struct drm_device *dev = crtc->base.dev;
1835         struct drm_i915_private *dev_priv = dev->dev_private;
1836         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1837
1838         /* PCH only available on ILK+ */
1839         BUG_ON(INTEL_INFO(dev)->gen < 5);
1840         if (WARN_ON(pll == NULL))
1841                return;
1842
1843         if (WARN_ON(pll->config.crtc_mask == 0))
1844                 return;
1845
1846         DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1847                       pll->name, pll->active, pll->on,
1848                       crtc->base.base.id);
1849
1850         if (WARN_ON(pll->active == 0)) {
1851                 assert_shared_dpll_disabled(dev_priv, pll);
1852                 return;
1853         }
1854
1855         assert_shared_dpll_enabled(dev_priv, pll);
1856         WARN_ON(!pll->on);
1857         if (--pll->active)
1858                 return;
1859
1860         DRM_DEBUG_KMS("disabling %s\n", pll->name);
1861         pll->disable(dev_priv, pll);
1862         pll->on = false;
1863
1864         intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1865 }
1866
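/*
 * Enable the PCH transcoder for @pipe, taking over the interlace mode
 * (and, on IBX, the BPC) from the CPU pipe's PIPECONF; on CPT the
 * timing-override workaround is applied first.
 */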
1867 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1868                                            enum pipe pipe)
1869 {
1870         struct drm_device *dev = dev_priv->dev;
1871         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1872         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1873         uint32_t reg, val, pipeconf_val;
1874
1875         /* PCH only available on ILK+ */
1876         BUG_ON(!HAS_PCH_SPLIT(dev));
1877
1878         /* Make sure PCH DPLL is enabled */
1879         assert_shared_dpll_enabled(dev_priv,
1880                                    intel_crtc_to_shared_dpll(intel_crtc));
1881
1882         /* FDI must be feeding us bits for PCH ports */
1883         assert_fdi_tx_enabled(dev_priv, pipe);
1884         assert_fdi_rx_enabled(dev_priv, pipe);
1885
1886         if (HAS_PCH_CPT(dev)) {
1887                 /* Workaround: Set the timing override bit before enabling the
1888                  * pch transcoder. */
1889                 reg = TRANS_CHICKEN2(pipe);
1890                 val = I915_READ(reg);
1891                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1892                 I915_WRITE(reg, val);
1893         }
1894
1895         reg = PCH_TRANSCONF(pipe);
1896         val = I915_READ(reg);
1897         pipeconf_val = I915_READ(PIPECONF(pipe));
1898
1899         if (HAS_PCH_IBX(dev_priv->dev)) {
1900                 /*
1901                  * Make the BPC in the transcoder consistent with
1902                  * that in the pipeconf reg.
1903                  */
1904                 val &= ~PIPECONF_BPC_MASK;
1905                 val |= pipeconf_val & PIPECONF_BPC_MASK;
1906         }
1907
1908         val &= ~TRANS_INTERLACE_MASK;
1909         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1910                 if (HAS_PCH_IBX(dev_priv->dev) &&
1911                     intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
1912                         val |= TRANS_LEGACY_INTERLACED_ILK;
1913                 else
1914                         val |= TRANS_INTERLACED;
1915         else
1916                 val |= TRANS_PROGRESSIVE;
1917
1918         I915_WRITE(reg, val | TRANS_ENABLE);
1919         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1920                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1921 }
1922
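/*
 * Enable the single LPT PCH transcoder, copying the interlace mode from
 * the given CPU transcoder's PIPECONF.
 */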
1923 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1924                                       enum transcoder cpu_transcoder)
1925 {
1926         u32 val, pipeconf_val;
1927
1928         /* PCH only available on ILK+ */
1929         BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
1930
1931         /* FDI must be feeding us bits for PCH ports */
1932         assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1933         assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1934
1935         /* Workaround: set timing override bit. */
1936         val = I915_READ(_TRANSA_CHICKEN2);
1937         val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1938         I915_WRITE(_TRANSA_CHICKEN2, val);
1939
1940         val = TRANS_ENABLE;
1941         pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1942
1943         if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1944             PIPECONF_INTERLACED_ILK)
1945                 val |= TRANS_INTERLACED;
1946         else
1947                 val |= TRANS_PROGRESSIVE;
1948
1949         I915_WRITE(LPT_TRANSCONF, val);
1950         if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1951                 DRM_ERROR("Failed to enable PCH transcoder\n");
1952 }
1953
1954 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1955                                             enum pipe pipe)
1956 {
1957         struct drm_device *dev = dev_priv->dev;
1958         uint32_t reg, val;
1959
1960         /* FDI relies on the transcoder */
1961         assert_fdi_tx_disabled(dev_priv, pipe);
1962         assert_fdi_rx_disabled(dev_priv, pipe);
1963
1964         /* Ports must be off as well */
1965         assert_pch_ports_disabled(dev_priv, pipe);
1966
1967         reg = PCH_TRANSCONF(pipe);
1968         val = I915_READ(reg);
1969         val &= ~TRANS_ENABLE;
1970         I915_WRITE(reg, val);
1971         /* wait for PCH transcoder off, transcoder state */
1972         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1973                 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1974
1975         if (!HAS_PCH_IBX(dev)) {
1976                 /* Workaround: Clear the timing override chicken bit again. */
1977                 reg = TRANS_CHICKEN2(pipe);
1978                 val = I915_READ(reg);
1979                 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1980                 I915_WRITE(reg, val);
1981         }
1982 }
1983
1984 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1985 {
1986         u32 val;
1987
1988         val = I915_READ(LPT_TRANSCONF);
1989         val &= ~TRANS_ENABLE;
1990         I915_WRITE(LPT_TRANSCONF, val);
1991         /* wait for PCH transcoder off, transcoder state */
1992         if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1993                 DRM_ERROR("Failed to disable PCH transcoder\n");
1994
1995         /* Workaround: clear timing override bit. */
1996         val = I915_READ(_TRANSA_CHICKEN2);
1997         val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1998         I915_WRITE(_TRANSA_CHICKEN2, val);
1999 }
2000
2001 /**
2002  * intel_enable_pipe - enable a pipe, asserting requirements
2003  * @crtc: crtc responsible for the pipe
2004  *
2005  * Enable @crtc's pipe, making sure that various hardware specific requirements
2006  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2007  */
2008 static void intel_enable_pipe(struct intel_crtc *crtc)
2009 {
2010         struct drm_device *dev = crtc->base.dev;
2011         struct drm_i915_private *dev_priv = dev->dev_private;
2012         enum pipe pipe = crtc->pipe;
2013         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2014                                                                       pipe);
2015         enum pipe pch_transcoder;
2016         int reg;
2017         u32 val;
2018
2019         assert_planes_disabled(dev_priv, pipe);
2020         assert_cursor_disabled(dev_priv, pipe);
2021         assert_sprites_disabled(dev_priv, pipe);
2022
2023         if (HAS_PCH_LPT(dev_priv->dev))
2024                 pch_transcoder = TRANSCODER_A;
2025         else
2026                 pch_transcoder = pipe;
2027
2028         /*
2029          * A pipe without a PLL won't actually be able to drive bits from
2030          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2031          * need the check.
2032          */
2033         if (!HAS_PCH_SPLIT(dev_priv->dev)) {
2034                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
2035                         assert_dsi_pll_enabled(dev_priv);
2036                 else
2037                         assert_pll_enabled(dev_priv, pipe);
2038         } else {
2039                 if (crtc->config->has_pch_encoder) {
2040                         /* if driving the PCH, we need FDI enabled */
2041                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2042                         assert_fdi_tx_pll_enabled(dev_priv,
2043                                                   (enum pipe) cpu_transcoder);
2044                 }
2045                 /* FIXME: assert CPU port conditions for SNB+ */
2046         }
2047
2048         reg = PIPECONF(cpu_transcoder);
2049         val = I915_READ(reg);
2050         if (val & PIPECONF_ENABLE) {
2051                 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2052                           (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2053                 return;
2054         }
2055
2056         I915_WRITE(reg, val | PIPECONF_ENABLE);
2057         POSTING_READ(reg);
2058 }
2059
2060 /**
2061  * intel_disable_pipe - disable a pipe, asserting requirements
2062  * @crtc: crtc whose pipe is to be disabled
2063  *
2064  * Disable the pipe of @crtc, making sure that various hardware
2065  * specific requirements are met, if applicable, e.g. plane
2066  * disabled, panel fitter off, etc.
2067  *
2068  * Will wait until the pipe has shut down before returning.
2069  */
2070 static void intel_disable_pipe(struct intel_crtc *crtc)
2071 {
2072         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2073         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2074         enum pipe pipe = crtc->pipe;
2075         int reg;
2076         u32 val;
2077
2078         /*
2079          * Make sure planes won't keep trying to pump pixels to us,
2080          * or we might hang the display.
2081          */
2082         assert_planes_disabled(dev_priv, pipe);
2083         assert_cursor_disabled(dev_priv, pipe);
2084         assert_sprites_disabled(dev_priv, pipe);
2085
2086         reg = PIPECONF(cpu_transcoder);
2087         val = I915_READ(reg);
2088         if ((val & PIPECONF_ENABLE) == 0)
2089                 return;
2090
2091         /*
2092          * Double wide has implications for planes
2093          * so best keep it disabled when not needed.
2094          */
2095         if (crtc->config->double_wide)
2096                 val &= ~PIPECONF_DOUBLE_WIDE;
2097
2098         /* Don't disable pipe or pipe PLLs if needed */
2099         if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2100             !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2101                 val &= ~PIPECONF_ENABLE;
2102
2103         I915_WRITE(reg, val);
2104         if ((val & PIPECONF_ENABLE) == 0)
2105                 intel_wait_for_pipe_off(crtc);
2106 }
2107
2108 /*
2109  * Plane regs are double buffered, going from enabled->disabled needs a
2110  * trigger in order to latch.  The display address reg provides this.
2111  */
2112 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2113                                enum plane plane)
2114 {
2115         struct drm_device *dev = dev_priv->dev;
2116         u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
2117
2118         I915_WRITE(reg, I915_READ(reg));
2119         POSTING_READ(reg);
2120 }
2121
2122 /**
2123  * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
2124  * @plane:  plane to be enabled
2125  * @crtc: crtc for the plane
2126  *
2127  * Enable @plane on @crtc, making sure that the pipe is running first.
2128  */
2129 static void intel_enable_primary_hw_plane(struct drm_plane *plane,
2130                                           struct drm_crtc *crtc)
2131 {
2132         struct drm_device *dev = plane->dev;
2133         struct drm_i915_private *dev_priv = dev->dev_private;
2134         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2135
2136         /* If the pipe isn't enabled, we can't pump pixels and may hang */
2137         assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2138
2139         if (intel_crtc->primary_enabled)
2140                 return;
2141
2142         intel_crtc->primary_enabled = true;
2143
2144         dev_priv->display.update_primary_plane(crtc, plane->fb,
2145                                                crtc->x, crtc->y);
2146
2147         /*
2148          * BDW signals flip done immediately if the plane
2149          * is disabled, even if the plane enable is already
2150          * armed to occur at the next vblank :(
2151          */
2152         if (IS_BROADWELL(dev))
2153                 intel_wait_for_vblank(dev, intel_crtc->pipe);
2154 }
2155
2156 /**
2157  * intel_disable_primary_hw_plane - disable the primary hardware plane
2158  * @plane: plane to be disabled
2159  * @crtc: crtc for the plane
2160  *
2161  * Disable @plane on @crtc, making sure that the crtc is actually active first.
2162  */
2163 static void intel_disable_primary_hw_plane(struct drm_plane *plane,
2164                                            struct drm_crtc *crtc)
2165 {
2166         struct drm_device *dev = plane->dev;
2167         struct drm_i915_private *dev_priv = dev->dev_private;
2168         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2169
2170         if (WARN_ON(!intel_crtc->active))
2171                 return;
2172
2173         if (!intel_crtc->primary_enabled)
2174                 return;
2175
2176         intel_crtc->primary_enabled = false;
2177
2178         dev_priv->display.update_primary_plane(crtc, plane->fb,
2179                                                crtc->x, crtc->y);
2180 }
2181
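/*
 * True when the GPU sits behind an active VT-d IOMMU on gen6+; callers
 * then bump the scanout alignment as a workaround (see below).
 */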
2182 static bool need_vtd_wa(struct drm_device *dev)
2183 {
2184 #ifdef CONFIG_INTEL_IOMMU
2185         if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2186                 return true;
2187 #endif
2188         return false;
2189 }
2190
2191 int
2192 intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling)
2193 {
2194         int tile_height;
2195
2196         tile_height = tiling ? (IS_GEN2(dev) ? 16 : 8) : 1;
2197         return ALIGN(height, tile_height);
2198 }
2199
2200 int
2201 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2202                            struct drm_framebuffer *fb,
2203                            struct intel_engine_cs *pipelined)
2204 {
2205         struct drm_device *dev = fb->dev;
2206         struct drm_i915_private *dev_priv = dev->dev_private;
2207         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2208         u32 alignment;
2209         int ret;
2210
2211         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2212
2213         switch (obj->tiling_mode) {
2214         case I915_TILING_NONE:
2215                 if (INTEL_INFO(dev)->gen >= 9)
2216                         alignment = 256 * 1024;
2217                 else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2218                         alignment = 128 * 1024;
2219                 else if (INTEL_INFO(dev)->gen >= 4)
2220                         alignment = 4 * 1024;
2221                 else
2222                         alignment = 64 * 1024;
2223                 break;
2224         case I915_TILING_X:
2225                 if (INTEL_INFO(dev)->gen >= 9)
2226                         alignment = 256 * 1024;
2227                 else {
2228                         /* pin() will align the object as required by fence */
2229                         alignment = 0;
2230                 }
2231                 break;
2232         case I915_TILING_Y:
2233                 WARN(1, "Y tiled bo slipped through, driver bug!\n");
2234                 return -EINVAL;
2235         default:
2236                 BUG();
2237         }
2238
2239         /* Note that the w/a also requires 64 PTE of padding following the
2240          * bo. We currently fill all unused PTE with the shadow page and so
2241          * we should always have valid PTE following the scanout preventing
2242          * the VT-d warning.
2243          */
2244         if (need_vtd_wa(dev) && alignment < 256 * 1024)
2245                 alignment = 256 * 1024;
2246
2247         /*
2248          * Global GTT PTE registers are special registers which actually forward
2249          * writes to a chunk of system memory. This means that there is no risk
2250          * that the register values disappear as soon as we call
2251          * intel_runtime_pm_put(), so it is correct to wrap only the
2252          * pin/unpin/fence and not more.
2253          */
2254         intel_runtime_pm_get(dev_priv);
2255
2256         dev_priv->mm.interruptible = false;
2257         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2258         if (ret)
2259                 goto err_interruptible;
2260
2261         /* Install a fence for tiled scan-out. Pre-i965 always needs a
2262          * fence, whereas 965+ only requires a fence if using
2263          * framebuffer compression.  For simplicity, we always install
2264          * a fence as the cost is not that onerous.
2265          */
2266         ret = i915_gem_object_get_fence(obj);
2267         if (ret)
2268                 goto err_unpin;
2269
2270         i915_gem_object_pin_fence(obj);
2271
2272         dev_priv->mm.interruptible = true;
2273         intel_runtime_pm_put(dev_priv);
2274         return 0;
2275
2276 err_unpin:
2277         i915_gem_object_unpin_from_display_plane(obj);
2278 err_interruptible:
2279         dev_priv->mm.interruptible = true;
2280         intel_runtime_pm_put(dev_priv);
2281         return ret;
2282 }
2283
2284 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2285 {
2286         WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2287
2288         i915_gem_object_unpin_fence(obj);
2289         i915_gem_object_unpin_from_display_plane(obj);
2290 }
2291
2292 /* Computes the linear offset to the base tile and adjusts x, y. Bytes per pixel
2293  * is assumed to be a power-of-two. */
2294 unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2295                                              unsigned int tiling_mode,
2296                                              unsigned int cpp,
2297                                              unsigned int pitch)
2298 {
2299         if (tiling_mode != I915_TILING_NONE) {
2300                 unsigned int tile_rows, tiles;
2301
2302                 tile_rows = *y / 8;
2303                 *y %= 8;
2304
2305                 tiles = *x / (512/cpp);
2306                 *x %= 512/cpp;
2307
2308                 return tile_rows * pitch * 8 + tiles * 4096;
2309         } else {
2310                 unsigned int offset;
2311
2312                 offset = *y * pitch + *x * cpp;
2313                 *y = 0;
2314                 *x = (offset & 4095) / cpp;
2315                 return offset & -4096;
2316         }
2317 }
2318
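/* Translate a DISPPLANE_* pixel format value into the matching DRM fourcc. */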
2319 static int i9xx_format_to_fourcc(int format)
2320 {
2321         switch (format) {
2322         case DISPPLANE_8BPP:
2323                 return DRM_FORMAT_C8;
2324         case DISPPLANE_BGRX555:
2325                 return DRM_FORMAT_XRGB1555;
2326         case DISPPLANE_BGRX565:
2327                 return DRM_FORMAT_RGB565;
2328         default:
2329         case DISPPLANE_BGRX888:
2330                 return DRM_FORMAT_XRGB8888;
2331         case DISPPLANE_RGBX888:
2332                 return DRM_FORMAT_XBGR8888;
2333         case DISPPLANE_BGRX101010:
2334                 return DRM_FORMAT_XRGB2101010;
2335         case DISPPLANE_RGBX101010:
2336                 return DRM_FORMAT_XBGR2101010;
2337         }
2338 }
2339
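/*
 * Translate a PLANE_CTL_FORMAT_* value, together with the RGB/BGR order
 * and alpha bits, into the matching DRM fourcc.
 */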
2340 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2341 {
2342         switch (format) {
2343         case PLANE_CTL_FORMAT_RGB_565:
2344                 return DRM_FORMAT_RGB565;
2345         default:
2346         case PLANE_CTL_FORMAT_XRGB_8888:
2347                 if (rgb_order) {
2348                         if (alpha)
2349                                 return DRM_FORMAT_ABGR8888;
2350                         else
2351                                 return DRM_FORMAT_XBGR8888;
2352                 } else {
2353                         if (alpha)
2354                                 return DRM_FORMAT_ARGB8888;
2355                         else
2356                                 return DRM_FORMAT_XRGB8888;
2357                 }
2358         case PLANE_CTL_FORMAT_XRGB_2101010:
2359                 if (rgb_order)
2360                         return DRM_FORMAT_XBGR2101010;
2361                 else
2362                         return DRM_FORMAT_XRGB2101010;
2363         }
2364 }
2365
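/*
 * Try to wrap the preallocated scanout buffer described by @plane_config
 * (typically the firmware boot framebuffer, in stolen memory) in a GEM
 * object and initialize the crtc's primary fb with it.
 */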
2366 static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
2367                                   struct intel_plane_config *plane_config)
2368 {
2369         struct drm_device *dev = crtc->base.dev;
2370         struct drm_i915_gem_object *obj = NULL;
2371         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2372         u32 base = plane_config->base;
2373
2374         if (plane_config->size == 0)
2375                 return false;
2376
2377         obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
2378                                                              plane_config->size);
2379         if (!obj)
2380                 return false;
2381
2382         obj->tiling_mode = plane_config->tiling;
2383         if (obj->tiling_mode == I915_TILING_X)
2384                 obj->stride = crtc->base.primary->fb->pitches[0];
2385
2386         mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
2387         mode_cmd.width = crtc->base.primary->fb->width;
2388         mode_cmd.height = crtc->base.primary->fb->height;
2389         mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
2390
2391         mutex_lock(&dev->struct_mutex);
2392
2393         if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
2394                                    &mode_cmd, obj)) {
2395                 DRM_DEBUG_KMS("intel fb init failed\n");
2396                 goto out_unref_obj;
2397         }
2398
2399         obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
2400         mutex_unlock(&dev->struct_mutex);
2401
2402         DRM_DEBUG_KMS("plane fb obj %p\n", obj);
2403         return true;
2404
2405 out_unref_obj:
2406         drm_gem_object_unreference(&obj->base);
2407         mutex_unlock(&dev->struct_mutex);
2408         return false;
2409 }
2410
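/*
 * Take over the pre-initialized framebuffer for this crtc: either wrap
 * its backing storage directly, or, failing that, share another active
 * crtc's fb that points at the same GGTT address.
 */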
2411 static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2412                                  struct intel_plane_config *plane_config)
2413 {
2414         struct drm_device *dev = intel_crtc->base.dev;
2415         struct drm_i915_private *dev_priv = dev->dev_private;
2416         struct drm_crtc *c;
2417         struct intel_crtc *i;
2418         struct drm_i915_gem_object *obj;
2419
2420         if (!intel_crtc->base.primary->fb)
2421                 return;
2422
2423         if (intel_alloc_plane_obj(intel_crtc, plane_config))
2424                 return;
2425
2426         kfree(intel_crtc->base.primary->fb);
2427         intel_crtc->base.primary->fb = NULL;
2428
2429         /*
2430          * Failed to alloc the obj, check to see if we should share
2431          * an fb with another CRTC instead
2432          */
2433         for_each_crtc(dev, c) {
2434                 i = to_intel_crtc(c);
2435
2436                 if (c == &intel_crtc->base)
2437                         continue;
2438
2439                 if (!i->active)
2440                         continue;
2441
2442                 obj = intel_fb_obj(c->primary->fb);
2443                 if (obj == NULL)
2444                         continue;
2445
2446                 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2447                         if (obj->tiling_mode != I915_TILING_NONE)
2448                                 dev_priv->preserve_bios_swizzle = true;
2449
2450                         drm_framebuffer_reference(c->primary->fb);
2451                         intel_crtc->base.primary->fb = c->primary->fb;
2452                         obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2453                         break;
2454                 }
2455         }
2456 }
2457
2458 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2459                                       struct drm_framebuffer *fb,
2460                                       int x, int y)
2461 {
2462         struct drm_device *dev = crtc->dev;
2463         struct drm_i915_private *dev_priv = dev->dev_private;
2464         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2465         struct drm_i915_gem_object *obj;
2466         int plane = intel_crtc->plane;
2467         unsigned long linear_offset;
2468         u32 dspcntr;
2469         u32 reg = DSPCNTR(plane);
2470         int pixel_size;
2471
2472         if (!intel_crtc->primary_enabled) {
2473                 I915_WRITE(reg, 0);
2474                 if (INTEL_INFO(dev)->gen >= 4)
2475                         I915_WRITE(DSPSURF(plane), 0);
2476                 else
2477                         I915_WRITE(DSPADDR(plane), 0);
2478                 POSTING_READ(reg);
2479                 return;
2480         }
2481
2482         obj = intel_fb_obj(fb);
2483         if (WARN_ON(obj == NULL))
2484                 return;
2485
2486         pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2487
2488         dspcntr = DISPPLANE_GAMMA_ENABLE;
2489
2490         dspcntr |= DISPLAY_PLANE_ENABLE;
2491
2492         if (INTEL_INFO(dev)->gen < 4) {
2493                 if (intel_crtc->pipe == PIPE_B)
2494                         dspcntr |= DISPPLANE_SEL_PIPE_B;
2495
2496                 /* pipesrc and dspsize control the size that is scaled from,
2497                  * which should always be the user's requested size.
2498                  */
2499                 I915_WRITE(DSPSIZE(plane),
2500                            ((intel_crtc->config->pipe_src_h - 1) << 16) |
2501                            (intel_crtc->config->pipe_src_w - 1));
2502                 I915_WRITE(DSPPOS(plane), 0);
2503         } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2504                 I915_WRITE(PRIMSIZE(plane),
2505                            ((intel_crtc->config->pipe_src_h - 1) << 16) |
2506                            (intel_crtc->config->pipe_src_w - 1));
2507                 I915_WRITE(PRIMPOS(plane), 0);
2508                 I915_WRITE(PRIMCNSTALPHA(plane), 0);
2509         }
2510
2511         switch (fb->pixel_format) {
2512         case DRM_FORMAT_C8:
2513                 dspcntr |= DISPPLANE_8BPP;
2514                 break;
2515         case DRM_FORMAT_XRGB1555:
2516         case DRM_FORMAT_ARGB1555:
2517                 dspcntr |= DISPPLANE_BGRX555;
2518                 break;
2519         case DRM_FORMAT_RGB565:
2520                 dspcntr |= DISPPLANE_BGRX565;
2521                 break;
2522         case DRM_FORMAT_XRGB8888:
2523         case DRM_FORMAT_ARGB8888:
2524                 dspcntr |= DISPPLANE_BGRX888;
2525                 break;
2526         case DRM_FORMAT_XBGR8888:
2527         case DRM_FORMAT_ABGR8888:
2528                 dspcntr |= DISPPLANE_RGBX888;
2529                 break;
2530         case DRM_FORMAT_XRGB2101010:
2531         case DRM_FORMAT_ARGB2101010:
2532                 dspcntr |= DISPPLANE_BGRX101010;
2533                 break;
2534         case DRM_FORMAT_XBGR2101010:
2535         case DRM_FORMAT_ABGR2101010:
2536                 dspcntr |= DISPPLANE_RGBX101010;
2537                 break;
2538         default:
2539                 BUG();
2540         }
2541
2542         if (INTEL_INFO(dev)->gen >= 4 &&
2543             obj->tiling_mode != I915_TILING_NONE)
2544                 dspcntr |= DISPPLANE_TILED;
2545
2546         if (IS_G4X(dev))
2547                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2548
2549         linear_offset = y * fb->pitches[0] + x * pixel_size;
2550
2551         if (INTEL_INFO(dev)->gen >= 4) {
2552                 intel_crtc->dspaddr_offset =
2553                         intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2554                                                        pixel_size,
2555                                                        fb->pitches[0]);
2556                 linear_offset -= intel_crtc->dspaddr_offset;
2557         } else {
2558                 intel_crtc->dspaddr_offset = linear_offset;
2559         }
2560
2561         if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2562                 dspcntr |= DISPPLANE_ROTATE_180;
2563
2564                 x += (intel_crtc->config->pipe_src_w - 1);
2565                 y += (intel_crtc->config->pipe_src_h - 1);
2566
2567                 /* Find the last pixel of the last line of the display
2568                  * data and add it to linear_offset. */
2569                 linear_offset +=
2570                         (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2571                         (intel_crtc->config->pipe_src_w - 1) * pixel_size;
2572         }
2573
2574         I915_WRITE(reg, dspcntr);
2575
2576         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2577                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2578                       fb->pitches[0]);
2579         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2580         if (INTEL_INFO(dev)->gen >= 4) {
2581                 I915_WRITE(DSPSURF(plane),
2582                            i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2583                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2584                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2585         } else
2586                 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2587         POSTING_READ(reg);
2588 }
2589
2590 static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2591                                           struct drm_framebuffer *fb,
2592                                           int x, int y)
2593 {
2594         struct drm_device *dev = crtc->dev;
2595         struct drm_i915_private *dev_priv = dev->dev_private;
2596         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2597         struct drm_i915_gem_object *obj;
2598         int plane = intel_crtc->plane;
2599         unsigned long linear_offset;
2600         u32 dspcntr;
2601         u32 reg = DSPCNTR(plane);
2602         int pixel_size;
2603
2604         if (!intel_crtc->primary_enabled) {
2605                 I915_WRITE(reg, 0);
2606                 I915_WRITE(DSPSURF(plane), 0);
2607                 POSTING_READ(reg);
2608                 return;
2609         }
2610
2611         obj = intel_fb_obj(fb);
2612         if (WARN_ON(obj == NULL))
2613                 return;
2614
2615         pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2616
2617         dspcntr = DISPPLANE_GAMMA_ENABLE;
2618
2619         dspcntr |= DISPLAY_PLANE_ENABLE;
2620
2621         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2622                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2623
2624         switch (fb->pixel_format) {
2625         case DRM_FORMAT_C8:
2626                 dspcntr |= DISPPLANE_8BPP;
2627                 break;
2628         case DRM_FORMAT_RGB565:
2629                 dspcntr |= DISPPLANE_BGRX565;
2630                 break;
2631         case DRM_FORMAT_XRGB8888:
2632         case DRM_FORMAT_ARGB8888:
2633                 dspcntr |= DISPPLANE_BGRX888;
2634                 break;
2635         case DRM_FORMAT_XBGR8888:
2636         case DRM_FORMAT_ABGR8888:
2637                 dspcntr |= DISPPLANE_RGBX888;
2638                 break;
2639         case DRM_FORMAT_XRGB2101010:
2640         case DRM_FORMAT_ARGB2101010:
2641                 dspcntr |= DISPPLANE_BGRX101010;
2642                 break;
2643         case DRM_FORMAT_XBGR2101010:
2644         case DRM_FORMAT_ABGR2101010:
2645                 dspcntr |= DISPPLANE_RGBX101010;
2646                 break;
2647         default:
2648                 BUG();
2649         }
2650
2651         if (obj->tiling_mode != I915_TILING_NONE)
2652                 dspcntr |= DISPPLANE_TILED;
2653
2654         if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2655                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2656
2657         linear_offset = y * fb->pitches[0] + x * pixel_size;
2658         intel_crtc->dspaddr_offset =
2659                 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2660                                                pixel_size,
2661                                                fb->pitches[0]);
2662         linear_offset -= intel_crtc->dspaddr_offset;
2663         if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2664                 dspcntr |= DISPPLANE_ROTATE_180;
2665
2666                 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2667                         x += (intel_crtc->config->pipe_src_w - 1);
2668                         y += (intel_crtc->config->pipe_src_h - 1);
2669
2670                         /* Find the last pixel of the last line of the display
2671                          * data and add it to linear_offset. */
2672                         linear_offset +=
2673                                 (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2674                                 (intel_crtc->config->pipe_src_w - 1) * pixel_size;
2675                 }
2676         }
2677
2678         I915_WRITE(reg, dspcntr);
2679
2680         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2681                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2682                       fb->pitches[0]);
2683         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2684         I915_WRITE(DSPSURF(plane),
2685                    i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2686         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2687                 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2688         } else {
2689                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2690                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2691         }
2692         POSTING_READ(reg);
2693 }
2694
2695 static void skylake_update_primary_plane(struct drm_crtc *crtc,
2696                                          struct drm_framebuffer *fb,
2697                                          int x, int y)
2698 {
2699         struct drm_device *dev = crtc->dev;
2700         struct drm_i915_private *dev_priv = dev->dev_private;
2701         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2702         struct intel_framebuffer *intel_fb;
2703         struct drm_i915_gem_object *obj;
2704         int pipe = intel_crtc->pipe;
2705         u32 plane_ctl, stride;
2706
2707         if (!intel_crtc->primary_enabled) {
2708                 I915_WRITE(PLANE_CTL(pipe, 0), 0);
2709                 I915_WRITE(PLANE_SURF(pipe, 0), 0);
2710                 POSTING_READ(PLANE_CTL(pipe, 0));
2711                 return;
2712         }
2713
2714         plane_ctl = PLANE_CTL_ENABLE |
2715                     PLANE_CTL_PIPE_GAMMA_ENABLE |
2716                     PLANE_CTL_PIPE_CSC_ENABLE;
2717
2718         switch (fb->pixel_format) {
2719         case DRM_FORMAT_RGB565:
2720                 plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
2721                 break;
2722         case DRM_FORMAT_XRGB8888:
2723                 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2724                 break;
2725         case DRM_FORMAT_XBGR8888:
2726                 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2727                 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2728                 break;
2729         case DRM_FORMAT_XRGB2101010:
2730                 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2731                 break;
2732         case DRM_FORMAT_XBGR2101010:
2733                 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2734                 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2735                 break;
2736         default:
2737                 BUG();
2738         }
2739
2740         intel_fb = to_intel_framebuffer(fb);
2741         obj = intel_fb->obj;
2742
2743         /*
2744          * The stride is expressed either in chunks of 64 bytes for
2745          * linear buffers or in number of tiles for tiled buffers.
2746          */
2747         switch (obj->tiling_mode) {
2748         case I915_TILING_NONE:
2749                 stride = fb->pitches[0] >> 6;
2750                 break;
2751         case I915_TILING_X:
2752                 plane_ctl |= PLANE_CTL_TILED_X;
2753                 stride = fb->pitches[0] >> 9;
2754                 break;
2755         default:
2756                 BUG();
2757         }
2758
2759         plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
2760         if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
2761                 plane_ctl |= PLANE_CTL_ROTATE_180;
2762
2763         I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
2764
2765         DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
2766                       i915_gem_obj_ggtt_offset(obj),
2767                       x, y, fb->width, fb->height,
2768                       fb->pitches[0]);
2769
2770         I915_WRITE(PLANE_POS(pipe, 0), 0);
2771         I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
2772         I915_WRITE(PLANE_SIZE(pipe, 0),
2773                    (intel_crtc->config->pipe_src_h - 1) << 16 |
2774                    (intel_crtc->config->pipe_src_w - 1));
2775         I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
2776         I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
2777
2778         POSTING_READ(PLANE_SURF(pipe, 0));
2779 }
2780
2781 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2782 static int
2783 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2784                            int x, int y, enum mode_set_atomic state)
2785 {
2786         struct drm_device *dev = crtc->dev;
2787         struct drm_i915_private *dev_priv = dev->dev_private;
2788
2789         if (dev_priv->display.disable_fbc)
2790                 dev_priv->display.disable_fbc(dev);
2791
2792         dev_priv->display.update_primary_plane(crtc, fb, x, y);
2793
2794         return 0;
2795 }
2796
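/*
 * Finish any page flip still pending on each crtc so that userspace
 * receives its flip-completion events (used from the reset path below).
 */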
2797 static void intel_complete_page_flips(struct drm_device *dev)
2798 {
2799         struct drm_crtc *crtc;
2800
2801         for_each_crtc(dev, crtc) {
2802                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2803                 enum plane plane = intel_crtc->plane;
2804
2805                 intel_prepare_page_flip(dev, plane);
2806                 intel_finish_page_flip_plane(dev, plane);
2807         }
2808 }
2809
2810 static void intel_update_primary_planes(struct drm_device *dev)
2811 {
2812         struct drm_i915_private *dev_priv = dev->dev_private;
2813         struct drm_crtc *crtc;
2814
2815         for_each_crtc(dev, crtc) {
2816                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2817
2818                 drm_modeset_lock(&crtc->mutex, NULL);
2819                 /*
2820                  * FIXME: Once we have proper support for primary planes (and
2821                  * disabling them without disabling the entire crtc), allow
2822                  * a NULL crtc->primary->fb again.
2823                  */
2824                 if (intel_crtc->active && crtc->primary->fb)
2825                         dev_priv->display.update_primary_plane(crtc,
2826                                                                crtc->primary->fb,
2827                                                                crtc->x,
2828                                                                crtc->y);
2829                 drm_modeset_unlock(&crtc->mutex);
2830         }
2831 }
2832
2833 void intel_prepare_reset(struct drm_device *dev)
2834 {
2835         struct drm_i915_private *dev_priv = to_i915(dev);
2836         struct intel_crtc *crtc;
2837
2838         /* no reset support for gen2 */
2839         if (IS_GEN2(dev))
2840                 return;
2841
2842         /* reset doesn't touch the display */
2843         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
2844                 return;
2845
2846         drm_modeset_lock_all(dev);
2847
2848         /*
2849          * Disabling the crtcs gracefully seems nicer. Also the
2850          * g33 docs say we should at least disable all the planes.
2851          */
2852         for_each_intel_crtc(dev, crtc) {
2853                 if (crtc->active)
2854                         dev_priv->display.crtc_disable(&crtc->base);
2855         }
2856 }
2857
2858 void intel_finish_reset(struct drm_device *dev)
2859 {
2860         struct drm_i915_private *dev_priv = to_i915(dev);
2861
2862         /*
2863          * Flips in the rings will be nuked by the reset,
2864          * so complete all pending flips so that user space
2865          * will get its events and not get stuck.
2866          */
2867         intel_complete_page_flips(dev);
2868
2869         /* no reset support for gen2 */
2870         if (IS_GEN2(dev))
2871                 return;
2872
2873         /* reset doesn't touch the display */
2874         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
2875                 /*
2876                  * Flips in the rings have been nuked by the reset,
2877                  * so update the base address of all primary
2878          * planes to the last fb to make sure we're
2879                  * showing the correct fb after a reset.
2880                  */
2881                 intel_update_primary_planes(dev);
2882                 return;
2883         }
2884
2885         /*
2886          * The display has been reset as well,
2887          * so need a full re-initialization.
2888          */
2889         intel_runtime_pm_disable_interrupts(dev_priv);
2890         intel_runtime_pm_enable_interrupts(dev_priv);
2891
2892         intel_modeset_init_hw(dev);
2893
2894         spin_lock_irq(&dev_priv->irq_lock);
2895         if (dev_priv->display.hpd_irq_setup)
2896                 dev_priv->display.hpd_irq_setup(dev);
2897         spin_unlock_irq(&dev_priv->irq_lock);
2898
2899         intel_modeset_setup_hw_state(dev, true);
2900
2901         intel_hpd_init(dev_priv);
2902
2903         drm_modeset_unlock_all(dev);
2904 }
2905
2906 static int
2907 intel_finish_fb(struct drm_framebuffer *old_fb)
2908 {
2909         struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
2910         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2911         bool was_interruptible = dev_priv->mm.interruptible;
2912         int ret;
2913
2914         /* Big Hammer, we also need to ensure that any pending
2915          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2916          * current scanout is retired before unpinning the old
2917          * framebuffer.
2918          *
2919          * This should only fail upon a hung GPU, in which case we
2920          * can safely continue.
2921          */
2922         dev_priv->mm.interruptible = false;
2923         ret = i915_gem_object_finish_gpu(obj);
2924         dev_priv->mm.interruptible = was_interruptible;
2925
2926         return ret;
2927 }
2928
2929 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2930 {
2931         struct drm_device *dev = crtc->dev;
2932         struct drm_i915_private *dev_priv = dev->dev_private;
2933         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2934         bool pending;
2935
2936         if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2937             intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2938                 return false;
2939
2940         spin_lock_irq(&dev->event_lock);
2941         pending = to_intel_crtc(crtc)->unpin_work != NULL;
2942         spin_unlock_irq(&dev->event_lock);
2943
2944         return pending;
2945 }
2946
2947 static void intel_update_pipe_size(struct intel_crtc *crtc)
2948 {
2949         struct drm_device *dev = crtc->base.dev;
2950         struct drm_i915_private *dev_priv = dev->dev_private;
2951         const struct drm_display_mode *adjusted_mode;
2952
2953         if (!i915.fastboot)
2954                 return;
2955
2956         /*
2957          * Update pipe size and adjust fitter if needed: the reason for this is
2958          * that in compute_mode_changes we check the native mode (not the pfit
2959          * mode) to see if we can flip rather than do a full mode set. In the
2960          * fastboot case, we'll flip, but if we don't update the pipesrc and
2961          * pfit state, we'll end up with a big fb scanned out into the wrong
2962          * sized surface.
2963          *
2964          * To fix this properly, we need to hoist the checks up into
2965          * compute_mode_changes (or above), check the actual pfit state and
2966          * whether the platform allows pfit disable with pipe active, and only
2967          * then update the pipesrc and pfit state, even on the flip path.
2968          */
2969
2970         adjusted_mode = &crtc->config->base.adjusted_mode;
2971
2972         I915_WRITE(PIPESRC(crtc->pipe),
2973                    ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2974                    (adjusted_mode->crtc_vdisplay - 1));
2975         if (!crtc->config->pch_pfit.enabled &&
2976             (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2977              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2978                 I915_WRITE(PF_CTL(crtc->pipe), 0);
2979                 I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
2980                 I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
2981         }
2982         crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
2983         crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
2984 }
2985
2986 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2987 {
2988         struct drm_device *dev = crtc->dev;
2989         struct drm_i915_private *dev_priv = dev->dev_private;
2990         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2991         int pipe = intel_crtc->pipe;
2992         u32 reg, temp;
2993
2994         /* enable normal train */
2995         reg = FDI_TX_CTL(pipe);
2996         temp = I915_READ(reg);
2997         if (IS_IVYBRIDGE(dev)) {
2998                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2999                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3000         } else {
3001                 temp &= ~FDI_LINK_TRAIN_NONE;
3002                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3003         }
3004         I915_WRITE(reg, temp);
3005
3006         reg = FDI_RX_CTL(pipe);
3007         temp = I915_READ(reg);
3008         if (HAS_PCH_CPT(dev)) {
3009                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3010                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3011         } else {
3012                 temp &= ~FDI_LINK_TRAIN_NONE;
3013                 temp |= FDI_LINK_TRAIN_NONE;
3014         }
3015         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3016
3017         /* wait one idle pattern time */
3018         POSTING_READ(reg);
3019         udelay(1000);
3020
3021         /* IVB wants error correction enabled */
3022         if (IS_IVYBRIDGE(dev))
3023                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3024                            FDI_FE_ERRC_ENABLE);
3025 }
3026
3027 static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
3028 {
3029         return crtc->base.enabled && crtc->active &&
3030                 crtc->config->has_pch_encoder;
3031 }
3032
3033 static void ivb_modeset_global_resources(struct drm_device *dev)
3034 {
3035         struct drm_i915_private *dev_priv = dev->dev_private;
3036         struct intel_crtc *pipe_B_crtc =
3037                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
3038         struct intel_crtc *pipe_C_crtc =
3039                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
3040         uint32_t temp;
3041
3042         /*
3043          * When everything is off, disable fdi C so that we can enable fdi B
3044          * with all lanes. Note that we don't care about enabled pipes without
3045          * an enabled pch encoder.
3046          */
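        /*
         * (FDI_BC_BIFURCATION_SELECT splits the lane pool shared by FDI B and
         * C between the two links; with it cleared, FDI B keeps all lanes.)
         */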
3047         if (!pipe_has_enabled_pch(pipe_B_crtc) &&
3048             !pipe_has_enabled_pch(pipe_C_crtc)) {
3049                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3050                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3051
3052                 temp = I915_READ(SOUTH_CHICKEN1);
3053                 temp &= ~FDI_BC_BIFURCATION_SELECT;
3054                 DRM_DEBUG_KMS("disabling fdi C rx\n");
3055                 I915_WRITE(SOUTH_CHICKEN1, temp);
3056         }
3057 }
3058
3059 /* The FDI link training functions for ILK/Ibexpeak. */
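/*
 * Overview: training transmits pattern 1 and polls FDI_RX_IIR for bit lock,
 * then switches to pattern 2 and polls for symbol lock; the normal (idle)
 * pattern is restored later via intel_fdi_normal_train().
 */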
3060 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3061 {
3062         struct drm_device *dev = crtc->dev;
3063         struct drm_i915_private *dev_priv = dev->dev_private;
3064         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3065         int pipe = intel_crtc->pipe;
3066         u32 reg, temp, tries;
3067
3068         /* FDI needs bits from pipe first */
3069         assert_pipe_enabled(dev_priv, pipe);
3070
3071         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3072            for the train result */
3073         reg = FDI_RX_IMR(pipe);
3074         temp = I915_READ(reg);
3075         temp &= ~FDI_RX_SYMBOL_LOCK;
3076         temp &= ~FDI_RX_BIT_LOCK;
3077         I915_WRITE(reg, temp);
3078         I915_READ(reg);
3079         udelay(150);
3080
3081         /* enable CPU FDI TX and PCH FDI RX */
3082         reg = FDI_TX_CTL(pipe);
3083         temp = I915_READ(reg);
3084         temp &= ~FDI_DP_PORT_WIDTH_MASK;
3085         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3086         temp &= ~FDI_LINK_TRAIN_NONE;
3087         temp |= FDI_LINK_TRAIN_PATTERN_1;
3088         I915_WRITE(reg, temp | FDI_TX_ENABLE);
3089
3090         reg = FDI_RX_CTL(pipe);
3091         temp = I915_READ(reg);
3092         temp &= ~FDI_LINK_TRAIN_NONE;
3093         temp |= FDI_LINK_TRAIN_PATTERN_1;
3094         I915_WRITE(reg, temp | FDI_RX_ENABLE);
3095
3096         POSTING_READ(reg);
3097         udelay(150);
3098
3099         /* Ironlake workaround, enable clock pointer after FDI enable */
3100         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3101         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3102                    FDI_RX_PHASE_SYNC_POINTER_EN);
3103
3104         reg = FDI_RX_IIR(pipe);
3105         for (tries = 0; tries < 5; tries++) {
3106                 temp = I915_READ(reg);
3107                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3108
3109                 if ((temp & FDI_RX_BIT_LOCK)) {
3110                         DRM_DEBUG_KMS("FDI train 1 done.\n");
3111                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3112                         break;
3113                 }
3114         }
3115         if (tries == 5)
3116                 DRM_ERROR("FDI train 1 fail!\n");
3117
3118         /* Train 2 */
3119         reg = FDI_TX_CTL(pipe);
3120         temp = I915_READ(reg);
3121         temp &= ~FDI_LINK_TRAIN_NONE;
3122         temp |= FDI_LINK_TRAIN_PATTERN_2;
3123         I915_WRITE(reg, temp);
3124
3125         reg = FDI_RX_CTL(pipe);
3126         temp = I915_READ(reg);
3127         temp &= ~FDI_LINK_TRAIN_NONE;
3128         temp |= FDI_LINK_TRAIN_PATTERN_2;
3129         I915_WRITE(reg, temp);
3130
3131         POSTING_READ(reg);
3132         udelay(150);
3133
3134         reg = FDI_RX_IIR(pipe);
3135         for (tries = 0; tries < 5; tries++) {
3136                 temp = I915_READ(reg);
3137                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3138
3139                 if (temp & FDI_RX_SYMBOL_LOCK) {
3140                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3141                         DRM_DEBUG_KMS("FDI train 2 done.\n");
3142                         break;
3143                 }
3144         }
3145         if (tries == 5)
3146                 DRM_ERROR("FDI train 2 fail!\n");
3147
3148         DRM_DEBUG_KMS("FDI train done\n");
3149
3150 }
3151
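/*
 * Voltage swing / pre-emphasis combinations that the SNB and IVB training
 * loops below step through until the receiver reports lock.
 */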
3152 static const int snb_b_fdi_train_param[] = {
3153         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3154         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3155         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3156         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3157 };
3158
3159 /* The FDI link training functions for SNB/Cougarpoint. */
3160 static void gen6_fdi_link_train(struct drm_crtc *crtc)
3161 {
3162         struct drm_device *dev = crtc->dev;
3163         struct drm_i915_private *dev_priv = dev->dev_private;
3164         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3165         int pipe = intel_crtc->pipe;
3166         u32 reg, temp, i, retry;
3167
3168         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3169            for the train result */
3170         reg = FDI_RX_IMR(pipe);
3171         temp = I915_READ(reg);
3172         temp &= ~FDI_RX_SYMBOL_LOCK;
3173         temp &= ~FDI_RX_BIT_LOCK;
3174         I915_WRITE(reg, temp);
3175
3176         POSTING_READ(reg);
3177         udelay(150);
3178
3179         /* enable CPU FDI TX and PCH FDI RX */
3180         reg = FDI_TX_CTL(pipe);
3181         temp = I915_READ(reg);
3182         temp &= ~FDI_DP_PORT_WIDTH_MASK;
3183         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3184         temp &= ~FDI_LINK_TRAIN_NONE;
3185         temp |= FDI_LINK_TRAIN_PATTERN_1;
3186         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3187         /* SNB-B */
3188         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3189         I915_WRITE(reg, temp | FDI_TX_ENABLE);
3190
3191         I915_WRITE(FDI_RX_MISC(pipe),
3192                    FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3193
3194         reg = FDI_RX_CTL(pipe);
3195         temp = I915_READ(reg);
3196         if (HAS_PCH_CPT(dev)) {
3197                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3198                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3199         } else {
3200                 temp &= ~FDI_LINK_TRAIN_NONE;
3201                 temp |= FDI_LINK_TRAIN_PATTERN_1;
3202         }
3203         I915_WRITE(reg, temp | FDI_RX_ENABLE);
3204
3205         POSTING_READ(reg);
3206         udelay(150);
3207
3208         for (i = 0; i < 4; i++) {
3209                 reg = FDI_TX_CTL(pipe);
3210                 temp = I915_READ(reg);
3211                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3212                 temp |= snb_b_fdi_train_param[i];
3213                 I915_WRITE(reg, temp);
3214
3215                 POSTING_READ(reg);
3216                 udelay(500);
3217
3218                 for (retry = 0; retry < 5; retry++) {
3219                         reg = FDI_RX_IIR(pipe);
3220                         temp = I915_READ(reg);
3221                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3222                         if (temp & FDI_RX_BIT_LOCK) {
3223                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3224                                 DRM_DEBUG_KMS("FDI train 1 done.\n");
3225                                 break;
3226                         }
3227                         udelay(50);
3228                 }
3229                 if (retry < 5)
3230                         break;
3231         }
3232         if (i == 4)
3233                 DRM_ERROR("FDI train 1 fail!\n");
3234
3235         /* Train 2 */
3236         reg = FDI_TX_CTL(pipe);
3237         temp = I915_READ(reg);
3238         temp &= ~FDI_LINK_TRAIN_NONE;
3239         temp |= FDI_LINK_TRAIN_PATTERN_2;
3240         if (IS_GEN6(dev)) {
3241                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3242                 /* SNB-B */
3243                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3244         }
3245         I915_WRITE(reg, temp);
3246
3247         reg = FDI_RX_CTL(pipe);
3248         temp = I915_READ(reg);
3249         if (HAS_PCH_CPT(dev)) {
3250                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3251                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3252         } else {
3253                 temp &= ~FDI_LINK_TRAIN_NONE;
3254                 temp |= FDI_LINK_TRAIN_PATTERN_2;
3255         }
3256         I915_WRITE(reg, temp);
3257
3258         POSTING_READ(reg);
3259         udelay(150);
3260
3261         for (i = 0; i < 4; i++) {
3262                 reg = FDI_TX_CTL(pipe);
3263                 temp = I915_READ(reg);
3264                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3265                 temp |= snb_b_fdi_train_param[i];
3266                 I915_WRITE(reg, temp);
3267
3268                 POSTING_READ(reg);
3269                 udelay(500);
3270
3271                 for (retry = 0; retry < 5; retry++) {
3272                         reg = FDI_RX_IIR(pipe);
3273                         temp = I915_READ(reg);
3274                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3275                         if (temp & FDI_RX_SYMBOL_LOCK) {
3276                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3277                                 DRM_DEBUG_KMS("FDI train 2 done.\n");
3278                                 break;
3279                         }
3280                         udelay(50);
3281                 }
3282                 if (retry < 5)
3283                         break;
3284         }
3285         if (i == 4)
3286                 DRM_ERROR("FDI train 2 fail!\n");
3287
3288         DRM_DEBUG_KMS("FDI train done.\n");
3289 }
3290
3291 /* Manual link training for Ivy Bridge A0 parts */
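/*
 * A0 parts apparently lack working FDI auto-training, so FDI_LINK_TRAIN_AUTO
 * is cleared and every vswing/pre-emphasis level is walked by hand below.
 */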
3292 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3293 {
3294         struct drm_device *dev = crtc->dev;
3295         struct drm_i915_private *dev_priv = dev->dev_private;
3296         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3297         int pipe = intel_crtc->pipe;
3298         u32 reg, temp, i, j;
3299
3300         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3301            for the train result */
3302         reg = FDI_RX_IMR(pipe);
3303         temp = I915_READ(reg);
3304         temp &= ~FDI_RX_SYMBOL_LOCK;
3305         temp &= ~FDI_RX_BIT_LOCK;
3306         I915_WRITE(reg, temp);
3307
3308         POSTING_READ(reg);
3309         udelay(150);
3310
3311         DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3312                       I915_READ(FDI_RX_IIR(pipe)));
3313
3314         /* Try each vswing and preemphasis setting twice before moving on */
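        /* j/2 indexes snb_b_fdi_train_param, so each entry gets two attempts. */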
3315         for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3316                 /* disable first in case we need to retry */
3317                 reg = FDI_TX_CTL(pipe);
3318                 temp = I915_READ(reg);
3319                 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3320                 temp &= ~FDI_TX_ENABLE;
3321                 I915_WRITE(reg, temp);
3322
3323                 reg = FDI_RX_CTL(pipe);
3324                 temp = I915_READ(reg);
3325                 temp &= ~FDI_LINK_TRAIN_AUTO;
3326                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3327                 temp &= ~FDI_RX_ENABLE;
3328                 I915_WRITE(reg, temp);
3329
3330                 /* enable CPU FDI TX and PCH FDI RX */
3331                 reg = FDI_TX_CTL(pipe);
3332                 temp = I915_READ(reg);
3333                 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3334                 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3335                 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3336                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3337                 temp |= snb_b_fdi_train_param[j/2];
3338                 temp |= FDI_COMPOSITE_SYNC;
3339                 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3340
3341                 I915_WRITE(FDI_RX_MISC(pipe),
3342                            FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3343
3344                 reg = FDI_RX_CTL(pipe);
3345                 temp = I915_READ(reg);
3346                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3347                 temp |= FDI_COMPOSITE_SYNC;
3348                 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3349
3350                 POSTING_READ(reg);
3351                 udelay(1); /* should be 0.5us */
3352
3353                 for (i = 0; i < 4; i++) {
3354                         reg = FDI_RX_IIR(pipe);
3355                         temp = I915_READ(reg);
3356                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3357
3358                         if (temp & FDI_RX_BIT_LOCK ||
3359                             (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3360                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3361                                 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3362                                               i);
3363                                 break;
3364                         }
3365                         udelay(1); /* should be 0.5us */
3366                 }
3367                 if (i == 4) {
3368                         DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3369                         continue;
3370                 }
3371
3372                 /* Train 2 */
3373                 reg = FDI_TX_CTL(pipe);
3374                 temp = I915_READ(reg);
3375                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3376                 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3377                 I915_WRITE(reg, temp);
3378
3379                 reg = FDI_RX_CTL(pipe);
3380                 temp = I915_READ(reg);
3381                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3382                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3383                 I915_WRITE(reg, temp);
3384
3385                 POSTING_READ(reg);
3386                 udelay(2); /* should be 1.5us */
3387
3388                 for (i = 0; i < 4; i++) {
3389                         reg = FDI_RX_IIR(pipe);
3390                         temp = I915_READ(reg);
3391                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3392
3393                         if (temp & FDI_RX_SYMBOL_LOCK ||
3394                             (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3395                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3396                                 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3397                                               i);
3398                                 goto train_done;
3399                         }
3400                         udelay(2); /* should be 1.5us */
3401                 }
3402                 if (i == 4)
3403                         DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3404         }
3405
3406 train_done:
3407         DRM_DEBUG_KMS("FDI train done.\n");
3408 }
3409
3410 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3411 {
3412         struct drm_device *dev = intel_crtc->base.dev;
3413         struct drm_i915_private *dev_priv = dev->dev_private;
3414         int pipe = intel_crtc->pipe;
3415         u32 reg, temp;
3416
3417
3418         /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3419         reg = FDI_RX_CTL(pipe);
3420         temp = I915_READ(reg);
3421         temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3422         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3423         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3424         I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3425
3426         POSTING_READ(reg);
3427         udelay(200);
3428
3429         /* Switch from Rawclk to PCDclk */
3430         temp = I915_READ(reg);
3431         I915_WRITE(reg, temp | FDI_PCDCLK);
3432
3433         POSTING_READ(reg);
3434         udelay(200);
3435
3436         /* Enable CPU FDI TX PLL, always on for Ironlake */
3437         reg = FDI_TX_CTL(pipe);
3438         temp = I915_READ(reg);
3439         if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3440                 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3441
3442                 POSTING_READ(reg);
3443                 udelay(100);
3444         }
3445 }
3446
3447 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3448 {
3449         struct drm_device *dev = intel_crtc->base.dev;
3450         struct drm_i915_private *dev_priv = dev->dev_private;
3451         int pipe = intel_crtc->pipe;
3452         u32 reg, temp;
3453
3454         /* Switch from PCDclk to Rawclk */
3455         reg = FDI_RX_CTL(pipe);
3456         temp = I915_READ(reg);
3457         I915_WRITE(reg, temp & ~FDI_PCDCLK);
3458
3459         /* Disable CPU FDI TX PLL */
3460         reg = FDI_TX_CTL(pipe);
3461         temp = I915_READ(reg);
3462         I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3463
3464         POSTING_READ(reg);
3465         udelay(100);
3466
3467         reg = FDI_RX_CTL(pipe);
3468         temp = I915_READ(reg);
3469         I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3470
3471         /* Wait for the clocks to turn off. */
3472         POSTING_READ(reg);
3473         udelay(100);
3474 }
3475
3476 static void ironlake_fdi_disable(struct drm_crtc *crtc)
3477 {
3478         struct drm_device *dev = crtc->dev;
3479         struct drm_i915_private *dev_priv = dev->dev_private;
3480         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3481         int pipe = intel_crtc->pipe;
3482         u32 reg, temp;
3483
3484         /* disable CPU FDI tx and PCH FDI rx */
3485         reg = FDI_TX_CTL(pipe);
3486         temp = I915_READ(reg);
3487         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3488         POSTING_READ(reg);
3489
3490         reg = FDI_RX_CTL(pipe);
3491         temp = I915_READ(reg);
3492         temp &= ~(0x7 << 16);
3493         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3494         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3495
3496         POSTING_READ(reg);
3497         udelay(100);
3498
3499         /* Ironlake workaround, disable clock pointer after downing FDI */
3500         if (HAS_PCH_IBX(dev))
3501                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3502
3503         /* still set train pattern 1 */
3504         reg = FDI_TX_CTL(pipe);
3505         temp = I915_READ(reg);
3506         temp &= ~FDI_LINK_TRAIN_NONE;
3507         temp |= FDI_LINK_TRAIN_PATTERN_1;
3508         I915_WRITE(reg, temp);
3509
3510         reg = FDI_RX_CTL(pipe);
3511         temp = I915_READ(reg);
3512         if (HAS_PCH_CPT(dev)) {
3513                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3514                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3515         } else {
3516                 temp &= ~FDI_LINK_TRAIN_NONE;
3517                 temp |= FDI_LINK_TRAIN_PATTERN_1;
3518         }
3519         /* BPC in FDI rx is consistent with that in PIPECONF */
3520         temp &= ~(0x07 << 16);
3521         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3522         I915_WRITE(reg, temp);
3523
3524         POSTING_READ(reg);
3525         udelay(100);
3526 }
3527
3528 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3529 {
3530         struct intel_crtc *crtc;
3531
3532         /* Note that we don't need to be called with mode_config.lock here
3533          * as our list of CRTC objects is static for the lifetime of the
3534          * device and so cannot disappear as we iterate. Similarly, we can
3535          * happily treat the predicates as racy, atomic checks as userspace
3536          * cannot claim and pin a new fb without at least acquiring the
3537          * struct_mutex and so serialising with us.
3538          */
3539         for_each_intel_crtc(dev, crtc) {
3540                 if (atomic_read(&crtc->unpin_work_count) == 0)
3541                         continue;
3542
3543                 if (crtc->unpin_work)
3544                         intel_wait_for_vblank(dev, crtc->pipe);
3545
3546                 return true;
3547         }
3548
3549         return false;
3550 }
3551
3552 static void page_flip_completed(struct intel_crtc *intel_crtc)
3553 {
3554         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3555         struct intel_unpin_work *work = intel_crtc->unpin_work;
3556
3557         /* ensure that the unpin work is consistent wrt ->pending. */
3558         smp_rmb();
3559         intel_crtc->unpin_work = NULL;
3560
3561         if (work->event)
3562                 drm_send_vblank_event(intel_crtc->base.dev,
3563                                       intel_crtc->pipe,
3564                                       work->event);
3565
3566         drm_crtc_vblank_put(&intel_crtc->base);
3567
3568         wake_up_all(&dev_priv->pending_flip_queue);
3569         queue_work(dev_priv->wq, &work->work);
3570
3571         trace_i915_flip_complete(intel_crtc->plane,
3572                                  work->pending_flip_obj);
3573 }
3574
3575 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3576 {
3577         struct drm_device *dev = crtc->dev;
3578         struct drm_i915_private *dev_priv = dev->dev_private;
3579
3580         WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3581         if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3582                                        !intel_crtc_has_pending_flip(crtc),
3583                                        60*HZ) == 0)) {
3584                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3585
3586                 spin_lock_irq(&dev->event_lock);
3587                 if (intel_crtc->unpin_work) {
3588                         WARN_ONCE(1, "Removing stuck page flip\n");
3589                         page_flip_completed(intel_crtc);
3590                 }
3591                 spin_unlock_irq(&dev->event_lock);
3592         }
3593
3594         if (crtc->primary->fb) {
3595                 mutex_lock(&dev->struct_mutex);
3596                 intel_finish_fb(crtc->primary->fb);
3597                 mutex_unlock(&dev->struct_mutex);
3598         }
3599 }
3600
3601 /* Program iCLKIP clock to the desired frequency */
3602 static void lpt_program_iclkip(struct drm_crtc *crtc)
3603 {
3604         struct drm_device *dev = crtc->dev;
3605         struct drm_i915_private *dev_priv = dev->dev_private;
3606         int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
3607         u32 divsel, phaseinc, auxdiv, phasedir = 0;
3608         u32 temp;
3609
3610         mutex_lock(&dev_priv->dpio_lock);
3611
3612         /* It is necessary to ungate the pixclk gate prior to programming
3613          * the divisors, and gate it back when it is done.
3614          */
3615         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3616
3617         /* Disable SSCCTL */
3618         intel_sbi_write(dev_priv, SBI_SSCCTL6,
3619                         intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3620                                 SBI_SSCCTL_DISABLE,
3621                         SBI_ICLK);
3622
3623         /* 20MHz is a corner case which is out of range for the 7-bit divisor */
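        /*
         * Illustrative arithmetic (the auxdiv reading is an assumption, not
         * from the docs): 172800000 / 20000 = 8640, and 8640 / 64 = 135
         * overflows the 7-bit divsel field. With auxdiv = 1 presumably adding
         * a final /2 (cf. SBI_SSCAUXDIV_FINALDIV2SEL), the target becomes
         * 4320 = (0x41 + 2) * 64 + 0x20, matching the hard-coded divsel and
         * phaseinc below.
         */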
3624         if (clock == 20000) {
3625                 auxdiv = 1;
3626                 divsel = 0x41;
3627                 phaseinc = 0x20;
3628         } else {
3629                 /* The iCLK virtual clock root frequency is in MHz,
3630                  * but the adjusted_mode->crtc_clock is in kHz. To get the
3631                  * divisors, it is necessary to divide one by the other, so we
3632                  * convert the virtual clock precision to kHz here for higher
3633                  * precision.
3634                  */
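                /*
                 * Worked example: a 148500 kHz pixel clock gives
                 * 172800000 / 148500 = 1163, i.e. msb_divisor_value = 18 and
                 * pi_value = 11, so divsel = 16 and phaseinc = 11.
                 */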
3635                 u32 iclk_virtual_root_freq = 172800 * 1000;
3636                 u32 iclk_pi_range = 64;
3637                 u32 desired_divisor, msb_divisor_value, pi_value;
3638
3639                 desired_divisor = (iclk_virtual_root_freq / clock);
3640                 msb_divisor_value = desired_divisor / iclk_pi_range;
3641                 pi_value = desired_divisor % iclk_pi_range;
3642
3643                 auxdiv = 0;
3644                 divsel = msb_divisor_value - 2;
3645                 phaseinc = pi_value;
3646         }
3647
3648         /* This should not happen with any sane values */
3649         WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3650                 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3651         WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3652                 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3653
3654         DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3655                         clock,
3656                         auxdiv,
3657                         divsel,
3658                         phasedir,
3659                         phaseinc);
3660
3661         /* Program SSCDIVINTPHASE6 */
3662         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3663         temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3664         temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3665         temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3666         temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3667         temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3668         temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3669         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3670
3671         /* Program SSCAUXDIV */
3672         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3673         temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3674         temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3675         intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3676
3677         /* Enable modulator and associated divider */
3678         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3679         temp &= ~SBI_SSCCTL_DISABLE;
3680         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3681
3682         /* Wait for initialization time */
3683         udelay(24);
3684
3685         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3686
3687         mutex_unlock(&dev_priv->dpio_lock);
3688 }
3689
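/* Copy the CPU transcoder timings verbatim to the given PCH transcoder. */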
3690 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3691                                                 enum pipe pch_transcoder)
3692 {
3693         struct drm_device *dev = crtc->base.dev;
3694         struct drm_i915_private *dev_priv = dev->dev_private;
3695         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
3696
3697         I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3698                    I915_READ(HTOTAL(cpu_transcoder)));
3699         I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3700                    I915_READ(HBLANK(cpu_transcoder)));
3701         I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3702                    I915_READ(HSYNC(cpu_transcoder)));
3703
3704         I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3705                    I915_READ(VTOTAL(cpu_transcoder)));
3706         I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3707                    I915_READ(VBLANK(cpu_transcoder)));
3708         I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3709                    I915_READ(VSYNC(cpu_transcoder)));
3710         I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3711                    I915_READ(VSYNCSHIFT(cpu_transcoder)));
3712 }
3713
3714 static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3715 {
3716         struct drm_i915_private *dev_priv = dev->dev_private;
3717         uint32_t temp;
3718
3719         temp = I915_READ(SOUTH_CHICKEN1);
3720         if (temp & FDI_BC_BIFURCATION_SELECT)
3721                 return;
3722
3723         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3724         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3725
3726         temp |= FDI_BC_BIFURCATION_SELECT;
3727         DRM_DEBUG_KMS("enabling fdi C rx\n");
3728         I915_WRITE(SOUTH_CHICKEN1, temp);
3729         POSTING_READ(SOUTH_CHICKEN1);
3730 }
3731
3732 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3733 {
3734         struct drm_device *dev = intel_crtc->base.dev;
3735         struct drm_i915_private *dev_priv = dev->dev_private;
3736
3737         switch (intel_crtc->pipe) {
3738         case PIPE_A:
3739                &