drivers/gpu/drm/i915/gvt/render.c
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

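/*
 * A render_mmio entry describes one engine register that must be switched
 * between the host and a vGPU: the engine it belongs to, the register, an
 * optional write mask for masked registers (placed in the upper 16 bits on
 * write), whether the register is save/restored by the hardware context,
 * and the last host value saved at load time. List initializers below are
 * in {ring_id, reg, mask, in_context} order.
 */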
struct render_mmio {
        int ring_id;
        i915_reg_t reg;
        u32 mask;
        bool in_context;
        u32 value;
};

static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
        {RCS, _MMIO(0x229c), 0xffff, false},
        {RCS, _MMIO(0x2248), 0x0, false},
        {RCS, _MMIO(0x2098), 0x0, false},
        {RCS, _MMIO(0x20c0), 0xffff, true},
        {RCS, _MMIO(0x24d0), 0, false},
        {RCS, _MMIO(0x24d4), 0, false},
        {RCS, _MMIO(0x24d8), 0, false},
        {RCS, _MMIO(0x24dc), 0, false},
        {RCS, _MMIO(0x24e0), 0, false},
        {RCS, _MMIO(0x24e4), 0, false},
        {RCS, _MMIO(0x24e8), 0, false},
        {RCS, _MMIO(0x24ec), 0, false},
        {RCS, _MMIO(0x24f0), 0, false},
        {RCS, _MMIO(0x24f4), 0, false},
        {RCS, _MMIO(0x24f8), 0, false},
        {RCS, _MMIO(0x24fc), 0, false},
        {RCS, _MMIO(0x7004), 0xffff, true},
        {RCS, _MMIO(0x7008), 0xffff, true},
        {RCS, _MMIO(0x7000), 0xffff, true},
        {RCS, _MMIO(0x7010), 0xffff, true},
        {RCS, _MMIO(0x7300), 0xffff, true},
        {RCS, _MMIO(0x83a4), 0xffff, true},

        {BCS, _MMIO(0x2229c), 0xffff, false},
        {BCS, _MMIO(0x2209c), 0xffff, false},
        {BCS, _MMIO(0x220c0), 0xffff, false},
        {BCS, _MMIO(0x22098), 0x0, false},
        {BCS, _MMIO(0x22028), 0x0, false},
};

static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
        {RCS, _MMIO(0x229c), 0xffff, false},
        {RCS, _MMIO(0x2248), 0x0, false},
        {RCS, _MMIO(0x2098), 0x0, false},
        {RCS, _MMIO(0x20c0), 0xffff, true},
        {RCS, _MMIO(0x24d0), 0, false},
        {RCS, _MMIO(0x24d4), 0, false},
        {RCS, _MMIO(0x24d8), 0, false},
        {RCS, _MMIO(0x24dc), 0, false},
        {RCS, _MMIO(0x24e0), 0, false},
        {RCS, _MMIO(0x24e4), 0, false},
        {RCS, _MMIO(0x24e8), 0, false},
        {RCS, _MMIO(0x24ec), 0, false},
        {RCS, _MMIO(0x24f0), 0, false},
        {RCS, _MMIO(0x24f4), 0, false},
        {RCS, _MMIO(0x24f8), 0, false},
        {RCS, _MMIO(0x24fc), 0, false},
        {RCS, _MMIO(0x7004), 0xffff, true},
        {RCS, _MMIO(0x7008), 0xffff, true},
        {RCS, _MMIO(0x7000), 0xffff, true},
        {RCS, _MMIO(0x7010), 0xffff, true},
        {RCS, _MMIO(0x7300), 0xffff, true},
        {RCS, _MMIO(0x83a4), 0xffff, true},

        {RCS, _MMIO(0x40e0), 0, false},
        {RCS, _MMIO(0x40e4), 0, false},
        {RCS, _MMIO(0x2580), 0xffff, true},
        {RCS, _MMIO(0x7014), 0xffff, true},
        {RCS, _MMIO(0x20ec), 0xffff, false},
        {RCS, _MMIO(0xb118), 0, false},
        {RCS, _MMIO(0xe100), 0xffff, true},
        {RCS, _MMIO(0xe180), 0xffff, true},
        {RCS, _MMIO(0xe184), 0xffff, true},
        {RCS, _MMIO(0xe188), 0xffff, true},
        {RCS, _MMIO(0xe194), 0xffff, true},
        {RCS, _MMIO(0x4de0), 0, false},
        {RCS, _MMIO(0x4de4), 0, false},
        {RCS, _MMIO(0x4de8), 0, false},
        {RCS, _MMIO(0x4dec), 0, false},
        {RCS, _MMIO(0x4df0), 0, false},
        {RCS, _MMIO(0x4df4), 0, false},

        {BCS, _MMIO(0x2229c), 0xffff, false},
        {BCS, _MMIO(0x2209c), 0xffff, false},
        {BCS, _MMIO(0x220c0), 0xffff, false},
        {BCS, _MMIO(0x22098), 0x0, false},
        {BCS, _MMIO(0x22028), 0x0, false},

        {VCS2, _MMIO(0x1c028), 0xffff, false},

        {VECS, _MMIO(0x1a028), 0xffff, false},

        {RCS, _MMIO(0x7304), 0xffff, true},
        {RCS, _MMIO(0x2248), 0x0, false},
        {RCS, _MMIO(0x940c), 0x0, false},
        {RCS, _MMIO(0x4ab8), 0x0, false},

        {RCS, _MMIO(0x4ab0), 0x0, false},
        {RCS, _MMIO(0x20d4), 0x0, false},

        {RCS, _MMIO(0xb004), 0x0, false},
        {RCS, _MMIO(0x20a0), 0x0, false},
        {RCS, _MMIO(0x20e4), 0xffff, false},
};

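/* Host MOCS values saved by load_mocs() and written back by restore_mocs() */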
static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
static u32 gen9_render_mocs_L3[32];

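/*
 * Flush a deferred TLB invalidation for @ring_id if one is pending: write 1
 * to the ring's TLB invalidate register and wait for it to clear, holding
 * forcewake so the write is not lost to RC6.
 */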
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        enum forcewake_domains fw;
        i915_reg_t reg;
        u32 regs[] = {
                [RCS] = 0x4260,
                [VCS] = 0x4264,
                [VCS2] = 0x4268,
                [BCS] = 0x426c,
                [VECS] = 0x4270,
        };

        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;

        if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
                return;

        reg = _MMIO(regs[ring_id]);

        /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
         * We need to hold a forcewake reference while invalidating the RCS
         * TLB caches; otherwise the device can enter RC6 and interrupt the
         * invalidation process.
         */
        fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                            FW_REG_READ | FW_REG_WRITE);
        if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
                fw |= FORCEWAKE_RENDER;

        intel_uncore_forcewake_get(dev_priv, fw);

        I915_WRITE_FW(reg, 0x1);

        if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
                gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
        else
                vgpu_vreg(vgpu, regs[ring_id]) = 0;

        intel_uncore_forcewake_put(dev_priv, fw);

        gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}

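/*
 * Save the host's 64 MOCS control values for @ring_id and program the
 * vGPU's virtual values in their place; for RCS this also covers the 32
 * L3 MOCS registers starting at 0xb020.
 */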
static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        i915_reg_t offset, l3_offset;
        u32 regs[] = {
                [RCS] = 0xc800,
                [VCS] = 0xc900,
                [VCS2] = 0xca00,
                [BCS] = 0xcc00,
                [VECS] = 0xcb00,
        };
        int i;

        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;

        offset.reg = regs[ring_id];
        for (i = 0; i < 64; i++) {
                gen9_render_mocs[ring_id][i] = I915_READ(offset);
                I915_WRITE(offset, vgpu_vreg(vgpu, offset));
                POSTING_READ(offset);
                offset.reg += 4;
        }

        if (ring_id == RCS) {
                l3_offset.reg = 0xb020;
                for (i = 0; i < 32; i++) {
                        gen9_render_mocs_L3[i] = I915_READ(l3_offset);
                        I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
                        POSTING_READ(l3_offset);
                        l3_offset.reg += 4;
                }
        }
}

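/*
 * The inverse of load_mocs(): save the vGPU's current MOCS values into its
 * vregs, then write back the host values captured by load_mocs().
 */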
static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        i915_reg_t offset, l3_offset;
        u32 regs[] = {
                [RCS] = 0xc800,
                [VCS] = 0xc900,
                [VCS2] = 0xca00,
                [BCS] = 0xcc00,
                [VECS] = 0xcb00,
        };
        int i;

        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;

        offset.reg = regs[ring_id];
        for (i = 0; i < 64; i++) {
                vgpu_vreg(vgpu, offset) = I915_READ(offset);
                I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
                POSTING_READ(offset);
                offset.reg += 4;
        }

        if (ring_id == RCS) {
                l3_offset.reg = 0xb020;
                for (i = 0; i < 32; i++) {
                        vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
                        I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
                        POSTING_READ(l3_offset);
                        l3_offset.reg += 4;
                }
        }
}

#define CTX_CONTEXT_CONTROL_VAL 0x03

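/*
 * Switch the tracked render MMIO registers to the vGPU's values before its
 * workload runs on @ring_id: save the current host value of each register,
 * write the vGPU's virtual value (with the write mask in the upper 16 bits
 * for masked registers), and flush any pending TLB invalidation.
 */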
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct render_mmio *mmio;
        u32 v;
        int i, array_size;
        u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
        u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
        u32 inhibit_mask =
                _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

        if (IS_SKYLAKE(vgpu->gvt->dev_priv)
                || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
                mmio = gen9_render_mmio_list;
                array_size = ARRAY_SIZE(gen9_render_mmio_list);
                load_mocs(vgpu, ring_id);
        } else {
                mmio = gen8_render_mmio_list;
                array_size = ARRAY_SIZE(gen8_render_mmio_list);
        }

        for (i = 0; i < array_size; i++, mmio++) {
                if (mmio->ring_id != ring_id)
                        continue;

                mmio->value = I915_READ(mmio->reg);

                /*
                 * Load in_context mmio registers with an explicit MMIO
                 * write only for an inhibit context, which does not restore
                 * state from the context image. Otherwise skip the write,
                 * since the context restore will program them.
                 */
                if (mmio->in_context &&
                                ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
                                i915.enable_execlists)
                        continue;

                if (mmio->mask)
                        v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
                else
                        v = vgpu_vreg(vgpu, mmio->reg);

                I915_WRITE(mmio->reg, v);
                POSTING_READ(mmio->reg);

                gvt_dbg_render("load reg %x old %x new %x\n",
                                i915_mmio_reg_offset(mmio->reg),
                                mmio->value, v);
        }
        handle_tlb_pending_event(vgpu, ring_id);
}

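/*
 * Switch the tracked render MMIO registers back to the host values after
 * the vGPU's workload completes on @ring_id: save each register's current
 * value into the vGPU's vregs, then write back the host value captured by
 * intel_gvt_load_render_mmio().
 */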
void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct render_mmio *mmio;
        u32 v;
        int i, array_size;

        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                mmio = gen9_render_mmio_list;
                array_size = ARRAY_SIZE(gen9_render_mmio_list);
                restore_mocs(vgpu, ring_id);
        } else {
                mmio = gen8_render_mmio_list;
                array_size = ARRAY_SIZE(gen8_render_mmio_list);
        }

        for (i = 0; i < array_size; i++, mmio++) {
                if (mmio->ring_id != ring_id)
                        continue;

                vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);

                if (mmio->mask) {
                        vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
                        v = mmio->value | (mmio->mask << 16);
                } else {
                        v = mmio->value;
                }

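                /* in_context registers are switched with their hardware
                 * context, so no explicit MMIO write is needed here.
                 */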
                if (mmio->in_context)
                        continue;

                I915_WRITE(mmio->reg, v);
                POSTING_READ(mmio->reg);

                gvt_dbg_render("restore reg %x old %x new %x\n",
                                i915_mmio_reg_offset(mmio->reg),
                                mmio->value, v);
        }
}