drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c (muen/linux.git, commit d6429157ffb60c530eba4eefb09bbf0b63dd5d2d)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Christian König <christian.koenig@amd.com>
23  */
24
25 #include <linux/firmware.h>
26 #include <drm/drmP.h>
27 #include "amdgpu.h"
28 #include "amdgpu_uvd.h"
29 #include "vid.h"
30 #include "uvd/uvd_6_0_d.h"
31 #include "uvd/uvd_6_0_sh_mask.h"
32 #include "oss/oss_2_0_d.h"
33 #include "oss/oss_2_0_sh_mask.h"
34 #include "smu/smu_7_1_3_d.h"
35 #include "smu/smu_7_1_3_sh_mask.h"
36 #include "bif/bif_5_1_d.h"
37 #include "gmc/gmc_8_1_d.h"
38 #include "vi.h"
39
40 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
41 static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
42 static int uvd_v6_0_start(struct amdgpu_device *adev);
43 static void uvd_v6_0_stop(struct amdgpu_device *adev);
44 static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
45 static int uvd_v6_0_set_clockgating_state(void *handle,
46                                           enum amd_clockgating_state state);
47 static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
48                                  bool enable);
49
50 /**
51  * uvd_v6_0_ring_get_rptr - get read pointer
52  *
53  * @ring: amdgpu_ring pointer
54  *
55  * Returns the current hardware read pointer
56  */
57 static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
58 {
59         struct amdgpu_device *adev = ring->adev;
60
61         return RREG32(mmUVD_RBC_RB_RPTR);
62 }
63
64 /**
65  * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
66  *
67  * @ring: amdgpu_ring pointer
68  *
69  * Returns the current hardware enc read pointer
70  */
71 static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
72 {
73         struct amdgpu_device *adev = ring->adev;
74
75         if (ring == &adev->uvd.ring_enc[0])
76                 return RREG32(mmUVD_RB_RPTR);
77         else
78                 return RREG32(mmUVD_RB_RPTR2);
79 }
80 /**
81  * uvd_v6_0_ring_get_wptr - get write pointer
82  *
83  * @ring: amdgpu_ring pointer
84  *
85  * Returns the current hardware write pointer
86  */
87 static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
88 {
89         struct amdgpu_device *adev = ring->adev;
90
91         return RREG32(mmUVD_RBC_RB_WPTR);
92 }
93
94 /**
95  * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
96  *
97  * @ring: amdgpu_ring pointer
98  *
99  * Returns the current hardware enc write pointer
100  */
101 static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
102 {
103         struct amdgpu_device *adev = ring->adev;
104
105         if (ring == &adev->uvd.ring_enc[0])
106                 return RREG32(mmUVD_RB_WPTR);
107         else
108                 return RREG32(mmUVD_RB_WPTR2);
109 }
110
111 /**
112  * uvd_v6_0_ring_set_wptr - set write pointer
113  *
114  * @ring: amdgpu_ring pointer
115  *
116  * Commits the write pointer to the hardware
117  */
118 static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
119 {
120         struct amdgpu_device *adev = ring->adev;
121
122         WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
123 }
124
125 /**
126  * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
127  *
128  * @ring: amdgpu_ring pointer
129  *
130  * Commits the enc write pointer to the hardware
131  */
132 static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
133 {
134         struct amdgpu_device *adev = ring->adev;
135
136         if (ring == &adev->uvd.ring_enc[0])
137                 WREG32(mmUVD_RB_WPTR,
138                         lower_32_bits(ring->wptr));
139         else
140                 WREG32(mmUVD_RB_WPTR2,
141                         lower_32_bits(ring->wptr));
142 }
143
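/**
 * uvd_v6_0_early_init - set ring and irq callbacks
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Install the UVD ring and interrupt handler function pointers.
 */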
144 static int uvd_v6_0_early_init(void *handle)
145 {
146         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
147
148         uvd_v6_0_set_ring_funcs(adev);
149         uvd_v6_0_set_irq_funcs(adev);
150
151         return 0;
152 }
153
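/**
 * uvd_v6_0_sw_init - sw init for UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Register the UVD TRAP interrupt, perform the common UVD software
 * setup and initialize the decode ring.
 */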
154 static int uvd_v6_0_sw_init(void *handle)
155 {
156         struct amdgpu_ring *ring;
157         int r;
158         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
159
160         /* UVD TRAP */
161         r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
162         if (r)
163                 return r;
164
165         r = amdgpu_uvd_sw_init(adev);
166         if (r)
167                 return r;
168
169         r = amdgpu_uvd_resume(adev);
170         if (r)
171                 return r;
172
173         ring = &adev->uvd.ring;
174         sprintf(ring->name, "uvd");
175         r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
176
177         return r;
178 }
179
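/**
 * uvd_v6_0_sw_fini - sw fini for UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Suspend the UVD block and tear down the common UVD software state.
 */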
180 static int uvd_v6_0_sw_fini(void *handle)
181 {
182         int r;
183         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
184
185         r = amdgpu_uvd_suspend(adev);
186         if (r)
187                 return r;
188
189         return amdgpu_uvd_sw_fini(adev);
190 }
191
192 /**
193  * uvd_v6_0_hw_init - start and test UVD block
194  *
195  * @handle: handle used to pass amdgpu_device pointer
196  *
197  * Initialize the hardware, boot up the VCPU and do some testing
198  */
199 static int uvd_v6_0_hw_init(void *handle)
200 {
201         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
202         struct amdgpu_ring *ring = &adev->uvd.ring;
203         uint32_t tmp;
204         int r;
205
206         amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
207         uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
208         uvd_v6_0_enable_mgcg(adev, true);
209
210         ring->ready = true;
211         r = amdgpu_ring_test_ring(ring);
212         if (r) {
213                 ring->ready = false;
214                 goto done;
215         }
216
217         r = amdgpu_ring_alloc(ring, 10);
218         if (r) {
219                 DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
220                 goto done;
221         }
222
223         tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
224         amdgpu_ring_write(ring, tmp);
225         amdgpu_ring_write(ring, 0xFFFFF);
226
227         tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
228         amdgpu_ring_write(ring, tmp);
229         amdgpu_ring_write(ring, 0xFFFFF);
230
231         tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
232         amdgpu_ring_write(ring, tmp);
233         amdgpu_ring_write(ring, 0xFFFFF);
234
235         /* Clear timeout status bits */
236         amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
237         amdgpu_ring_write(ring, 0x8);
238
239         amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
240         amdgpu_ring_write(ring, 3);
241
242         amdgpu_ring_commit(ring);
243
244 done:
245         if (!r)
246                 DRM_INFO("UVD initialized successfully.\n");
247
248         return r;
249 }
250
251 /**
252  * uvd_v6_0_hw_fini - stop the hardware block
253  *
254  * @handle: handle used to pass amdgpu_device pointer
255  *
256  * Stop the UVD block and mark the ring as no longer ready
257  */
258 static int uvd_v6_0_hw_fini(void *handle)
259 {
260         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
261         struct amdgpu_ring *ring = &adev->uvd.ring;
262
263         if (RREG32(mmUVD_STATUS) != 0)
264                 uvd_v6_0_stop(adev);
265
266         ring->ready = false;
267
268         return 0;
269 }
270
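/**
 * uvd_v6_0_suspend - suspend the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the hardware and, on dGPUs, save the UVD software state.
 */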
271 static int uvd_v6_0_suspend(void *handle)
272 {
273         int r;
274         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
275
276         r = uvd_v6_0_hw_fini(adev);
277         if (r)
278                 return r;
279
280         /* Skip this for APU for now */
281         if (!(adev->flags & AMD_IS_APU))
282                 r = amdgpu_uvd_suspend(adev);
283
284         return r;
285 }
286
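/**
 * uvd_v6_0_resume - resume the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Restore the UVD software state (on dGPUs) and reinitialize the hardware.
 */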
287 static int uvd_v6_0_resume(void *handle)
288 {
289         int r;
290         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
291
292         /* Skip this for APU for now */
293         if (!(adev->flags & AMD_IS_APU)) {
294                 r = amdgpu_uvd_resume(adev);
295                 if (r)
296                         return r;
297         }
298         return uvd_v6_0_hw_init(adev);
299 }
300
301 /**
302  * uvd_v6_0_mc_resume - memory controller programming
303  *
304  * @adev: amdgpu_device pointer
305  *
306  * Let the UVD memory controller know its offsets
307  */
308 static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
309 {
310         uint64_t offset;
311         uint32_t size;
312
313         /* program memory controller bits 0-27 */
314         WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
315                         lower_32_bits(adev->uvd.gpu_addr));
316         WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
317                         upper_32_bits(adev->uvd.gpu_addr));
318
319         offset = AMDGPU_UVD_FIRMWARE_OFFSET;
320         size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
321         WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
322         WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
323
324         offset += size;
325         size = AMDGPU_UVD_HEAP_SIZE;
326         WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
327         WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
328
329         offset += size;
330         size = AMDGPU_UVD_STACK_SIZE +
331                (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
332         WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
333         WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
334
335         WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
336         WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
337         WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
338
339         WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
340 }
341
342 #if 0
343 static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
344                 bool enable)
345 {
346         u32 data, data1;
347
348         data = RREG32(mmUVD_CGC_GATE);
349         data1 = RREG32(mmUVD_SUVD_CGC_GATE);
350         if (enable) {
351                 data |= UVD_CGC_GATE__SYS_MASK |
352                                 UVD_CGC_GATE__UDEC_MASK |
353                                 UVD_CGC_GATE__MPEG2_MASK |
354                                 UVD_CGC_GATE__RBC_MASK |
355                                 UVD_CGC_GATE__LMI_MC_MASK |
356                                 UVD_CGC_GATE__IDCT_MASK |
357                                 UVD_CGC_GATE__MPRD_MASK |
358                                 UVD_CGC_GATE__MPC_MASK |
359                                 UVD_CGC_GATE__LBSI_MASK |
360                                 UVD_CGC_GATE__LRBBM_MASK |
361                                 UVD_CGC_GATE__UDEC_RE_MASK |
362                                 UVD_CGC_GATE__UDEC_CM_MASK |
363                                 UVD_CGC_GATE__UDEC_IT_MASK |
364                                 UVD_CGC_GATE__UDEC_DB_MASK |
365                                 UVD_CGC_GATE__UDEC_MP_MASK |
366                                 UVD_CGC_GATE__WCB_MASK |
367                                 UVD_CGC_GATE__VCPU_MASK |
368                                 UVD_CGC_GATE__SCPU_MASK;
369                 data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
370                                 UVD_SUVD_CGC_GATE__SIT_MASK |
371                                 UVD_SUVD_CGC_GATE__SMP_MASK |
372                                 UVD_SUVD_CGC_GATE__SCM_MASK |
373                                 UVD_SUVD_CGC_GATE__SDB_MASK |
374                                 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
375                                 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
376                                 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
377                                 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
378                                 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
379                                 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
380                                 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
381                                 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
382         } else {
383                 data &= ~(UVD_CGC_GATE__SYS_MASK |
384                                 UVD_CGC_GATE__UDEC_MASK |
385                                 UVD_CGC_GATE__MPEG2_MASK |
386                                 UVD_CGC_GATE__RBC_MASK |
387                                 UVD_CGC_GATE__LMI_MC_MASK |
388                                 UVD_CGC_GATE__LMI_UMC_MASK |
389                                 UVD_CGC_GATE__IDCT_MASK |
390                                 UVD_CGC_GATE__MPRD_MASK |
391                                 UVD_CGC_GATE__MPC_MASK |
392                                 UVD_CGC_GATE__LBSI_MASK |
393                                 UVD_CGC_GATE__LRBBM_MASK |
394                                 UVD_CGC_GATE__UDEC_RE_MASK |
395                                 UVD_CGC_GATE__UDEC_CM_MASK |
396                                 UVD_CGC_GATE__UDEC_IT_MASK |
397                                 UVD_CGC_GATE__UDEC_DB_MASK |
398                                 UVD_CGC_GATE__UDEC_MP_MASK |
399                                 UVD_CGC_GATE__WCB_MASK |
400                                 UVD_CGC_GATE__VCPU_MASK |
401                                 UVD_CGC_GATE__SCPU_MASK);
402                 data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
403                                 UVD_SUVD_CGC_GATE__SIT_MASK |
404                                 UVD_SUVD_CGC_GATE__SMP_MASK |
405                                 UVD_SUVD_CGC_GATE__SCM_MASK |
406                                 UVD_SUVD_CGC_GATE__SDB_MASK |
407                                 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
408                                 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
409                                 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
410                                 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
411                                 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
412                                 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
413                                 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
414                                 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
415         }
416         WREG32(mmUVD_CGC_GATE, data);
417         WREG32(mmUVD_SUVD_CGC_GATE, data1);
418 }
419 #endif
420
421 /**
422  * uvd_v6_0_start - start UVD block
423  *
424  * @adev: amdgpu_device pointer
425  *
426  * Setup and start the UVD block
427  */
428 static int uvd_v6_0_start(struct amdgpu_device *adev)
429 {
430         struct amdgpu_ring *ring = &adev->uvd.ring;
431         uint32_t rb_bufsz, tmp;
432         uint32_t lmi_swap_cntl;
433         uint32_t mp_swap_cntl;
434         int i, j, r;
435
436         /* disable DPG */
437         WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
438
439         /* disable byte swapping */
440         lmi_swap_cntl = 0;
441         mp_swap_cntl = 0;
442
443         uvd_v6_0_mc_resume(adev);
444
445         /* disable interrupt */
446         WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);
447
448         /* stall UMC and register bus before resetting VCPU */
449         WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
450         mdelay(1);
451
452         /* put LMI, VCPU, RBC etc... into reset */
453         WREG32(mmUVD_SOFT_RESET,
454                 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
455                 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
456                 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
457                 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
458                 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
459                 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
460                 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
461                 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
462         mdelay(5);
463
464         /* take UVD block out of reset */
465         WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
466         mdelay(5);
467
468         /* initialize UVD memory controller */
469         WREG32(mmUVD_LMI_CTRL,
470                 (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
471                 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
472                 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
473                 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
474                 UVD_LMI_CTRL__REQ_MODE_MASK |
475                 UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);
476
477 #ifdef __BIG_ENDIAN
478         /* swap (8 in 32) RB and IB */
479         lmi_swap_cntl = 0xa;
480         mp_swap_cntl = 0;
481 #endif
482         WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
483         WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
484
485         WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
486         WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
487         WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
488         WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
489         WREG32(mmUVD_MPC_SET_ALU, 0);
490         WREG32(mmUVD_MPC_SET_MUX, 0x88);
491
492         /* take all subblocks out of reset, except VCPU */
493         WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
494         mdelay(5);
495
496         /* enable VCPU clock */
497         WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
498
499         /* enable UMC */
500         WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);
501
502         /* boot up the VCPU */
503         WREG32(mmUVD_SOFT_RESET, 0);
504         mdelay(10);
505
506         for (i = 0; i < 10; ++i) {
507                 uint32_t status;
508
509                 for (j = 0; j < 100; ++j) {
510                         status = RREG32(mmUVD_STATUS);
511                         if (status & 2)
512                                 break;
513                         mdelay(10);
514                 }
515                 r = 0;
516                 if (status & 2)
517                         break;
518
519                 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
520                 WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
521                 mdelay(10);
522                 WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
523                 mdelay(10);
524                 r = -1;
525         }
526
527         if (r) {
528                 DRM_ERROR("UVD not responding, giving up!!!\n");
529                 return r;
530         }
531         /* enable master interrupt */
532         WREG32_P(mmUVD_MASTINT_EN,
533                 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
534                 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
535
536         /* clear bit 4 of UVD_STATUS */
537         WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
538
539         /* force RBC into idle state */
540         rb_bufsz = order_base_2(ring->ring_size);
541         tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
542         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
543         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
544         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
545         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
546         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
547         WREG32(mmUVD_RBC_RB_CNTL, tmp);
548
549         /* set the write pointer delay */
550         WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
551
552         /* set the wb address */
553         WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
554
555         /* program the RB_BASE for ring buffer */
556         WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
557                         lower_32_bits(ring->gpu_addr));
558         WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
559                         upper_32_bits(ring->gpu_addr));
560
561         /* Initialize the ring buffer's read and write pointers */
562         WREG32(mmUVD_RBC_RB_RPTR, 0);
563
564         ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
565         WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
566
567         WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
568
569         return 0;
570 }
571
572 /**
573  * uvd_v6_0_stop - stop UVD block
574  *
575  * @adev: amdgpu_device pointer
576  *
577  * stop the UVD block
578  */
579 static void uvd_v6_0_stop(struct amdgpu_device *adev)
580 {
581         /* force RBC into idle state */
582         WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
583
584         /* Stall UMC and register bus before resetting VCPU */
585         WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
586         mdelay(1);
587
588         /* put VCPU into reset */
589         WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
590         mdelay(5);
591
592         /* disable VCPU clock */
593         WREG32(mmUVD_VCPU_CNTL, 0x0);
594
595         /* Unstall UMC and register bus */
596         WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
597
598         WREG32(mmUVD_STATUS, 0);
599 }
600
601 /**
602  * uvd_v6_0_ring_emit_fence - emit a fence & trap command
603  *
604  * @ring: amdgpu_ring pointer
605  * @fence: fence to emit
606  *
607  * Write a fence and a trap command to the ring.
608  */
609 static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
610                                      unsigned flags)
611 {
612         WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
613
614         amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
615         amdgpu_ring_write(ring, seq);
616         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
617         amdgpu_ring_write(ring, addr & 0xffffffff);
618         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
619         amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
620         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
621         amdgpu_ring_write(ring, 0);
622
623         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
624         amdgpu_ring_write(ring, 0);
625         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
626         amdgpu_ring_write(ring, 0);
627         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
628         amdgpu_ring_write(ring, 2);
629 }
630
631 /**
632  * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
633  *
634  * @ring: amdgpu_ring pointer
635  * @fence: fence to emit
636  *
637  * Write an enc fence and a trap command to the ring.
638  */
639 static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
640                         u64 seq, unsigned flags)
641 {
642         WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
643
644         amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
645         amdgpu_ring_write(ring, addr);
646         amdgpu_ring_write(ring, upper_32_bits(addr));
647         amdgpu_ring_write(ring, seq);
648         amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
649 }
650
651 /**
652  * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
653  *
654  * @ring: amdgpu_ring pointer
655  *
656  * Emits an hdp flush.
657  */
658 static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
659 {
660         amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
661         amdgpu_ring_write(ring, 0);
662 }
663
664 /**
665  * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
666  *
667  * @ring: amdgpu_ring pointer
668  *
669  * Emits an hdp invalidate.
670  */
671 static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
672 {
673         amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
674         amdgpu_ring_write(ring, 1);
675 }
676
677 /**
678  * uvd_v6_0_ring_test_ring - register write test
679  *
680  * @ring: amdgpu_ring pointer
681  *
682  * Test if we can successfully write to the context register
683  */
684 static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
685 {
686         struct amdgpu_device *adev = ring->adev;
687         uint32_t tmp = 0;
688         unsigned i;
689         int r;
690
691         WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
692         r = amdgpu_ring_alloc(ring, 3);
693         if (r) {
694                 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
695                           ring->idx, r);
696                 return r;
697         }
698         amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
699         amdgpu_ring_write(ring, 0xDEADBEEF);
700         amdgpu_ring_commit(ring);
701         for (i = 0; i < adev->usec_timeout; i++) {
702                 tmp = RREG32(mmUVD_CONTEXT_ID);
703                 if (tmp == 0xDEADBEEF)
704                         break;
705                 DRM_UDELAY(1);
706         }
707
708         if (i < adev->usec_timeout) {
709                 DRM_INFO("ring test on %d succeeded in %d usecs\n",
710                          ring->idx, i);
711         } else {
712                 DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
713                           ring->idx, tmp);
714                 r = -EINVAL;
715         }
716         return r;
717 }
718
719 /**
720  * uvd_v6_0_ring_emit_ib - execute indirect buffer
721  *
722  * @ring: amdgpu_ring pointer
723  * @ib: indirect buffer to execute
724  *
725  * Write ring commands to execute the indirect buffer
726  */
727 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
728                                   struct amdgpu_ib *ib,
729                                   unsigned vm_id, bool ctx_switch)
730 {
731         amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
732         amdgpu_ring_write(ring, vm_id);
733
734         amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
735         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
736         amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
737         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
738         amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
739         amdgpu_ring_write(ring, ib->length_dw);
740 }
741
742 /**
743  * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
744  *
745  * @ring: amdgpu_ring pointer
746  * @ib: indirect buffer to execute
747  *
748  * Write enc ring commands to execute the indirect buffer
749  */
750 static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
751                 struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
752 {
753         amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
754         amdgpu_ring_write(ring, vm_id);
755         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
756         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
757         amdgpu_ring_write(ring, ib->length_dw);
758 }
759
760 static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
761                                          unsigned vm_id, uint64_t pd_addr)
762 {
763         uint32_t reg;
764
765         if (vm_id < 8)
766                 reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
767         else
768                 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
769
770         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
771         amdgpu_ring_write(ring, reg << 2);
772         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
773         amdgpu_ring_write(ring, pd_addr >> 12);
774         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
775         amdgpu_ring_write(ring, 0x8);
776
777         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
778         amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
779         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
780         amdgpu_ring_write(ring, 1 << vm_id);
781         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
782         amdgpu_ring_write(ring, 0x8);
783
784         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
785         amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
786         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
787         amdgpu_ring_write(ring, 0);
788         amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
789         amdgpu_ring_write(ring, 1 << vm_id); /* mask */
790         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
791         amdgpu_ring_write(ring, 0xC);
792 }
793
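/**
 * uvd_v6_0_ring_emit_pipeline_sync - emit a pipeline sync on the decode ring
 *
 * @ring: amdgpu_ring pointer
 *
 * Make the ring wait until the fence value at the fence address
 * reaches the last emitted sync sequence number.
 */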
794 static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
795 {
796         uint32_t seq = ring->fence_drv.sync_seq;
797         uint64_t addr = ring->fence_drv.gpu_addr;
798
799         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
800         amdgpu_ring_write(ring, lower_32_bits(addr));
801         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
802         amdgpu_ring_write(ring, upper_32_bits(addr));
803         amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
804         amdgpu_ring_write(ring, 0xffffffff); /* mask */
805         amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
806         amdgpu_ring_write(ring, seq);
807         amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
808         amdgpu_ring_write(ring, 0xE);
809 }
810
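/**
 * uvd_v6_0_enc_ring_emit_pipeline_sync - emit a pipeline sync on the enc ring
 *
 * @ring: amdgpu_ring pointer
 *
 * Emit a WAIT_GE command so the encode ring stalls until the fence
 * value at the fence address reaches the last emitted sync sequence.
 */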
811 static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
812 {
813         uint32_t seq = ring->fence_drv.sync_seq;
814         uint64_t addr = ring->fence_drv.gpu_addr;
815
816         amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
817         amdgpu_ring_write(ring, lower_32_bits(addr));
818         amdgpu_ring_write(ring, upper_32_bits(addr));
819         amdgpu_ring_write(ring, seq);
820 }
821
822 static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
823 {
824         amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
825 }
826
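/**
 * uvd_v6_0_enc_ring_emit_vm_flush - emit a VM flush on the enc ring
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VM ID to flush
 * @pd_addr: page directory address
 *
 * Update the page table base and flush the TLB for the given VM ID.
 */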
827 static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
828         unsigned int vm_id, uint64_t pd_addr)
829 {
830         amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
831         amdgpu_ring_write(ring, vm_id);
832         amdgpu_ring_write(ring, pd_addr >> 12);
833
834         amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
835         amdgpu_ring_write(ring, vm_id);
836 }
837
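/**
 * uvd_v6_0_is_idle - check UVD idle status
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Returns true if the UVD busy bit in SRBM_STATUS is clear.
 */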
838 static bool uvd_v6_0_is_idle(void *handle)
839 {
840         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
841
842         return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
843 }
844
845 static int uvd_v6_0_wait_for_idle(void *handle)
846 {
847         unsigned i;
848         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
849
850         for (i = 0; i < adev->usec_timeout; i++) {
851                 if (uvd_v6_0_is_idle(handle))
852                         return 0;
853         }
854         return -ETIMEDOUT;
855 }
856
857 #define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
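/**
 * uvd_v6_0_check_soft_reset - check whether UVD needs a soft reset
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Returns true and records the required SRBM soft reset bits if the
 * UVD block is busy or has a request pending.
 */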
858 static bool uvd_v6_0_check_soft_reset(void *handle)
859 {
860         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
861         u32 srbm_soft_reset = 0;
862         u32 tmp = RREG32(mmSRBM_STATUS);
863
864         if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
865             REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
866             (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
867                 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
868
869         if (srbm_soft_reset) {
870                 adev->uvd.srbm_soft_reset = srbm_soft_reset;
871                 return true;
872         } else {
873                 adev->uvd.srbm_soft_reset = 0;
874                 return false;
875         }
876 }
877
878 static int uvd_v6_0_pre_soft_reset(void *handle)
879 {
880         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
881
882         if (!adev->uvd.srbm_soft_reset)
883                 return 0;
884
885         uvd_v6_0_stop(adev);
886         return 0;
887 }
888
889 static int uvd_v6_0_soft_reset(void *handle)
890 {
891         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
892         u32 srbm_soft_reset;
893
894         if (!adev->uvd.srbm_soft_reset)
895                 return 0;
896         srbm_soft_reset = adev->uvd.srbm_soft_reset;
897
898         if (srbm_soft_reset) {
899                 u32 tmp;
900
901                 tmp = RREG32(mmSRBM_SOFT_RESET);
902                 tmp |= srbm_soft_reset;
903                 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
904                 WREG32(mmSRBM_SOFT_RESET, tmp);
905                 tmp = RREG32(mmSRBM_SOFT_RESET);
906
907                 udelay(50);
908
909                 tmp &= ~srbm_soft_reset;
910                 WREG32(mmSRBM_SOFT_RESET, tmp);
911                 tmp = RREG32(mmSRBM_SOFT_RESET);
912
913                 /* Wait a little for things to settle down */
914                 udelay(50);
915         }
916
917         return 0;
918 }
919
920 static int uvd_v6_0_post_soft_reset(void *handle)
921 {
922         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
923
924         if (!adev->uvd.srbm_soft_reset)
925                 return 0;
926
927         mdelay(5);
928
929         return uvd_v6_0_start(adev);
930 }
931
932 static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
933                                         struct amdgpu_irq_src *source,
934                                         unsigned type,
935                                         enum amdgpu_interrupt_state state)
936 {
937         /* TODO */
938         return 0;
939 }
940
941 static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
942                                       struct amdgpu_irq_src *source,
943                                       struct amdgpu_iv_entry *entry)
944 {
945         DRM_DEBUG("IH: UVD TRAP\n");
946         amdgpu_fence_process(&adev->uvd.ring);
947         return 0;
948 }
949
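/**
 * uvd_v6_0_enable_clock_gating - toggle UVD coarse clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the clock gates
 *
 * Program UVD_CGC_GATE and UVD_SUVD_CGC_GATE to gate or ungate the
 * clocks of the UVD sub-blocks.
 */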
950 static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
951 {
952         uint32_t data1, data3;
953
954         data1 = RREG32(mmUVD_SUVD_CGC_GATE);
955         data3 = RREG32(mmUVD_CGC_GATE);
956
957         data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
958                      UVD_SUVD_CGC_GATE__SIT_MASK |
959                      UVD_SUVD_CGC_GATE__SMP_MASK |
960                      UVD_SUVD_CGC_GATE__SCM_MASK |
961                      UVD_SUVD_CGC_GATE__SDB_MASK |
962                      UVD_SUVD_CGC_GATE__SRE_H264_MASK |
963                      UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
964                      UVD_SUVD_CGC_GATE__SIT_H264_MASK |
965                      UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
966                      UVD_SUVD_CGC_GATE__SCM_H264_MASK |
967                      UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
968                      UVD_SUVD_CGC_GATE__SDB_H264_MASK |
969                      UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
970
971         if (enable) {
972                 data3 |= (UVD_CGC_GATE__SYS_MASK       |
973                         UVD_CGC_GATE__UDEC_MASK      |
974                         UVD_CGC_GATE__MPEG2_MASK     |
975                         UVD_CGC_GATE__RBC_MASK       |
976                         UVD_CGC_GATE__LMI_MC_MASK    |
977                         UVD_CGC_GATE__LMI_UMC_MASK   |
978                         UVD_CGC_GATE__IDCT_MASK      |
979                         UVD_CGC_GATE__MPRD_MASK      |
980                         UVD_CGC_GATE__MPC_MASK       |
981                         UVD_CGC_GATE__LBSI_MASK      |
982                         UVD_CGC_GATE__LRBBM_MASK     |
983                         UVD_CGC_GATE__UDEC_RE_MASK   |
984                         UVD_CGC_GATE__UDEC_CM_MASK   |
985                         UVD_CGC_GATE__UDEC_IT_MASK   |
986                         UVD_CGC_GATE__UDEC_DB_MASK   |
987                         UVD_CGC_GATE__UDEC_MP_MASK   |
988                         UVD_CGC_GATE__WCB_MASK       |
989                         UVD_CGC_GATE__JPEG_MASK      |
990                         UVD_CGC_GATE__SCPU_MASK      |
991                         UVD_CGC_GATE__JPEG2_MASK);
992                 /* Only when PG is enabled can we gate the clock to the VCPU */
993                 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
994                         data3 |= UVD_CGC_GATE__VCPU_MASK;
995
996                 data3 &= ~UVD_CGC_GATE__REGS_MASK;
997         } else {
998                 data3 = 0;
999         }
1000
1001         WREG32(mmUVD_SUVD_CGC_GATE, data1);
1002         WREG32(mmUVD_CGC_GATE, data3);
1003 }
1004
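/**
 * uvd_v6_0_set_sw_clock_gating - configure software clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Set the dynamic clock mode, gating delay and off delay in
 * UVD_CGC_CTRL and clear the per-block mode overrides so all
 * sub-blocks follow the dynamic clock gating policy.
 */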
1005 static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
1006 {
1007         uint32_t data, data2;
1008
1009         data = RREG32(mmUVD_CGC_CTRL);
1010         data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
1011
1012
1013         data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1014                   UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1015
1016
1017         data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1018                 (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1019                 (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1020
1021         data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1022                         UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1023                         UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1024                         UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1025                         UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1026                         UVD_CGC_CTRL__SYS_MODE_MASK |
1027                         UVD_CGC_CTRL__UDEC_MODE_MASK |
1028                         UVD_CGC_CTRL__MPEG2_MODE_MASK |
1029                         UVD_CGC_CTRL__REGS_MODE_MASK |
1030                         UVD_CGC_CTRL__RBC_MODE_MASK |
1031                         UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1032                         UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1033                         UVD_CGC_CTRL__IDCT_MODE_MASK |
1034                         UVD_CGC_CTRL__MPRD_MODE_MASK |
1035                         UVD_CGC_CTRL__MPC_MODE_MASK |
1036                         UVD_CGC_CTRL__LBSI_MODE_MASK |
1037                         UVD_CGC_CTRL__LRBBM_MODE_MASK |
1038                         UVD_CGC_CTRL__WCB_MODE_MASK |
1039                         UVD_CGC_CTRL__VCPU_MODE_MASK |
1040                         UVD_CGC_CTRL__JPEG_MODE_MASK |
1041                         UVD_CGC_CTRL__SCPU_MODE_MASK |
1042                         UVD_CGC_CTRL__JPEG2_MODE_MASK);
1043         data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1044                         UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1045                         UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1046                         UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1047                         UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1048
1049         WREG32(mmUVD_CGC_CTRL, data);
1050         WREG32(mmUVD_SUVD_CGC_CTRL, data2);
1051 }
1052
1053 #if 0
1054 static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
1055 {
1056         uint32_t data, data1, cgc_flags, suvd_flags;
1057
1058         data = RREG32(mmUVD_CGC_GATE);
1059         data1 = RREG32(mmUVD_SUVD_CGC_GATE);
1060
1061         cgc_flags = UVD_CGC_GATE__SYS_MASK |
1062                 UVD_CGC_GATE__UDEC_MASK |
1063                 UVD_CGC_GATE__MPEG2_MASK |
1064                 UVD_CGC_GATE__RBC_MASK |
1065                 UVD_CGC_GATE__LMI_MC_MASK |
1066                 UVD_CGC_GATE__IDCT_MASK |
1067                 UVD_CGC_GATE__MPRD_MASK |
1068                 UVD_CGC_GATE__MPC_MASK |
1069                 UVD_CGC_GATE__LBSI_MASK |
1070                 UVD_CGC_GATE__LRBBM_MASK |
1071                 UVD_CGC_GATE__UDEC_RE_MASK |
1072                 UVD_CGC_GATE__UDEC_CM_MASK |
1073                 UVD_CGC_GATE__UDEC_IT_MASK |
1074                 UVD_CGC_GATE__UDEC_DB_MASK |
1075                 UVD_CGC_GATE__UDEC_MP_MASK |
1076                 UVD_CGC_GATE__WCB_MASK |
1077                 UVD_CGC_GATE__VCPU_MASK |
1078                 UVD_CGC_GATE__SCPU_MASK |
1079                 UVD_CGC_GATE__JPEG_MASK |
1080                 UVD_CGC_GATE__JPEG2_MASK;
1081
1082         suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1083                                 UVD_SUVD_CGC_GATE__SIT_MASK |
1084                                 UVD_SUVD_CGC_GATE__SMP_MASK |
1085                                 UVD_SUVD_CGC_GATE__SCM_MASK |
1086                                 UVD_SUVD_CGC_GATE__SDB_MASK;
1087
1088         data |= cgc_flags;
1089         data1 |= suvd_flags;
1090
1091         WREG32(mmUVD_CGC_GATE, data);
1092         WREG32(mmUVD_SUVD_CGC_GATE, data1);
1093 }
1094 #endif
1095
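/**
 * uvd_v6_0_enable_mgcg - enable/disable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Toggle the UVD memory clock gates and the dynamic clock mode bit
 * depending on whether MGCG is supported and requested.
 */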
1096 static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
1097                                  bool enable)
1098 {
1099         u32 orig, data;
1100
1101         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
1102                 data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
1103                 data |= 0xfff;
1104                 WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
1105
1106                 orig = data = RREG32(mmUVD_CGC_CTRL);
1107                 data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
1108                 if (orig != data)
1109                         WREG32(mmUVD_CGC_CTRL, data);
1110         } else {
1111                 data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
1112                 data &= ~0xfff;
1113                 WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
1114
1115                 orig = data = RREG32(mmUVD_CGC_CTRL);
1116                 data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
1117                 if (orig != data)
1118                         WREG32(mmUVD_CGC_CTRL, data);
1119         }
1120 }
1121
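/**
 * uvd_v6_0_set_clockgating_state - set UVD clock gating state
 *
 * @handle: handle used to pass amdgpu_device pointer
 * @state: clock gating state to set
 *
 * Wait for the block to go idle before gating, then program the
 * coarse and software clock gating registers accordingly.
 */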
1122 static int uvd_v6_0_set_clockgating_state(void *handle,
1123                                           enum amd_clockgating_state state)
1124 {
1125         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1126         bool enable = (state == AMD_CG_STATE_GATE);
1127
1128         if (enable) {
1129                 /* wait for STATUS to clear */
1130                 if (uvd_v6_0_wait_for_idle(handle))
1131                         return -EBUSY;
1132                 uvd_v6_0_enable_clock_gating(adev, true);
1133                 /* enable HW gates because UVD is idle */
1134 /*              uvd_v6_0_set_hw_clock_gating(adev); */
1135         } else {
1136                 /* disable HW gating and enable SW gating */
1137                 uvd_v6_0_enable_clock_gating(adev, false);
1138         }
1139         uvd_v6_0_set_sw_clock_gating(adev);
1140         return 0;
1141 }
1142
1143 static int uvd_v6_0_set_powergating_state(void *handle,
1144                                           enum amd_powergating_state state)
1145 {
1146         /* This doesn't actually powergate the UVD block.
1147          * That's done in the dpm code via the SMC.  This
1148          * just re-inits the block as necessary.  The actual
1149          * gating still happens in the dpm code.  We should
1150          * revisit this when there is a cleaner line between
1151          * the SMC and the HW blocks.
1152          */
1153         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1154         int ret = 0;
1155
1156         WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1157
1158         if (state == AMD_PG_STATE_GATE) {
1159                 uvd_v6_0_stop(adev);
1160         } else {
1161                 ret = uvd_v6_0_start(adev);
1162                 if (ret)
1163                         goto out;
1164         }
1165
1166 out:
1167         return ret;
1168 }
1169
1170 static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
1171 {
1172         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1173         int data;
1174
1175         mutex_lock(&adev->pm.mutex);
1176
1177         if (adev->flags & AMD_IS_APU)
1178                 data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
1179         else
1180                 data = RREG32_SMC(ixCURRENT_PG_STATUS);
1181
1182         if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
1183                 DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
1184                 goto out;
1185         }
1186
1187         /* AMD_CG_SUPPORT_UVD_MGCG */
1188         data = RREG32(mmUVD_CGC_CTRL);
1189         if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
1190                 *flags |= AMD_CG_SUPPORT_UVD_MGCG;
1191
1192 out:
1193         mutex_unlock(&adev->pm.mutex);
1194 }
1195
1196 static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
1197         .name = "uvd_v6_0",
1198         .early_init = uvd_v6_0_early_init,
1199         .late_init = NULL,
1200         .sw_init = uvd_v6_0_sw_init,
1201         .sw_fini = uvd_v6_0_sw_fini,
1202         .hw_init = uvd_v6_0_hw_init,
1203         .hw_fini = uvd_v6_0_hw_fini,
1204         .suspend = uvd_v6_0_suspend,
1205         .resume = uvd_v6_0_resume,
1206         .is_idle = uvd_v6_0_is_idle,
1207         .wait_for_idle = uvd_v6_0_wait_for_idle,
1208         .check_soft_reset = uvd_v6_0_check_soft_reset,
1209         .pre_soft_reset = uvd_v6_0_pre_soft_reset,
1210         .soft_reset = uvd_v6_0_soft_reset,
1211         .post_soft_reset = uvd_v6_0_post_soft_reset,
1212         .set_clockgating_state = uvd_v6_0_set_clockgating_state,
1213         .set_powergating_state = uvd_v6_0_set_powergating_state,
1214         .get_clockgating_state = uvd_v6_0_get_clockgating_state,
1215 };
1216
1217 static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
1218         .type = AMDGPU_RING_TYPE_UVD,
1219         .align_mask = 0xf,
1220         .nop = PACKET0(mmUVD_NO_OP, 0),
1221         .support_64bit_ptrs = false,
1222         .get_rptr = uvd_v6_0_ring_get_rptr,
1223         .get_wptr = uvd_v6_0_ring_get_wptr,
1224         .set_wptr = uvd_v6_0_ring_set_wptr,
1225         .parse_cs = amdgpu_uvd_ring_parse_cs,
1226         .emit_frame_size =
1227                 2 + /* uvd_v6_0_ring_emit_hdp_flush */
1228                 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
1229                 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
1230                 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
1231         .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
1232         .emit_ib = uvd_v6_0_ring_emit_ib,
1233         .emit_fence = uvd_v6_0_ring_emit_fence,
1234         .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1235         .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
1236         .test_ring = uvd_v6_0_ring_test_ring,
1237         .test_ib = amdgpu_uvd_ring_test_ib,
1238         .insert_nop = amdgpu_ring_insert_nop,
1239         .pad_ib = amdgpu_ring_generic_pad_ib,
1240         .begin_use = amdgpu_uvd_ring_begin_use,
1241         .end_use = amdgpu_uvd_ring_end_use,
1242 };
1243
1244 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
1245         .type = AMDGPU_RING_TYPE_UVD,
1246         .align_mask = 0xf,
1247         .nop = PACKET0(mmUVD_NO_OP, 0),
1248         .support_64bit_ptrs = false,
1249         .get_rptr = uvd_v6_0_ring_get_rptr,
1250         .get_wptr = uvd_v6_0_ring_get_wptr,
1251         .set_wptr = uvd_v6_0_ring_set_wptr,
1252         .emit_frame_size =
1253                 2 + /* uvd_v6_0_ring_emit_hdp_flush */
1254                 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
1255                 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
1256                 20 + /* uvd_v6_0_ring_emit_vm_flush */
1257                 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
1258         .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
1259         .emit_ib = uvd_v6_0_ring_emit_ib,
1260         .emit_fence = uvd_v6_0_ring_emit_fence,
1261         .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
1262         .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
1263         .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1264         .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
1265         .test_ring = uvd_v6_0_ring_test_ring,
1266         .test_ib = amdgpu_uvd_ring_test_ib,
1267         .insert_nop = amdgpu_ring_insert_nop,
1268         .pad_ib = amdgpu_ring_generic_pad_ib,
1269         .begin_use = amdgpu_uvd_ring_begin_use,
1270         .end_use = amdgpu_uvd_ring_end_use,
1271 };
1272
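/**
 * uvd_v6_0_set_ring_funcs - select the ring callbacks
 *
 * @adev: amdgpu_device pointer
 *
 * Use the VM-enabled ring functions on Polaris and newer ASICs,
 * otherwise fall back to the physical-mode ring functions.
 */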
1273 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
1274 {
1275         if (adev->asic_type >= CHIP_POLARIS10) {
1276                 adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
1277                 DRM_INFO("UVD is enabled in VM mode\n");
1278         } else {
1279                 adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
1280                 DRM_INFO("UVD is enabled in physical mode\n");
1281         }
1282 }
1283
1284 static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
1285         .set = uvd_v6_0_set_interrupt_state,
1286         .process = uvd_v6_0_process_interrupt,
1287 };
1288
1289 static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
1290 {
1291         adev->uvd.irq.num_types = 1;
1292         adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
1293 }
1294
1295 const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
1296 {
1297                 .type = AMD_IP_BLOCK_TYPE_UVD,
1298                 .major = 6,
1299                 .minor = 0,
1300                 .rev = 0,
1301                 .funcs = &uvd_v6_0_ip_funcs,
1302 };
1303
1304 const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
1305 {
1306                 .type = AMD_IP_BLOCK_TYPE_UVD,
1307                 .major = 6,
1308                 .minor = 2,
1309                 .rev = 0,
1310                 .funcs = &uvd_v6_0_ip_funcs,
1311 };
1312
1313 const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
1314 {
1315                 .type = AMD_IP_BLOCK_TYPE_UVD,
1316                 .major = 6,
1317                 .minor = 3,
1318                 .rev = 0,
1319                 .funcs = &uvd_v6_0_ip_funcs,
1320 };