muen/linux.git: drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25 #include <drm/drmP.h>
26 #include "amdgpu.h"
27 #include "amdgpu_vcn.h"
28 #include "soc15.h"
29 #include "soc15d.h"
30 #include "soc15_common.h"
31
32 #include "vcn/vcn_1_0_offset.h"
33 #include "vcn/vcn_1_0_sh_mask.h"
34 #include "hdp/hdp_4_0_offset.h"
35 #include "mmhub/mmhub_9_1_offset.h"
36 #include "mmhub/mmhub_9_1_sh_mask.h"
37
38 #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
39
40 static int vcn_v1_0_stop(struct amdgpu_device *adev);
41 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
42 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
43 static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
44 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
45 static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
46
47 /**
48  * vcn_v1_0_early_init - set function pointers
49  *
50  * @handle: amdgpu_device pointer
51  *
52  * Set ring and irq function pointers
53  */
54 static int vcn_v1_0_early_init(void *handle)
55 {
56         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
57
58         adev->vcn.num_enc_rings = 2;
59
60         vcn_v1_0_set_dec_ring_funcs(adev);
61         vcn_v1_0_set_enc_ring_funcs(adev);
62         vcn_v1_0_set_jpeg_ring_funcs(adev);
63         vcn_v1_0_set_irq_funcs(adev);
64
65         return 0;
66 }
67
68 /**
69  * vcn_v1_0_sw_init - sw init for VCN block
70  *
71  * @handle: amdgpu_device pointer
72  *
73  * Load firmware and sw initialization
74  */
75 static int vcn_v1_0_sw_init(void *handle)
76 {
77         struct amdgpu_ring *ring;
78         int i, r;
79         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
80
81         /* VCN DEC TRAP */
82         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
83         if (r)
84                 return r;
85
86         /* VCN ENC TRAP */
87         for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
88                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
89                                         &adev->vcn.irq);
90                 if (r)
91                         return r;
92         }
93
94         /* VCN JPEG TRAP */
95         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
96         if (r)
97                 return r;
98
99         r = amdgpu_vcn_sw_init(adev);
100         if (r)
101                 return r;
102
103         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
104                 const struct common_firmware_header *hdr;
105                 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
106                 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
107                 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
108                 adev->firmware.fw_size +=
109                         ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
110                 DRM_INFO("PSP loading VCN firmware\n");
111         }
112
113         r = amdgpu_vcn_resume(adev);
114         if (r)
115                 return r;
116
117         ring = &adev->vcn.ring_dec;
118         sprintf(ring->name, "vcn_dec");
119         r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
120         if (r)
121                 return r;
122
123         for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
124                 ring = &adev->vcn.ring_enc[i];
125                 sprintf(ring->name, "vcn_enc%d", i);
126                 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
127                 if (r)
128                         return r;
129         }
130
131         ring = &adev->vcn.ring_jpeg;
132         sprintf(ring->name, "vcn_jpeg");
133         r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
134         if (r)
135                 return r;
136
137         return r;
138 }
139
140 /**
141  * vcn_v1_0_sw_fini - sw fini for VCN block
142  *
143  * @handle: amdgpu_device pointer
144  *
145  * VCN suspend and free up sw allocation
146  */
147 static int vcn_v1_0_sw_fini(void *handle)
148 {
149         int r;
150         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
151
152         r = amdgpu_vcn_suspend(adev);
153         if (r)
154                 return r;
155
156         r = amdgpu_vcn_sw_fini(adev);
157
158         return r;
159 }
160
161 /**
162  * vcn_v1_0_hw_init - start and test VCN block
163  *
164  * @handle: amdgpu_device pointer
165  *
166  * Initialize the hardware, boot up the VCPU and do some testing
167  */
168 static int vcn_v1_0_hw_init(void *handle)
169 {
170         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
171         struct amdgpu_ring *ring = &adev->vcn.ring_dec;
172         int i, r;
173
174         ring->ready = true;
175         r = amdgpu_ring_test_ring(ring);
176         if (r) {
177                 ring->ready = false;
178                 goto done;
179         }
180
181         for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
182                 ring = &adev->vcn.ring_enc[i];
183                 ring->ready = true;
184                 r = amdgpu_ring_test_ring(ring);
185                 if (r) {
186                         ring->ready = false;
187                         goto done;
188                 }
189         }
190
191         ring = &adev->vcn.ring_jpeg;
192         ring->ready = true;
193         r = amdgpu_ring_test_ring(ring);
194         if (r) {
195                 ring->ready = false;
196                 goto done;
197         }
198
199 done:
200         if (!r)
201                 DRM_INFO("VCN decode and encode initialized successfully.\n");
202
203         return r;
204 }
205
206 /**
207  * vcn_v1_0_hw_fini - stop the hardware block
208  *
209  * @handle: amdgpu_device pointer
210  *
211  * Stop the VCN block, mark ring as not ready any more
212  */
213 static int vcn_v1_0_hw_fini(void *handle)
214 {
215         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
216         struct amdgpu_ring *ring = &adev->vcn.ring_dec;
217
218         if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
219                 vcn_v1_0_stop(adev);
220
221         ring->ready = false;
222
223         return 0;
224 }
225
226 /**
227  * vcn_v1_0_suspend - suspend VCN block
228  *
229  * @handle: amdgpu_device pointer
230  *
231  * HW fini and suspend VCN block
232  */
233 static int vcn_v1_0_suspend(void *handle)
234 {
235         int r;
236         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
237
238         r = vcn_v1_0_hw_fini(adev);
239         if (r)
240                 return r;
241
242         r = amdgpu_vcn_suspend(adev);
243
244         return r;
245 }
246
247 /**
248  * vcn_v1_0_resume - resume VCN block
249  *
250  * @handle: amdgpu_device pointer
251  *
252  * Resume firmware and hw init VCN block
253  */
254 static int vcn_v1_0_resume(void *handle)
255 {
256         int r;
257         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
258
259         r = amdgpu_vcn_resume(adev);
260         if (r)
261                 return r;
262
263         r = vcn_v1_0_hw_init(adev);
264
265         return r;
266 }
267
268 /**
269  * vcn_v1_0_mc_resume - memory controller programming
270  *
271  * @adev: amdgpu_device pointer
272  *
273  * Let the VCN memory controller know its offsets
274  */
275 static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
276 {
277         uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
278         uint32_t offset;
279
280         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
281                 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
282                              (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
283                 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
284                              (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
285                 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
286                 offset = 0;
287         } else {
288                 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
289                         lower_32_bits(adev->vcn.gpu_addr));
290                 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
291                         upper_32_bits(adev->vcn.gpu_addr));
292                 offset = size;
293                 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
294                              AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
295         }
296
297         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
298
299         WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
300                      lower_32_bits(adev->vcn.gpu_addr + offset));
301         WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
302                      upper_32_bits(adev->vcn.gpu_addr + offset));
303         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
304         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
305
306         WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
307                      lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
308         WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
309                      upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
310         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
311         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
312                         AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
313
314         WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
315                         adev->gfx.config.gb_addr_config);
316         WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
317                         adev->gfx.config.gb_addr_config);
318         WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
319                         adev->gfx.config.gb_addr_config);
320 }
321
322 /**
323  * vcn_v1_0_disable_clock_gating - disable VCN clock gating
324  *
325  * @adev: amdgpu_device pointer
327  *
328  * Disable clock gating for VCN block
329  */
330 static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
331 {
332         uint32_t data;
333
334         /* JPEG disable CGC */
335         data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
336
337         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
338                 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
339         else
340                 data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
341
342         data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
343         data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
344         WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
345
346         data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
347         data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
348         WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
349
350         /* UVD disable CGC */
351         data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
352         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
353                 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
354         else
355                 data &= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
356
357         data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
358         data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
359         WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
360
361         data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
362         data &= ~(UVD_CGC_GATE__SYS_MASK
363                 | UVD_CGC_GATE__UDEC_MASK
364                 | UVD_CGC_GATE__MPEG2_MASK
365                 | UVD_CGC_GATE__REGS_MASK
366                 | UVD_CGC_GATE__RBC_MASK
367                 | UVD_CGC_GATE__LMI_MC_MASK
368                 | UVD_CGC_GATE__LMI_UMC_MASK
369                 | UVD_CGC_GATE__IDCT_MASK
370                 | UVD_CGC_GATE__MPRD_MASK
371                 | UVD_CGC_GATE__MPC_MASK
372                 | UVD_CGC_GATE__LBSI_MASK
373                 | UVD_CGC_GATE__LRBBM_MASK
374                 | UVD_CGC_GATE__UDEC_RE_MASK
375                 | UVD_CGC_GATE__UDEC_CM_MASK
376                 | UVD_CGC_GATE__UDEC_IT_MASK
377                 | UVD_CGC_GATE__UDEC_DB_MASK
378                 | UVD_CGC_GATE__UDEC_MP_MASK
379                 | UVD_CGC_GATE__WCB_MASK
380                 | UVD_CGC_GATE__VCPU_MASK
381                 | UVD_CGC_GATE__SCPU_MASK);
382         WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);
383
384         data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
385         data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
386                 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
387                 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
388                 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
389                 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
390                 | UVD_CGC_CTRL__SYS_MODE_MASK
391                 | UVD_CGC_CTRL__UDEC_MODE_MASK
392                 | UVD_CGC_CTRL__MPEG2_MODE_MASK
393                 | UVD_CGC_CTRL__REGS_MODE_MASK
394                 | UVD_CGC_CTRL__RBC_MODE_MASK
395                 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
396                 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
397                 | UVD_CGC_CTRL__IDCT_MODE_MASK
398                 | UVD_CGC_CTRL__MPRD_MODE_MASK
399                 | UVD_CGC_CTRL__MPC_MODE_MASK
400                 | UVD_CGC_CTRL__LBSI_MODE_MASK
401                 | UVD_CGC_CTRL__LRBBM_MODE_MASK
402                 | UVD_CGC_CTRL__WCB_MODE_MASK
403                 | UVD_CGC_CTRL__VCPU_MODE_MASK
404                 | UVD_CGC_CTRL__SCPU_MODE_MASK);
405         WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
406
407         /* turn on */
408         data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
409         data |= (UVD_SUVD_CGC_GATE__SRE_MASK
410                 | UVD_SUVD_CGC_GATE__SIT_MASK
411                 | UVD_SUVD_CGC_GATE__SMP_MASK
412                 | UVD_SUVD_CGC_GATE__SCM_MASK
413                 | UVD_SUVD_CGC_GATE__SDB_MASK
414                 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
415                 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
416                 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
417                 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
418                 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
419                 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
420                 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
421                 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
422                 | UVD_SUVD_CGC_GATE__SCLR_MASK
423                 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
424                 | UVD_SUVD_CGC_GATE__ENT_MASK
425                 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
426                 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
427                 | UVD_SUVD_CGC_GATE__SITE_MASK
428                 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
429                 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
430                 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
431                 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
432                 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
433         WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);
434
435         data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
436         data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
437                 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
438                 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
439                 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
440                 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
441                 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
442                 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
443                 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
444                 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
445                 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
446         WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
447 }
448
449 /**
450  * vcn_v1_0_enable_clock_gating - enable VCN clock gating
451  *
452  * @adev: amdgpu_device pointer
454  *
455  * Enable clock gating for VCN block
456  */
457 static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
458 {
459         uint32_t data = 0;
460
461         /* enable JPEG CGC */
462         data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
463         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
464                 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
465         else
466                 data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
467         data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
468         data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
469         WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
470
471         data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
472         data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
473         WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
474
475         /* enable UVD CGC */
476         data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
477         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
478                 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
479         else
480                 data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
481         data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
482         data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
483         WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
484
485         data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
486         data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
487                 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
488                 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
489                 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
490                 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
491                 | UVD_CGC_CTRL__SYS_MODE_MASK
492                 | UVD_CGC_CTRL__UDEC_MODE_MASK
493                 | UVD_CGC_CTRL__MPEG2_MODE_MASK
494                 | UVD_CGC_CTRL__REGS_MODE_MASK
495                 | UVD_CGC_CTRL__RBC_MODE_MASK
496                 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
497                 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
498                 | UVD_CGC_CTRL__IDCT_MODE_MASK
499                 | UVD_CGC_CTRL__MPRD_MODE_MASK
500                 | UVD_CGC_CTRL__MPC_MODE_MASK
501                 | UVD_CGC_CTRL__LBSI_MODE_MASK
502                 | UVD_CGC_CTRL__LRBBM_MODE_MASK
503                 | UVD_CGC_CTRL__WCB_MODE_MASK
504                 | UVD_CGC_CTRL__VCPU_MODE_MASK
505                 | UVD_CGC_CTRL__SCPU_MODE_MASK);
506         WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
507
508         data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
509         data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
510                 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
511                 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
512                 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
513                 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
514                 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
515                 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
516                 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
517                 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
518                 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
519         WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
520 }
521
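/* Bring the VCN power tiles out of static power gating: request power-up
 * through UVD_PGFSM_CONFIG, poll UVD_PGFSM_STATUS for confirmation, then
 * reflect the power-gate enable state in UVD_POWER_STATUS.
 */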
522 static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
523 {
524         uint32_t data = 0;
525         int ret;
526
527         if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
528                 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
529                         | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
530                         | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
531                         | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
532                         | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
533                         | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
534                         | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
535                         | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
536                         | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
537                         | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
538                         | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
539
540                 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
541                 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
542         } else {
543                 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
544                         | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
545                         | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
546                         | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
547                         | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
548                         | 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
549                         | 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
550                         | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
551                         | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
552                         | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
553                         | 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
554                 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
555                 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0,  0xFFFFFFFF, ret);
556         }
557
558         /* poll UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS and UVDU_PWR_STATUS are 0 (power on) */
559
560         data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
561         data &= ~0x103;
562         if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
563                 data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;
564
565         WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
566 }
567
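/* Put the VCN power tiles back into static power gating: mark the block as
 * powered down in UVD_POWER_STATUS, request power-down (state 2) for every
 * tile through UVD_PGFSM_CONFIG and wait for UVD_PGFSM_STATUS to confirm.
 * Only takes effect when AMD_PG_SUPPORT_VCN is set.
 */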
568 static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
569 {
570         uint32_t data = 0;
571         int ret;
572
573         if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
574                 /* Before power off, this indicator has to be turned on */
575                 data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
576                 data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
577                 data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
578                 WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
579
580
581                 data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
582                         | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
583                         | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
584                         | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
585                         | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
586                         | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
587                         | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
588                         | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
589                         | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
590                         | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
591                         | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
592
593                 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
594
595                 data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
596                         | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
597                         | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
598                         | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
599                         | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
600                         | 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
601                         | 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
602                         | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
603                         | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
604                         | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
605                         | 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
606                 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
607         }
608 }
609
610 /**
611  * vcn_v1_0_start - start VCN block
612  *
613  * @adev: amdgpu_device pointer
614  *
615  * Set up and start the VCN block
616  */
617 static int vcn_v1_0_start(struct amdgpu_device *adev)
618 {
619         struct amdgpu_ring *ring = &adev->vcn.ring_dec;
620         uint32_t rb_bufsz, tmp;
621         uint32_t lmi_swap_cntl;
622         int i, j, r;
623
624         /* disable byte swapping */
625         lmi_swap_cntl = 0;
626
627         vcn_1_0_disable_static_power_gating(adev);
628         /* disable clock gating */
629         vcn_v1_0_disable_clock_gating(adev);
630
631         vcn_v1_0_mc_resume(adev);
632
633         /* disable interrupt */
634         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
635                         ~UVD_MASTINT_EN__VCPU_EN_MASK);
636
637         /* stall UMC and register bus before resetting VCPU */
638         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
639                         UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
640                         ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
641         mdelay(1);
642
643         /* put LMI, VCPU, RBC etc... into reset */
644         WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
645                 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
646                 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
647                 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
648                 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
649                 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
650                 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
651                 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
652                 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
653         mdelay(5);
654
655         /* initialize VCN memory controller */
656         WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
657                 (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
658                 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
659                 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
660                 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
661                 UVD_LMI_CTRL__REQ_MODE_MASK |
662                 0x00100000L);
663
664 #ifdef __BIG_ENDIAN
665         /* swap (8 in 32) RB and IB */
666         lmi_swap_cntl = 0xa;
667 #endif
668         WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
669
670         WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
671         WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
672         WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
673         WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
674         WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
675         WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);
676
677         /* take all subblocks out of reset, except VCPU */
678         WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
679                         UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
680         mdelay(5);
681
682         /* enable VCPU clock */
683         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
684                         UVD_VCPU_CNTL__CLK_EN_MASK);
685
686         /* enable UMC */
687         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
688                         ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
689
690         /* boot up the VCPU */
691         WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
692         mdelay(10);
693
694         for (i = 0; i < 10; ++i) {
695                 uint32_t status;
696
697                 for (j = 0; j < 100; ++j) {
698                         status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
699                         if (status & 2)
700                                 break;
701                         mdelay(10);
702                 }
703                 r = 0;
704                 if (status & 2)
705                         break;
706
707                 DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
708                 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
709                                 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
710                                 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
711                 mdelay(10);
712                 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
713                                 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
714                 mdelay(10);
715                 r = -1;
716         }
717
718         if (r) {
719                 DRM_ERROR("VCN decode not responding, giving up!!!\n");
720                 return r;
721         }
722         /* enable master interrupt */
723         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
724                 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
725                 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
726
727         /* enable system interrupt for JRBC, TODO: move to set interrupt */
728         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
729                 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
730                 ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);
731
732         /* clear bit 4 of VCN_STATUS */
733         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
734                         ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
735
736         /* force RBC into idle state */
737         rb_bufsz = order_base_2(ring->ring_size);
738         tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
739         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
740         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
741         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
742         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
743         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
744         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
745
746         /* set the write pointer delay */
747         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
748
749         /* set the wb address */
750         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
751                         (upper_32_bits(ring->gpu_addr) >> 2));
752
753         /* program the RB_BASE for the ring buffer */
754         WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
755                         lower_32_bits(ring->gpu_addr));
756         WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
757                         upper_32_bits(ring->gpu_addr));
758
759         /* Initialize the ring buffer's read and write pointers */
760         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
761
762         ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
763         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
764                         lower_32_bits(ring->wptr));
765
766         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
767                         ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
768
769         ring = &adev->vcn.ring_enc[0];
770         WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
771         WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
772         WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
773         WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
774         WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
775
776         ring = &adev->vcn.ring_enc[1];
777         WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
778         WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
779         WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
780         WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
781         WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
782
783         ring = &adev->vcn.ring_jpeg;
784         WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
785         WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
786         WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
787         WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
788         WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
789         WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
790         WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
791
792         /* initialize wptr */
793         ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
794
795         /* copy patch commands to the jpeg ring */
796         vcn_v1_0_jpeg_ring_set_patch_ring(ring,
797                 (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
798
799         return 0;
800 }
801
802 /**
803  * vcn_v1_0_stop - stop VCN block
804  *
805  * @adev: amdgpu_device pointer
806  *
807  * Stop the VCN block
808  */
809 static int vcn_v1_0_stop(struct amdgpu_device *adev)
810 {
811         /* force RBC into idle state */
812         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
813
814         /* Stall UMC and register bus before resetting VCPU */
815         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
816                         UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
817                         ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
818         mdelay(1);
819
820         /* put VCPU into reset */
821         WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
822                         UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
823         mdelay(5);
824
825         /* disable VCPU clock */
826         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);
827
828         /* Unstall UMC and register bus */
829         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
830                         ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
831
832         WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);
833
834         vcn_v1_0_enable_clock_gating(adev);
835         vcn_1_0_enable_static_power_gating(adev);
836         return 0;
837 }
838
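/* The block reports idle when UVD_STATUS reads back 0x2. */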
839 static bool vcn_v1_0_is_idle(void *handle)
840 {
841         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
842
843         return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
844 }
845
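/* Poll UVD_STATUS until the idle value 0x2 is observed. */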
846 static int vcn_v1_0_wait_for_idle(void *handle)
847 {
848         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
849         int ret = 0;
850
851         SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);
852
853         return ret;
854 }
855
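/* Enable or disable VCN clock gating. Gating is only applied when the block
 * is idle; otherwise -EBUSY is returned.
 */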
856 static int vcn_v1_0_set_clockgating_state(void *handle,
857                                           enum amd_clockgating_state state)
858 {
859         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
860         bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
861
862         if (enable) {
863                 /* wait for STATUS to clear */
864                 if (!vcn_v1_0_is_idle(handle))
865                         return -EBUSY;
866                 vcn_v1_0_enable_clock_gating(adev);
867         } else {
868                 /* disable HW gating and enable SW gating */
869                 vcn_v1_0_disable_clock_gating(adev);
870         }
871         return 0;
872 }
873
874 /**
875  * vcn_v1_0_dec_ring_get_rptr - get read pointer
876  *
877  * @ring: amdgpu_ring pointer
878  *
879  * Returns the current hardware read pointer
880  */
881 static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
882 {
883         struct amdgpu_device *adev = ring->adev;
884
885         return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
886 }
887
888 /**
889  * vcn_v1_0_dec_ring_get_wptr - get write pointer
890  *
891  * @ring: amdgpu_ring pointer
892  *
893  * Returns the current hardware write pointer
894  */
895 static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
896 {
897         struct amdgpu_device *adev = ring->adev;
898
899         return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
900 }
901
902 /**
903  * vcn_v1_0_dec_ring_set_wptr - set write pointer
904  *
905  * @ring: amdgpu_ring pointer
906  *
907  * Commits the write pointer to the hardware
908  */
909 static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
910 {
911         struct amdgpu_device *adev = ring->adev;
912
913         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
914 }
915
916 /**
917  * vcn_v1_0_dec_ring_insert_start - insert a start command
918  *
919  * @ring: amdgpu_ring pointer
920  *
921  * Write a start command to the ring.
922  */
923 static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
924 {
925         struct amdgpu_device *adev = ring->adev;
926
927         amdgpu_ring_write(ring,
928                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
929         amdgpu_ring_write(ring, 0);
930         amdgpu_ring_write(ring,
931                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
932         amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
933 }
934
935 /**
936  * vcn_v1_0_dec_ring_insert_end - insert an end command
937  *
938  * @ring: amdgpu_ring pointer
939  *
940  * Write an end command to the ring.
941  */
942 static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
943 {
944         struct amdgpu_device *adev = ring->adev;
945
946         amdgpu_ring_write(ring,
947                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
948         amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
949 }
950
951 /**
952  * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
953  *
954  * @ring: amdgpu_ring pointer
955  * @fence: fence to emit
956  *
957  * Write a fence and a trap command to the ring.
958  */
959 static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
960                                      unsigned flags)
961 {
962         struct amdgpu_device *adev = ring->adev;
963
964         WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
965
966         amdgpu_ring_write(ring,
967                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
968         amdgpu_ring_write(ring, seq);
969         amdgpu_ring_write(ring,
970                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
971         amdgpu_ring_write(ring, addr & 0xffffffff);
972         amdgpu_ring_write(ring,
973                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
974         amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
975         amdgpu_ring_write(ring,
976                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
977         amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
978
979         amdgpu_ring_write(ring,
980                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
981         amdgpu_ring_write(ring, 0);
982         amdgpu_ring_write(ring,
983                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
984         amdgpu_ring_write(ring, 0);
985         amdgpu_ring_write(ring,
986                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
987         amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
988 }
989
990 /**
991  * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
992  *
993  * @ring: amdgpu_ring pointer
994  * @ib: indirect buffer to execute
995  *
996  * Write ring commands to execute the indirect buffer
997  */
998 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
999                                   struct amdgpu_ib *ib,
1000                                   unsigned vmid, bool ctx_switch)
1001 {
1002         struct amdgpu_device *adev = ring->adev;
1003
1004         amdgpu_ring_write(ring,
1005                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
1006         amdgpu_ring_write(ring, vmid);
1007
1008         amdgpu_ring_write(ring,
1009                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1010         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1011         amdgpu_ring_write(ring,
1012                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1013         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1014         amdgpu_ring_write(ring,
1015                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
1016         amdgpu_ring_write(ring, ib->length_dw);
1017 }
1018
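/* Emit a conditional register wait on the decode ring: the register offset,
 * expected value and mask go through the GPCOM mailbox, followed by a
 * VCN_DEC_CMD_REG_READ_COND_WAIT command.
 */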
1019 static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
1020                                             uint32_t reg, uint32_t val,
1021                                             uint32_t mask)
1022 {
1023         struct amdgpu_device *adev = ring->adev;
1024
1025         amdgpu_ring_write(ring,
1026                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1027         amdgpu_ring_write(ring, reg << 2);
1028         amdgpu_ring_write(ring,
1029                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1030         amdgpu_ring_write(ring, val);
1031         amdgpu_ring_write(ring,
1032                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
1033         amdgpu_ring_write(ring, mask);
1034         amdgpu_ring_write(ring,
1035                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1036         amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
1037 }
1038
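/* Flush the GPU TLB for @vmid on the decode ring and wait until the hub's
 * page table base register reflects the new page directory address.
 */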
1039 static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
1040                                             unsigned vmid, uint64_t pd_addr)
1041 {
1042         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1043         uint32_t data0, data1, mask;
1044
1045         pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1046
1047         /* wait for register write */
1048         data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
1049         data1 = lower_32_bits(pd_addr);
1050         mask = 0xffffffff;
1051         vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
1052 }
1053
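/* Emit a register write on the decode ring via the GPCOM mailbox
 * (VCN_DEC_CMD_WRITE_REG).
 */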
1054 static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
1055                                         uint32_t reg, uint32_t val)
1056 {
1057         struct amdgpu_device *adev = ring->adev;
1058
1059         amdgpu_ring_write(ring,
1060                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1061         amdgpu_ring_write(ring, reg << 2);
1062         amdgpu_ring_write(ring,
1063                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1064         amdgpu_ring_write(ring, val);
1065         amdgpu_ring_write(ring,
1066                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1067         amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
1068 }
1069
1070 /**
1071  * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
1072  *
1073  * @ring: amdgpu_ring pointer
1074  *
1075  * Returns the current hardware enc read pointer
1076  */
1077 static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
1078 {
1079         struct amdgpu_device *adev = ring->adev;
1080
1081         if (ring == &adev->vcn.ring_enc[0])
1082                 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
1083         else
1084                 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
1085 }
1086
1087 /**
1088  * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
1089  *
1090  * @ring: amdgpu_ring pointer
1091  *
1092  * Returns the current hardware enc write pointer
1093  */
1094 static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
1095 {
1096         struct amdgpu_device *adev = ring->adev;
1097
1098         if (ring == &adev->vcn.ring_enc[0])
1099                 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
1100         else
1101                 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
1102 }
1103
1104 /**
1105  * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
1106  *
1107  * @ring: amdgpu_ring pointer
1108  *
1109  * Commits the enc write pointer to the hardware
1110  */
1111 static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
1112 {
1113         struct amdgpu_device *adev = ring->adev;
1114
1115         if (ring == &adev->vcn.ring_enc[0])
1116                 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
1117                         lower_32_bits(ring->wptr));
1118         else
1119                 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
1120                         lower_32_bits(ring->wptr));
1121 }
1122
1123 /**
1124  * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
1125  *
1126  * @ring: amdgpu_ring pointer
1127  * @fence: fence to emit
1128  *
1129  * Write an enc fence and a trap command to the ring.
1130  */
1131 static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1132                         u64 seq, unsigned flags)
1133 {
1134         WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1135
1136         amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
1137         amdgpu_ring_write(ring, addr);
1138         amdgpu_ring_write(ring, upper_32_bits(addr));
1139         amdgpu_ring_write(ring, seq);
1140         amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
1141 }
1142
1143 static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1144 {
1145         amdgpu_ring_write(ring, VCN_ENC_CMD_END);
1146 }
1147
1148 /**
1149  * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
1150  *
1151  * @ring: amdgpu_ring pointer
1152  * @ib: indirect buffer to execute
1153  *
1154  * Write enc ring commands to execute the indirect buffer
1155  */
1156 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1157                 struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
1158 {
1159         amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
1160         amdgpu_ring_write(ring, vmid);
1161         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1162         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1163         amdgpu_ring_write(ring, ib->length_dw);
1164 }
1165
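/* Emit a register wait on the encode ring: command, register offset,
 * mask, then the expected value.
 */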
1166 static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1167                                             uint32_t reg, uint32_t val,
1168                                             uint32_t mask)
1169 {
1170         amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
1171         amdgpu_ring_write(ring, reg << 2);
1172         amdgpu_ring_write(ring, mask);
1173         amdgpu_ring_write(ring, val);
1174 }
1175
1176 static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1177                                             unsigned int vmid, uint64_t pd_addr)
1178 {
1179         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1180
1181         pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1182
1183         /* wait for reg writes */
1184         vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
1185                                         lower_32_bits(pd_addr), 0xffffffff);
1186 }
1187
1188 static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1189                                         uint32_t reg, uint32_t val)
1190 {
1191         amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1192         amdgpu_ring_write(ring, reg << 2);
1193         amdgpu_ring_write(ring, val);
1194 }
1195
1196
1197 /**
1198  * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
1199  *
1200  * @ring: amdgpu_ring pointer
1201  *
1202  * Returns the current hardware read pointer
1203  */
1204 static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
1205 {
1206         struct amdgpu_device *adev = ring->adev;
1207
1208         return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
1209 }
1210
1211 /**
1212  * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
1213  *
1214  * @ring: amdgpu_ring pointer
1215  *
1216  * Returns the current hardware write pointer
1217  */
1218 static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
1219 {
1220         struct amdgpu_device *adev = ring->adev;
1221
1222         return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
1223 }
1224
1225 /**
1226  * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
1227  *
1228  * @ring: amdgpu_ring pointer
1229  *
1230  * Commits the write pointer to the hardware
1231  */
1232 static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
1233 {
1234         struct amdgpu_device *adev = ring->adev;
1235
1236         WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
1237 }
1238
1239 /**
1240  * vcn_v1_0_jpeg_ring_insert_start - insert a start command
1241  *
1242  * @ring: amdgpu_ring pointer
1243  *
1244  * Write a start command to the ring.
1245  */
1246 static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
1247 {
1248         struct amdgpu_device *adev = ring->adev;
1249
1250         amdgpu_ring_write(ring,
1251                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
1252         amdgpu_ring_write(ring, 0x68e04);
1253
1254         amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
1255         amdgpu_ring_write(ring, 0x80010000);
1256 }
1257
1258 /**
1259  * vcn_v1_0_jpeg_ring_insert_end - insert an end command
1260  *
1261  * @ring: amdgpu_ring pointer
1262  *
1263  * Write an end command to the ring.
1264  */
1265 static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
1266 {
1267         struct amdgpu_device *adev = ring->adev;
1268
1269         amdgpu_ring_write(ring,
1270                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
1271         amdgpu_ring_write(ring, 0x68e04);
1272
1273         amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
1274         amdgpu_ring_write(ring, 0x00010000);
1275 }
1276
1277 /**
1278  * vcn_v1_0_jpeg_ring_emit_fence - emit a fence & trap command
1279  *
1280  * @ring: amdgpu_ring pointer
1281  * @fence: fence to emit
1282  *
1283  * Write a fence and a trap command to the ring.
1284  */
1285 static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1286                                      unsigned flags)
1287 {
1288         struct amdgpu_device *adev = ring->adev;
1289
1290         WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1291
1292         amdgpu_ring_write(ring,
1293                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
1294         amdgpu_ring_write(ring, seq);
1295
1296         amdgpu_ring_write(ring,
1297                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
1298         amdgpu_ring_write(ring, seq);
1299
1300         amdgpu_ring_write(ring,
1301                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
1302         amdgpu_ring_write(ring, lower_32_bits(addr));
1303
1304         amdgpu_ring_write(ring,
1305                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
1306         amdgpu_ring_write(ring, upper_32_bits(addr));
1307
1308         amdgpu_ring_write(ring,
1309                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
1310         amdgpu_ring_write(ring, 0x8);
1311
1312         amdgpu_ring_write(ring,
1313                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
1314         amdgpu_ring_write(ring, 0);
1315
1316         amdgpu_ring_write(ring,
1317                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
1318         amdgpu_ring_write(ring, 0x01400200);
1319
1320         amdgpu_ring_write(ring,
1321                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
1322         amdgpu_ring_write(ring, seq);
1323
1324         amdgpu_ring_write(ring,
1325                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
1326         amdgpu_ring_write(ring, lower_32_bits(addr));
1327
1328         amdgpu_ring_write(ring,
1329                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
1330         amdgpu_ring_write(ring, upper_32_bits(addr));
1331
1332         amdgpu_ring_write(ring,
1333                 PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
1334         amdgpu_ring_write(ring, 0xffffffff);
1335
1336         amdgpu_ring_write(ring,
1337                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
1338         amdgpu_ring_write(ring, 0x3fbc);
1339
1340         amdgpu_ring_write(ring,
1341                 PACKETJ(0, 0, 0, PACKETJ_TYPE0));
1342         amdgpu_ring_write(ring, 0x1);
1343
1344         /* emit trap */
1345         amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
1346         amdgpu_ring_write(ring, 0);
1347 }
1348
1349 /**
1350  * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
1351  *
1352  * @ring: amdgpu_ring pointer
1353  * @ib: indirect buffer to execute
1354  *
1355  * Write ring commands to execute the indirect buffer.
1356  */
static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vmid, bool ctx_switch)
{
        struct amdgpu_device *adev = ring->adev;

        /* tag the IB and the JPEG LMI with the VM ID */
        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
        amdgpu_ring_write(ring, (vmid | (vmid << 4)));

        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
        amdgpu_ring_write(ring, (vmid | (vmid << 4)));

        /* IB base address */
        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));

        /* IB size in dwords */
        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
        amdgpu_ring_write(ring, ib->length_dw);

        /* point the JRBC read BAR back at the ring buffer */
        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
        amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));

        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
        amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));

        amdgpu_ring_write(ring,
                PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
        amdgpu_ring_write(ring, 0);

        /* conditional read of UVD_JRBC_STATUS against reference value 0x2 */
        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
        amdgpu_ring_write(ring, 0x01400200);

        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
        amdgpu_ring_write(ring, 0x2);

        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
        amdgpu_ring_write(ring, 0x2);
}

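/**
 * vcn_v1_0_jpeg_ring_emit_reg_wait - emit a register wait on the JPEG ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (dword granularity) to poll
 * @val: reference value to compare against
 * @mask: mask applied to the register value before the comparison
 *
 * Emit the JRBC packet sequence that polls @reg until (value & @mask)
 * matches @val.
 */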
static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
                                            uint32_t reg, uint32_t val,
                                            uint32_t mask)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg_offset = (reg << 2);

        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
        amdgpu_ring_write(ring, 0x01400200);

        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
        amdgpu_ring_write(ring, val);

        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
        if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
                ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
                amdgpu_ring_write(ring, 0);
                amdgpu_ring_write(ring,
                        PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
        } else {
                amdgpu_ring_write(ring, reg_offset);
                amdgpu_ring_write(ring,
                        PACKETJ(0, 0, 0, PACKETJ_TYPE3));
        }
        amdgpu_ring_write(ring, mask);
}

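/**
 * vcn_v1_0_jpeg_ring_emit_vm_flush - emit a VM flush on the JPEG ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID to flush
 * @pd_addr: page directory address
 *
 * Flush the GPU TLB for @vmid and then wait until the new page directory
 * address is visible in the VM hub registers.
 */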
static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
                unsigned vmid, uint64_t pd_addr)
{
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        uint32_t data0, data1, mask;

        pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

        /* wait for register write */
        data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
        data1 = lower_32_bits(pd_addr);
        mask = 0xffffffff;
        vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
}

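/**
 * vcn_v1_0_jpeg_ring_emit_wreg - emit a register write on the JPEG ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (dword granularity) to write
 * @val: value to write
 *
 * Emit the JRBC packet sequence that writes @val to @reg; the addressing
 * form used depends on which range the byte offset of @reg falls into.
 */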
static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
                                        uint32_t reg, uint32_t val)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg_offset = (reg << 2);

        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
        if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
                        ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
                amdgpu_ring_write(ring, 0);
                amdgpu_ring_write(ring,
                        PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
        } else {
                amdgpu_ring_write(ring, reg_offset);
                amdgpu_ring_write(ring,
                        PACKETJ(0, 0, 0, PACKETJ_TYPE0));
        }
        amdgpu_ring_write(ring, val);
}

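/**
 * vcn_v1_0_jpeg_ring_nop - insert no-op packets on the JPEG ring
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to pad with no-ops (must be even)
 *
 * Pad the ring with PACKETJ_TYPE6 no-op packets, two dwords at a time.
 */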
static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
        int i;

        WARN_ON(ring->wptr % 2 || count % 2);

        for (i = 0; i < count / 2; i++) {
                amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
                amdgpu_ring_write(ring, 0);
        }
}

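/*
 * Write a register-write packet sequence directly into the ring buffer at
 * the dword index pointed to by @ptr (advancing it), instead of going
 * through amdgpu_ring_write().  Used by vcn_v1_0_jpeg_ring_set_patch_ring()
 * to pre-build the patch sequence at a fixed location in the ring.
 */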
static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
        struct amdgpu_device *adev = ring->adev;

        ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
        if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
                ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
                ring->ring[(*ptr)++] = 0;
                ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
        } else {
                ring->ring[(*ptr)++] = reg_offset;
                ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
        }
        ring->ring[(*ptr)++] = val;
}

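/*
 * Pre-build the patch packet sequence for the JPEG ring, starting at ring
 * buffer index @ptr: re-program the JRBC read base to the ring, enable
 * no-fetch mode in mmUVD_JRBC_RB_CNTL, issue a conditional read of that
 * register, pad with no-ops, reset mmUVD_JRBC_RB_RPTR and finally disable
 * no-fetch mode again.
 */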
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg, reg_offset, val, mask, i;

        /* 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW */
        reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
        reg_offset = (reg << 2);
        val = lower_32_bits(ring->gpu_addr);
        vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

        /* 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH */
        reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
        reg_offset = (reg << 2);
        val = upper_32_bits(ring->gpu_addr);
        vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

        /* 3rd to 5th: issue MEM_READ commands */
        for (i = 0; i <= 2; i++) {
                ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
                ring->ring[ptr++] = 0;
        }

        /* 6th: program mmUVD_JRBC_RB_CNTL to enable NO_FETCH and RPTR write ability */
        reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
        reg_offset = (reg << 2);
        val = 0x13;
        vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

        /* 7th: program mmUVD_JRBC_RB_REF_DATA */
        reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
        reg_offset = (reg << 2);
        val = 0x1;
        vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

        /* 8th: issue a conditional register read of mmUVD_JRBC_RB_CNTL */
        reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
        reg_offset = (reg << 2);
        val = 0x1;
        mask = 0x1;

        ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
        ring->ring[ptr++] = 0x01400200;
        ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
        ring->ring[ptr++] = val;
        ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
        if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
                ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
                ring->ring[ptr++] = 0;
                ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
        } else {
                ring->ring[ptr++] = reg_offset;
                ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
        }
        ring->ring[ptr++] = mask;

        /* 9th to 21st: insert no-ops */
        for (i = 0; i <= 12; i++) {
                ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
                ring->ring[ptr++] = 0;
        }

        /* 22nd: reset mmUVD_JRBC_RB_RPTR */
        reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
        reg_offset = (reg << 2);
        val = 0;
        vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

        /* 23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch */
        reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
        reg_offset = (reg << 2);
        val = 0x12;
        vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
}

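/* Nothing to configure here; the hook just satisfies amdgpu_irq_src_funcs. */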
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        return 0;
}

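/**
 * vcn_v1_0_process_interrupt - dispatch a VCN interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source the entry was delivered on
 * @entry: interrupt vector entry from the IH ring
 *
 * Run fence processing for the decode, encode or JPEG ring that matches
 * the interrupt source id.
 */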
static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: VCN TRAP\n");

        switch (entry->src_id) {
        case 124:
                /* UVD system message: decode ring */
                amdgpu_fence_process(&adev->vcn.ring_dec);
                break;
        case 119:
                /* encode ring 0 */
                amdgpu_fence_process(&adev->vcn.ring_enc[0]);
                break;
        case 120:
                /* encode ring 1 */
                amdgpu_fence_process(&adev->vcn.ring_enc[1]);
                break;
        case 126:
                /* JPEG decode ring */
                amdgpu_fence_process(&adev->vcn.ring_jpeg);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
                break;
        }

        return 0;
}

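/**
 * vcn_v1_0_dec_ring_insert_nop - insert no-op packets on the decode ring
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to pad with no-ops (must be even)
 *
 * Pad the decode ring with writes to mmUVD_NO_OP, two dwords at a time.
 */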
static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        struct amdgpu_device *adev = ring->adev;
        int i;

        WARN_ON(ring->wptr % 2 || count % 2);

        for (i = 0; i < count / 2; i++) {
                amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
                amdgpu_ring_write(ring, 0);
        }
}

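/**
 * vcn_v1_0_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state to program
 *
 * Stop or (re)start the VCN block to match the requested state and record
 * the new state on success.
 */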
static int vcn_v1_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the VCN block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the SMC and the hw blocks.
         */
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == adev->vcn.cur_state)
                return 0;

        if (state == AMD_PG_STATE_GATE)
                ret = vcn_v1_0_stop(adev);
        else
                ret = vcn_v1_0_start(adev);

        if (!ret)
                adev->vcn.cur_state = state;
        return ret;
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
        .name = "vcn_v1_0",
        .early_init = vcn_v1_0_early_init,
        .late_init = NULL,
        .sw_init = vcn_v1_0_sw_init,
        .sw_fini = vcn_v1_0_sw_fini,
        .hw_init = vcn_v1_0_hw_init,
        .hw_fini = vcn_v1_0_hw_fini,
        .suspend = vcn_v1_0_suspend,
        .resume = vcn_v1_0_resume,
        .is_idle = vcn_v1_0_is_idle,
        .wait_for_idle = vcn_v1_0_wait_for_idle,
        .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
        .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
        .soft_reset = NULL /* vcn_v1_0_soft_reset */,
        .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
        .set_clockgating_state = vcn_v1_0_set_clockgating_state,
        .set_powergating_state = vcn_v1_0_set_powergating_state,
};

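/*
 * The ring function tables below hook the VCN decode, encode and JPEG
 * rings into the common amdgpu ring machinery; they are installed by the
 * vcn_v1_0_set_*_ring_funcs() helpers further down.
 */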
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_DEC,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
        .vmhub = AMDGPU_MMHUB,
        .get_rptr = vcn_v1_0_dec_ring_get_rptr,
        .get_wptr = vcn_v1_0_dec_ring_get_wptr,
        .set_wptr = vcn_v1_0_dec_ring_set_wptr,
        .emit_frame_size =
                6 + 6 + /* hdp invalidate / flush */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
                8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
                14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
                6,
        .emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
        .emit_ib = vcn_v1_0_dec_ring_emit_ib,
        .emit_fence = vcn_v1_0_dec_ring_emit_fence,
        .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
        .test_ring = amdgpu_vcn_dec_ring_test_ring,
        .test_ib = amdgpu_vcn_dec_ring_test_ib,
        .insert_nop = vcn_v1_0_dec_ring_insert_nop,
        .insert_start = vcn_v1_0_dec_ring_insert_start,
        .insert_end = vcn_v1_0_dec_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
        .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
        .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_ENC,
        .align_mask = 0x3f,
        .nop = VCN_ENC_CMD_NO_OP,
        .support_64bit_ptrs = false,
        .vmhub = AMDGPU_MMHUB,
        .get_rptr = vcn_v1_0_enc_ring_get_rptr,
        .get_wptr = vcn_v1_0_enc_ring_get_wptr,
        .set_wptr = vcn_v1_0_enc_ring_set_wptr,
        .emit_frame_size =
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
                4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
                5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
                1, /* vcn_v1_0_enc_ring_insert_end */
        .emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
        .emit_ib = vcn_v1_0_enc_ring_emit_ib,
        .emit_fence = vcn_v1_0_enc_ring_emit_fence,
        .emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
        .test_ring = amdgpu_vcn_enc_ring_test_ring,
        .test_ib = amdgpu_vcn_enc_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .insert_end = vcn_v1_0_enc_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
        .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
        .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_JPEG,
        .align_mask = 0xf,
        .nop = PACKET0(0x81ff, 0),
        .support_64bit_ptrs = false,
        .vmhub = AMDGPU_MMHUB,
        .extra_dw = 64,
        .get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
        .get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
        .set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
        .emit_frame_size =
                6 + 6 + /* hdp invalidate / flush */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
                8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
                26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
                6,
        .emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
        .emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
        .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
        .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
        .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
        .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
        .insert_nop = vcn_v1_0_jpeg_ring_nop,
        .insert_start = vcn_v1_0_jpeg_ring_insert_start,
        .insert_end = vcn_v1_0_jpeg_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
        .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
        .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
        adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
        DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

        DRM_INFO("VCN encode is enabled in VM mode\n");
}

static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
        adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
        DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
}

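/* Interrupt hooks: .set is a stub, .process runs fence work per source id. */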
static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
        .set = vcn_v1_0_set_interrupt_state,
        .process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
        /* one interrupt type per encode ring plus the decode and JPEG rings */
        adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
        adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}

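/* IP block description used to register VCN 1.0 with the amdgpu driver core. */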
const struct amdgpu_ip_block_version vcn_v1_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_VCN,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &vcn_v1_0_ip_funcs,
};