drm/amd/powerplay: delete SMUM_WRITE_FIELD
[muen/linux.git] / drivers / gpu / drm / amd / powerplay / smumgr / ci_smc.c
1 /*
2  * Copyright 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/fb.h>
26 #include "linux/delay.h"
27 #include <linux/types.h>
28
29 #include "smumgr.h"
30 #include "pp_debug.h"
31 #include "ci_smc.h"
32 #include "ci_smumgr.h"
33 #include "ppsmc.h"
34 #include "smu7_hwmgr.h"
35 #include "hardwaremanager.h"
36 #include "ppatomctrl.h"
37 #include "cgs_common.h"
38 #include "atombios.h"
39 #include "pppcielanes.h"
40
41 #include "smu/smu_7_0_1_d.h"
42 #include "smu/smu_7_0_1_sh_mask.h"
43
44 #include "dce/dce_8_0_d.h"
45 #include "dce/dce_8_0_sh_mask.h"
46
47 #include "bif/bif_4_1_d.h"
48 #include "bif/bif_4_1_sh_mask.h"
49
50 #include "gca/gfx_7_2_d.h"
51 #include "gca/gfx_7_2_sh_mask.h"
52
53 #include "gmc/gmc_7_1_d.h"
54 #include "gmc/gmc_7_1_sh_mask.h"
55
56 #include "processpptables.h"
57
58 #define MC_CG_ARB_FREQ_F0           0x0a
59 #define MC_CG_ARB_FREQ_F1           0x0b
60 #define MC_CG_ARB_FREQ_F2           0x0c
61 #define MC_CG_ARB_FREQ_F3           0x0d
62
63 #define SMC_RAM_END 0x40000
64
65 #define VOLTAGE_SCALE               4
66 #define VOLTAGE_VID_OFFSET_SCALE1    625
67 #define VOLTAGE_VID_OFFSET_SCALE2    100
68 #define CISLAND_MINIMUM_ENGINE_CLOCK 800
69 #define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
70
/*
 * Power-tune defaults for Hawaii XT SKUs, selected by PCI device id in
 * ci_initialize_power_tune_defaults().  The scalar fields feed the SVI
 * load-line / TDC / DTE fuse setup below; the two 15-entry arrays are
 * the BAPMTI_R / BAPMTI_RC thermal coefficient tables copied into the
 * SMC dpm table.  Field order follows struct ci_pt_defaults -- confirm
 * against ci_smumgr.h.
 */
static const struct ci_pt_defaults defaults_hawaii_xt = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
76
/*
 * Power-tune defaults for Hawaii PRO SKUs.  Identical to the XT table
 * except for the bapm temperature gradient field (0x65062 vs 0xB0000).
 */
static const struct ci_pt_defaults defaults_hawaii_pro = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
82
/*
 * Power-tune defaults for Bonaire XT SKUs; also the fallback table for
 * any device id not matched in ci_initialize_power_tune_defaults().
 */
static const struct ci_pt_defaults defaults_bonaire_xt = {
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
88
89
/*
 * Power-tune defaults for Saturn XT SKUs (device ids 0x6640/41/46/47).
 */
static const struct ci_pt_defaults defaults_saturn_xt = {
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
95
96
97 static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
98                                         uint32_t smc_addr, uint32_t limit)
99 {
100         if ((0 != (3 & smc_addr))
101                 || ((smc_addr + 3) >= limit)) {
102                 pr_err("smc_addr invalid \n");
103                 return -EINVAL;
104         }
105
106         cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
107         PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
108         return 0;
109 }
110
/*
 * Copy @byte_count bytes from @src into SMC SRAM at @smc_start_address
 * (dword aligned; the copy must end below @limit).  Data goes through
 * the mmSMC_IND_INDEX_0 / mmSMC_IND_DATA_0 indirect pair one dword at
 * a time, MSB first.  A trailing partial dword is merged with the
 * existing SRAM contents via read-modify-write so the bytes beyond the
 * copy are preserved.
 *
 * Returns 0 on success, -EINVAL on a bad address/size, or the error
 * from ci_set_smc_sram_address().
 */
static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	if ((3 & smc_start_address)
		|| ((smc_start_address + byte_count) >= limit)) {
		pr_err("smc_start_address invalid \n");
		return -EINVAL;
	}

	addr = smc_start_address;

	while (byte_count >= 4) {
	/* Bytes are written into the SMC address space with the MSB first. */
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {

		data = 0;

		/* fetch the dword we are about to partially overwrite */
		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;


		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);

		/* bit positions the new bytes must be shifted up by */
		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* Bytes are written into the SMC address space with the MSB first. */
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		/* keep the low (untouched) bytes of the original dword */
		data |= (original_data & ~((~0UL) << extra_shift));

		/* re-select the address before writing back (defensive --
		 * auto-increment was disabled by ci_set_smc_sram_address) */
		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
	}

	return 0;
}
178
179
/*
 * Plant a 4-byte instruction at SMC address 0 -- presumably a branch
 * to the firmware entry point executed when the SMC leaves reset;
 * confirm the opcode against the SMC ISA.  The limit of
 * sizeof(data) + 1 is just large enough to satisfy
 * ci_copy_bytes_to_smc()'s bounds check for a 4-byte copy at 0.
 * The copy result is deliberately ignored; always returns 0.
 */
static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);

	return 0;
}
188
/*
 * Report whether the SMC microcontroller is up: its clock must not be
 * gated (SMC_SYSCON_CLOCK_CNTL_0.ck_disable == 0) and SMC_PC_C --
 * presumably the SMC program counter; confirm -- must have advanced
 * to at least 0x20100.
 */
bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
			CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
	&& (0x20100 <= cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_PC_C)));
}
196
197 static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
198                                 uint32_t *value, uint32_t limit)
199 {
200         int result;
201
202         result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);
203
204         if (result)
205                 return result;
206
207         *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
208         return 0;
209 }
210
/*
 * Post @msg to the SMC message register and poll for a response.
 *
 * Returns -EINVAL when the SMC is not running at all.
 * NOTE(review): an SMC response other than 1 is only logged -- the
 * function still returns 0, so callers cannot distinguish a rejected
 * message from success.  This mirrors the existing contract; changing
 * it would affect every caller.
 */
int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	int ret;

	/* refuse to talk to a stopped / clock-gated SMC */
	if (!ci_is_smc_ram_running(hwmgr))
		return -EINVAL;

	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	/* wait for the SMC to post a non-zero response code */
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret != 1)
		pr_info("\n failed to send message %x ret is %d\n",  msg, ret);

	return 0;
}
229
230 int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
231                                         uint16_t msg, uint32_t parameter)
232 {
233         cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
234         return ci_send_msg_to_smc(hwmgr, msg);
235 }
236
/*
 * Select the per-SKU power-tune defaults table from the PCI device id
 * (queried through cgs).  Unknown ids fall back to the Bonaire XT
 * table.
 */
static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct cgs_system_info sys_info = {0};
	uint32_t dev_id;

	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
	cgs_query_system_info(hwmgr->device, &sys_info);
	dev_id = (uint32_t)sys_info.value;

	switch (dev_id) {
	/* Hawaii PRO SKUs */
	case 0x67BA:
	case 0x66B1:
		smu_data->power_tune_defaults = &defaults_hawaii_pro;
		break;
	/* Hawaii XT SKUs */
	case 0x67B8:
	case 0x66B0:
		smu_data->power_tune_defaults = &defaults_hawaii_xt;
		break;
	/* Saturn XT SKUs */
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		smu_data->power_tune_defaults = &defaults_saturn_xt;
		break;
	/* Bonaire SKUs and anything unrecognised */
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
	default:
		smu_data->power_tune_defaults = &defaults_bonaire_xt;
		break;
	}
}
282
283 static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
284         struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
285         uint32_t clock, uint32_t *vol)
286 {
287         uint32_t i = 0;
288
289         if (allowed_clock_voltage_table->count == 0)
290                 return -EINVAL;
291
292         for (i = 0; i < allowed_clock_voltage_table->count; i++) {
293                 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
294                         *vol = allowed_clock_voltage_table->entries[i].v;
295                         return 0;
296                 }
297         }
298
299         *vol = allowed_clock_voltage_table->entries[i - 1].v;
300         return 0;
301 }
302
/*
 * Derive the SPLL programming (reference/post/feedback dividers and
 * optional spread spectrum) for engine clock @clock and store the
 * resulting register images and divider id in @sclk.
 *
 * Returns 0 on success, or the atomctrl error when the VBIOS divider
 * lookup fails.
 *
 * NOTE(review): spll_func_cntl is updated below but never copied into
 * @sclk -- confirm no consumer expects CgSpllFuncCntl from here.
 */
static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;
	/* start from the register values cached by the hwmgr */
	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t ref_clock;
	uint32_t ref_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
	ref_clock = atomctrl_get_reference_clock(hwmgr);
	ref_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider */
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PDIV_A,  dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		struct pp_atomctrl_internal_ss_info ss_info;
		uint32_t vco_freq = clock * dividers.uc_pll_post_div;

		/* spread spectrum is only enabled when the VBIOS reports
		 * SS parameters for this VCO frequency */
		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
				vco_freq, &ss_info)) {
			uint32_t clk_s = ref_clock * 5 /
					(ref_divider * ss_info.speed_spectrum_rate);
			uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
					fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
		}
	}

	/* hand the computed register images back to the caller */
	sclk->SclkFrequency        = clock;
	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;

	return 0;
}
376
377 static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
378                                 const struct phm_phase_shedding_limits_table *pl,
379                                         uint32_t sclk, uint32_t *p_shed)
380 {
381         unsigned int i;
382
383         /* use the minimum phase shedding */
384         *p_shed = 1;
385
386         for (i = 0; i < pl->count; i++) {
387                 if (sclk < pl->entries[i].Sclk) {
388                         *p_shed = i;
389                         break;
390                 }
391         }
392 }
393
394 static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
395                         uint32_t clock_insr)
396 {
397         uint8_t i;
398         uint32_t temp;
399         uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);
400
401         if (clock < min) {
402                 pr_info("Engine clock can't satisfy stutter requirement!\n");
403                 return 0;
404         }
405         for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
406                 temp = clock >> i;
407
408                 if (temp >= min || i == 0)
409                         break;
410         }
411         return i;
412 }
413
414 static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
415                 uint32_t clock, uint16_t sclk_al_threshold,
416                 struct SMU7_Discrete_GraphicsLevel *level)
417 {
418         int result;
419         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
420
421
422         result = ci_calculate_sclk_params(hwmgr, clock, level);
423
424         /* populate graphics levels */
425         result = ci_get_dependency_volt_by_clk(hwmgr,
426                         hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
427                         (uint32_t *)(&level->MinVddc));
428         if (result) {
429                 pr_err("vdd_dep_on_sclk table is NULL\n");
430                 return result;
431         }
432
433         level->SclkFrequency = clock;
434         level->MinVddcPhases = 1;
435
436         if (data->vddc_phase_shed_control)
437                 ci_populate_phase_value_based_on_sclk(hwmgr,
438                                 hwmgr->dyn_state.vddc_phase_shed_limits_table,
439                                 clock,
440                                 &level->MinVddcPhases);
441
442         level->ActivityLevel = sclk_al_threshold;
443         level->CcPwrDynRm = 0;
444         level->CcPwrDynRm1 = 0;
445         level->EnabledForActivity = 0;
446         /* this level can be used for throttling.*/
447         level->EnabledForThrottle = 1;
448         level->UpH = 0;
449         level->DownH = 0;
450         level->VoltageDownH = 0;
451         level->PowerThrottle = 0;
452
453
454         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
455                         PHM_PlatformCaps_SclkDeepSleep))
456                 level->DeepSleepDivId =
457                                 ci_get_sleep_divider_id_from_clock(clock,
458                                                 CISLAND_MINIMUM_ENGINE_CLOCK);
459
460         /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
461         level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
462
463         if (0 == result) {
464                 level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
465                 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
466                 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
467                 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
468                 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
469                 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
470                 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
471                 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
472                 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
473                 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
474         }
475
476         return result;
477 }
478
/*
 * Build one SMU7_Discrete_GraphicsLevel per entry of the sclk dpm
 * table and upload the whole fixed-size level array to SMC RAM at
 * dpm_table_start + GraphicsLevel.
 *
 * Policy applied on top of ci_populate_single_graphic_level():
 *  - deep sleep is cleared for every level above the second (i > 1);
 *  - the highest populated level gets the HIGH display watermark;
 *  - only level 0 is enabled for activity;
 *  - the sclk dpm enable mask is derived from the dpm table.
 *
 * Returns 0 on success or a negative error code.
 */
int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result = 0;
	/* SMC RAM destination of the level array */
	uint32_t array = smu_data->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	/* the full array is copied, not just the populated levels */
	uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
			SMU7_MAX_LEVELS_GRAPHICS;
	struct SMU7_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t i;

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = ci_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				(uint16_t)smu_data->activity_target[i],
				&levels[i]);
		if (result)
			return result;
		if (i > 1)
			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
		if (i == (dpm_table->sclk_table.count - 1))
			smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
				PPSMC_DISPLAY_WATERMARK_HIGH;
	}

	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	result = ci_copy_bytes_to_smc(hwmgr, array,
				   (u8 *)levels, array_size,
				   SMC_RAM_END);

	return result;

}
520
521 static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
522 {
523         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
524         const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
525
526         smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
527         smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
528         smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
529         smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
530
531         return 0;
532 }
533
/*
 * Program the package TDC limit and throttle-release parameters into
 * the fuse table.  usTDC * 256 presumably encodes the limit in 8.8
 * fixed point -- confirm units against the SMU7 fuse layout.
 * CONVERT_FROM_HOST_TO_SMC_US swaps the value to SMC byte order in
 * place and yields it, so the assignment stores the swapped value.
 * Always returns 0.
 */
static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
	uint16_t tdc_limit;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
			defaults->tdc_vddc_throttle_release_limit_perc;
	smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;

	return 0;
}
549
/*
 * Program PmFuses.TdcWaterfallCtl (fuse table DW8).
 *
 * NOTE(review): the dword read back into @temp is never used -- the
 * driver default is written regardless, so the read appears to act
 * only as a reachability check on the fuse table.  Also the error
 * string mentions DW6 (SviLoadLineEn) although the offset read is
 * TdcWaterfallCtl; kept as-is since it is a runtime string.
 */
static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	uint32_t temp;

	if (ci_read_smc_sram_dword(hwmgr,
			fuse_table_offset +
			offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, SMC_RAM_END))
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
				return -EINVAL);
	else
		smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;

	return 0;
}
568
/*
 * Program FuzzyFan_PwmSetDelta.  The platform's fan output sensitivity
 * is used as-is when it is zero or has bit 15 set (presumably a
 * sign/override flag -- confirm against the thermal controller spec);
 * otherwise the default sensitivity is substituted.  The value is
 * byte-swapped to SMC order in place.  Always returns 0.
 */
static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	uint16_t tmp;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
	else
		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;

	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);

	return 0;
}
584
/*
 * Convert the CAC leakage table voltages into VID codes and store them
 * in the BapmVddCVid{Lo,Hi,Hi2}Sidd fuse arrays (at most 8 entries,
 * and the leakage table must be the same length as the sclk/vddc
 * dependency table).  With the EVV cap set, each entry carries three
 * voltages (Vddc1..Vddc3); otherwise Vddc/Leakage fill the lo/hi pair
 * and the Hi2 slot is not written.
 *
 * Returns 0 on success, -EINVAL when the tables are missing or sized
 * inconsistently.
 */
static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
	int i;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
	uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
			    "The CAC Leakage table does not exist!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
			    "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
			    "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);

	for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
			hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
		} else {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
		}
	}

	return 0;
}
613
614 static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
615 {
616         int i;
617         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
618         uint8_t *vid = smu_data->power_tune_table.VddCVid;
619         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
620
621         PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
622                 "There should never be more than 8 entries for VddcVid!!!",
623                 return -EINVAL);
624
625         for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
626                 vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
627
628         return 0;
629 }
630
631 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
632 {
633         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
634         u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
635         u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
636         int i, min, max;
637
638         min = max = hi_vid[0];
639         for (i = 0; i < 8; i++) {
640                 if (0 != hi_vid[i]) {
641                         if (min > hi_vid[i])
642                                 min = hi_vid[i];
643                         if (max < hi_vid[i])
644                                 max = hi_vid[i];
645                 }
646
647                 if (0 != lo_vid[i]) {
648                         if (min > lo_vid[i])
649                                 min = lo_vid[i];
650                         if (max < lo_vid[i])
651                                 max = lo_vid[i];
652                 }
653         }
654
655         if ((min == 0) || (max == 0))
656                 return -EINVAL;
657         smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
658         smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;
659
660         return 0;
661 }
662
663 static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
664 {
665         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
666         uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
667         uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
668         struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
669
670         HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
671         LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
672
673         smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
674                         CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
675         smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
676                         CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
677
678         return 0;
679 }
680
/*
 * Build the SMU7_Discrete_PmFuses image and upload it to SMC RAM.
 * Only active when the PowerContainment cap is set: the fuse table
 * offset is located through the firmware header, the individual dwords
 * are filled by the helpers below (their error codes are OR-merged),
 * and the whole struct is then copied to SMC RAM in one go.
 *
 * Returns 0 on success or a negative error code.
 */
static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t pm_fuse_table_offset;
	int ret = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END)) {
			pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
			return -EINVAL;
		}

		/* DW0 - DW3 */
		ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
		/* DW4 - DW5 */
		ret |= ci_populate_vddc_vid(hwmgr);
		/* DW6 */
		ret |= ci_populate_svi_load_line(hwmgr);
		/* DW7 */
		ret |= ci_populate_tdc_limit(hwmgr);
		/* DW8 */
		ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);

		ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);

		ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);

		ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
		if (ret)
			return ret;

		/* upload the fully assembled fuse image */
		ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
	}
	return ret;
}
722
/*
 * Fill the BAPM/DTE power-management fields of the SMC dpm table from
 * the CAC dtp table, the optional ppm parameter table and the per-SKU
 * power-tune defaults.  Multi-byte fields are byte-swapped to SMC
 * order via the PP_HOST_TO_SMC_* / CONVERT_* helpers.  The BAPMTI_R /
 * BAPMTI_RC coefficient tables are copied from the flat default arrays
 * in iterations x sources x sinks order.  Always returns 0.
 */
static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
	const uint16_t *def1, *def2;
	int i, j, k;

	/* * 256 presumably encodes watts in 8.8 fixed point -- confirm */
	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));

	dpm_table->DTETjOffset = 0;
	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

	/* package power limits only exist when a ppm table is present */
	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
	} else {
		dpm_table->PPM_PkgPwrLimit = 0;
		dpm_table->PPM_TemperatureLimit = 0;
	}

	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);

	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
	def1 = defaults->bapmti_r;
	def2 = defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
771
772 static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
773                 pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
774                 uint16_t *lo)
775 {
776         uint16_t v_index;
777         bool vol_found = false;
778         *hi = tab->value * VOLTAGE_SCALE;
779         *lo = tab->value * VOLTAGE_SCALE;
780
781         PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
782                         "The SCLK/VDDC Dependency Table does not exist.\n",
783                         return -EINVAL);
784
785         if (NULL == hwmgr->dyn_state.cac_leakage_table) {
786                 pr_warn("CAC Leakage Table does not exist, using vddc.\n");
787                 return 0;
788         }
789
790         for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
791                 if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
792                         vol_found = true;
793                         if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
794                                 *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
795                                 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
796                         } else {
797                                 pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
798                                 *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
799                                 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
800                         }
801                         break;
802                 }
803         }
804
805         if (!vol_found) {
806                 for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
807                         if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
808                                 vol_found = true;
809                                 if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
810                                         *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
811                                         *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
812                                 } else {
813                                         pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
814                                         *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
815                                         *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
816                                 }
817                                 break;
818                         }
819                 }
820
821                 if (!vol_found)
822                         pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
823         }
824
825         return 0;
826 }
827
828 static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
829                 pp_atomctrl_voltage_table_entry *tab,
830                 SMU7_Discrete_VoltageLevel *smc_voltage_tab)
831 {
832         int result;
833
834         result = ci_get_std_voltage_value_sidd(hwmgr, tab,
835                         &smc_voltage_tab->StdVoltageHiSidd,
836                         &smc_voltage_tab->StdVoltageLoSidd);
837         if (result) {
838                 smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
839                 smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
840         }
841
842         smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
843         CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
844         CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
845
846         return 0;
847 }
848
849 static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
850                         SMU7_Discrete_DpmTable *table)
851 {
852         unsigned int count;
853         int result;
854         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
855
856         table->VddcLevelCount = data->vddc_voltage_table.count;
857         for (count = 0; count < table->VddcLevelCount; count++) {
858                 result = ci_populate_smc_voltage_table(hwmgr,
859                                 &(data->vddc_voltage_table.entries[count]),
860                                 &(table->VddcLevel[count]));
861                 PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
862
863                 /* GPIO voltage control */
864                 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
865                         table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
866                 else
867                         table->VddcLevel[count].Smio = 0;
868         }
869
870         CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
871
872         return 0;
873 }
874
875 static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
876                         SMU7_Discrete_DpmTable *table)
877 {
878         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
879         uint32_t count;
880         int result;
881
882         table->VddciLevelCount = data->vddci_voltage_table.count;
883
884         for (count = 0; count < table->VddciLevelCount; count++) {
885                 result = ci_populate_smc_voltage_table(hwmgr,
886                                 &(data->vddci_voltage_table.entries[count]),
887                                 &(table->VddciLevel[count]));
888                 PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
889                 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
890                         table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
891                 else
892                         table->VddciLevel[count].Smio |= 0;
893         }
894
895         CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
896
897         return 0;
898 }
899
900 static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
901                         SMU7_Discrete_DpmTable *table)
902 {
903         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
904         uint32_t count;
905         int result;
906
907         table->MvddLevelCount = data->mvdd_voltage_table.count;
908
909         for (count = 0; count < table->MvddLevelCount; count++) {
910                 result = ci_populate_smc_voltage_table(hwmgr,
911                                 &(data->mvdd_voltage_table.entries[count]),
912                                 &table->MvddLevel[count]);
913                 PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
914                 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
915                         table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
916                 else
917                         table->MvddLevel[count].Smio |= 0;
918         }
919
920         CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
921
922         return 0;
923 }
924
925
926 static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
927         SMU7_Discrete_DpmTable *table)
928 {
929         int result;
930
931         result = ci_populate_smc_vddc_table(hwmgr, table);
932         PP_ASSERT_WITH_CODE(0 == result,
933                         "can not populate VDDC voltage table to SMC", return -EINVAL);
934
935         result = ci_populate_smc_vdd_ci_table(hwmgr, table);
936         PP_ASSERT_WITH_CODE(0 == result,
937                         "can not populate VDDCI voltage table to SMC", return -EINVAL);
938
939         result = ci_populate_smc_mvdd_table(hwmgr, table);
940         PP_ASSERT_WITH_CODE(0 == result,
941                         "can not populate MVDD voltage table to SMC", return -EINVAL);
942
943         return 0;
944 }
945
946 static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
947                 struct SMU7_Discrete_Ulv *state)
948 {
949         uint32_t voltage_response_time, ulv_voltage;
950         int result;
951         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
952
953         state->CcPwrDynRm = 0;
954         state->CcPwrDynRm1 = 0;
955
956         result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
957         PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
958
959         if (ulv_voltage == 0) {
960                 data->ulv_supported = false;
961                 return 0;
962         }
963
964         if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
965                 /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
966                 if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
967                         state->VddcOffset = 0;
968                 else
969                         /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
970                         state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
971         } else {
972                 /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
973                 if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
974                         state->VddcOffsetVid = 0;
975                 else  /* used in SVI2 Mode */
976                         state->VddcOffsetVid = (uint8_t)(
977                                         (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
978                                                 * VOLTAGE_VID_OFFSET_SCALE2
979                                                 / VOLTAGE_VID_OFFSET_SCALE1);
980         }
981         state->VddcPhase = 1;
982
983         CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
984         CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
985         CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
986
987         return 0;
988 }
989
/* Populate the SMC ULV state; thin wrapper around ci_populate_ulv_level(). */
static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
		 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(hwmgr, ulv_level);
}
995
996 static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
997 {
998         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
999         struct smu7_dpm_table *dpm_table = &data->dpm_table;
1000         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1001         uint32_t i;
1002
1003 /* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/
1004         for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
1005                 table->LinkLevel[i].PcieGenSpeed  =
1006                         (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
1007                 table->LinkLevel[i].PcieLaneCount =
1008                         (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
1009                 table->LinkLevel[i].EnabledForActivity = 1;
1010                 table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
1011                 table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
1012         }
1013
1014         smu_data->smc_state_table.LinkLevelCount =
1015                 (uint8_t)dpm_table->pcie_speed_table.count;
1016         data->dpm_level_enable_mask.pcie_dpm_enable_mask =
1017                 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
1018
1019         return 0;
1020 }
1021
/* Translate a requested memory clock into the MPLL/DLL register values the
 * SMC programs for one memory DPM level.
 *
 * @memory_clock: target MCLK (same units the VBIOS divider query expects)
 * @mclk:         output SMC memory level; only the clock-register fields
 *                and MclkFrequency are written here
 * @strobe_mode:  passed through to the VBIOS divider calculation
 * @dllStateOn:   drives the MRDCK0/1_PDNB power-down-bar bits
 *
 * Returns 0 on success, or the atomctrl error when the VBIOS divider
 * lookup fails.  Values are left in host byte order; the caller converts.
 */
static int ci_calculate_mclk_params(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *mclk,
		bool strobe_mode,
		bool dllStateOn
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	/* start from the clock-register values captured in data->clock_registers */
	uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
	uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
	uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
	uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
	uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
	uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
	uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
	uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;

	pp_atomctrl_memory_clock_param mpll_param;
	int result;

	/* ask the VBIOS for the MPLL divider setup for this clock */
	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
				memory_clock, &mpll_param, strobe_mode);
	PP_ASSERT_WITH_CODE(0 == result,
		"Error retrieving Memory Clock Parameters from VBIOS.", return result);

	/* apply the divider results to the MPLL control registers */
	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);

	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);

	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
							MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);

	/* the DQ post divider/YCLK select is only programmed for GDDR5 */
	if (data->is_memory_gddr5) {
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
								MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
								MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;
		uint32_t freq_nom;
		uint32_t tmp;
		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

		/* for GDDR5 for all modes and DDR3 */
		if (1 == mpll_param.qdr)
			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;

		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
			/* derive the SS CLKS/CLKV register fields from the
			 * spread-spectrum rate and percentage */
			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
			uint32_t clkv =
				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
		}
	}

	/* DLL speed from the VBIOS result; MRDCK0/1 power-down-bar bits from
	 * the caller's dllStateOn decision */
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);


	/* copy the computed register images into the SMC memory level */
	mclk->MclkFrequency   = memory_clock;
	mclk->MpllFuncCntl    = mpll_func_cntl;
	mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
	mclk->DllCntl         = dll_cntl;
	mclk->MpllSs1         = mpll_ss1;
	mclk->MpllSs2         = mpll_ss2;

	return 0;
}
1117
/* Map a memory clock onto the 4-bit frequency-ratio index (0x0..0xf) used
 * by the memory controller, with different break points for strobe and
 * non-strobe mode.
 */
static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	uint32_t lower, upper, base, step;

	if (strobe_mode) {
		lower = 12500;
		upper = 47500;
		base  = 10000;
		step  = 2500;
	} else {
		lower = 65000;
		upper = 135000;
		base  = 60000;
		step  = 5000;
	}

	if (memory_clock < lower)
		return 0x00;
	if (memory_clock > upper)
		return 0x0f;
	return (uint8_t)((memory_clock - base) / step);
}
1141
/* Map a DDR3 memory clock onto the 4-bit frequency-ratio index: 0 below
 * 10000, 0xf at or above 80000, linear in 5000-unit steps in between.
 */
static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;
	return (uint8_t)((memory_clock - 10000) / 5000 + 1);
}
1155
1156 static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
1157                                         uint32_t memory_clock, uint32_t *p_shed)
1158 {
1159         unsigned int i;
1160
1161         *p_shed = 1;
1162
1163         for (i = 0; i < pl->count; i++) {
1164                 if (memory_clock < pl->entries[i].Mclk) {
1165                         *p_shed = i;
1166                         break;
1167                 }
1168         }
1169
1170         return 0;
1171 }
1172
1173 static int ci_populate_single_memory_level(
1174                 struct pp_hwmgr *hwmgr,
1175                 uint32_t memory_clock,
1176                 SMU7_Discrete_MemoryLevel *memory_level
1177                 )
1178 {
1179         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1180         int result = 0;
1181         bool dll_state_on;
1182         struct cgs_display_info info = {0};
1183         uint32_t mclk_edc_wr_enable_threshold = 40000;
1184         uint32_t mclk_edc_enable_threshold = 40000;
1185         uint32_t mclk_strobe_mode_threshold = 40000;
1186
1187         if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
1188                 result = ci_get_dependency_volt_by_clk(hwmgr,
1189                         hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
1190                 PP_ASSERT_WITH_CODE((0 == result),
1191                         "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
1192         }
1193
1194         if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
1195                 result = ci_get_dependency_volt_by_clk(hwmgr,
1196                                 hwmgr->dyn_state.vddci_dependency_on_mclk,
1197                                 memory_clock,
1198                                 &memory_level->MinVddci);
1199                 PP_ASSERT_WITH_CODE((0 == result),
1200                         "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
1201         }
1202
1203         if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
1204                 result = ci_get_dependency_volt_by_clk(hwmgr,
1205                                 hwmgr->dyn_state.mvdd_dependency_on_mclk,
1206                                 memory_clock,
1207                                 &memory_level->MinMvdd);
1208                 PP_ASSERT_WITH_CODE((0 == result),
1209                         "can not find MinVddci voltage value from memory MVDD voltage dependency table", return result);
1210         }
1211
1212         memory_level->MinVddcPhases = 1;
1213
1214         if (data->vddc_phase_shed_control) {
1215                 ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
1216                                 memory_clock, &memory_level->MinVddcPhases);
1217         }
1218
1219         memory_level->EnabledForThrottle = 1;
1220         memory_level->EnabledForActivity = 1;
1221         memory_level->UpH = 0;
1222         memory_level->DownH = 100;
1223         memory_level->VoltageDownH = 0;
1224
1225         /* Indicates maximum activity level for this performance level.*/
1226         memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
1227         memory_level->StutterEnable = 0;
1228         memory_level->StrobeEnable = 0;
1229         memory_level->EdcReadEnable = 0;
1230         memory_level->EdcWriteEnable = 0;
1231         memory_level->RttEnable = 0;
1232
1233         /* default set to low watermark. Highest level will be set to high later.*/
1234         memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1235
1236         cgs_get_active_displays_info(hwmgr->device, &info);
1237         data->display_timing.num_existing_displays = info.display_count;
1238
1239         /* stutter mode not support on ci */
1240
1241         /* decide strobe mode*/
1242         memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
1243                 (memory_clock <= mclk_strobe_mode_threshold);
1244
1245         /* decide EDC mode and memory clock ratio*/
1246         if (data->is_memory_gddr5) {
1247                 memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
1248                                         memory_level->StrobeEnable);
1249
1250                 if ((mclk_edc_enable_threshold != 0) &&
1251                                 (memory_clock > mclk_edc_enable_threshold)) {
1252                         memory_level->EdcReadEnable = 1;
1253                 }
1254
1255                 if ((mclk_edc_wr_enable_threshold != 0) &&
1256                                 (memory_clock > mclk_edc_wr_enable_threshold)) {
1257                         memory_level->EdcWriteEnable = 1;
1258                 }
1259
1260                 if (memory_level->StrobeEnable) {
1261                         if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
1262                                         ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
1263                                 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1264                         else
1265                                 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
1266                 } else
1267                         dll_state_on = data->dll_default_on;
1268         } else {
1269                 memory_level->StrobeRatio =
1270                         ci_get_ddr3_mclk_frequency_ratio(memory_clock);
1271                 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1272         }
1273
1274         result = ci_calculate_mclk_params(hwmgr,
1275                 memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
1276
1277         if (0 == result) {
1278                 memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
1279                 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
1280                 memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
1281                 memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
1282                 /* MCLK frequency in units of 10KHz*/
1283                 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
1284                 /* Indicates maximum activity level for this performance level.*/
1285                 CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
1286                 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
1287                 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
1288                 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
1289                 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
1290                 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
1291                 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
1292                 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
1293                 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
1294                 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
1295         }
1296
1297         return result;
1298 }
1299
/* Build every memory DPM level from the mclk dpm table and upload the whole
 * MemoryLevel array into SMC RAM.  Also applies a per-device-ID voltage
 * quirk and marks the lowest/highest levels' activity and watermark.
 * Returns 0 on success or the first error from level population / SMC copy.
 */
int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result;
	struct cgs_system_info sys_info = {0};
	uint32_t dev_id;

	/* destination of the level array inside the SMC's dpm table */
	uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
			"can not populate memory level as memory clock is zero", return -EINVAL);
		result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
			&(smu_data->smc_state_table.MemoryLevel[i]));
		if (0 != result)
			return result;
	}

	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
	cgs_query_system_info(hwmgr->device, &sys_info);
	dev_id = (uint32_t)sys_info.value;

	/* quirk for PCI device IDs 0x67B0/0x67B1 (presumably Hawaii parts —
	 * TODO confirm): level 1 inherits level 0's VDDCI/MVDD minimums */
	if ((dpm_table->mclk_table.count >= 2)
		&& ((dev_id == 0x67B0) ||  (dev_id == 0x67B1))) {
		smu_data->smc_state_table.MemoryLevel[1].MinVddci =
				smu_data->smc_state_table.MemoryLevel[0].MinVddci;
		smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
				smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
	}
	/* lowest level gets a fixed activity level (SMC byte order) */
	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	/* only the top level uses the high display watermark */
	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;

	/* push the finished array into SMC RAM */
	result = ci_copy_bytes_to_smc(hwmgr,
		level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
		SMC_RAM_END);

	return result;
}
1352
1353 static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
1354                                         SMU7_Discrete_VoltageLevel *voltage)
1355 {
1356         const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1357
1358         uint32_t i = 0;
1359
1360         if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1361                 /* find mvdd value which clock is more than request */
1362                 for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
1363                         if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
1364                                 /* Always round to higher voltage. */
1365                                 voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
1366                                 break;
1367                         }
1368                 }
1369
1370                 PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
1371                         "MVDD Voltage is outside the supported range.", return -EINVAL);
1372
1373         } else {
1374                 return -EINVAL;
1375         }
1376
1377         return 0;
1378 }
1379
/*
 * Populate the ACPI (lowest-power) performance level of the SMC DPM table,
 * for both the graphics (ACPILevel) and memory (MemoryACPILevel) sides.
 * Powers down the SPLL, forces a DLL reset/bypass for memory, and converts
 * all multi-byte fields to SMC (big-endian) byte order before upload.
 */
static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
	SMU7_Discrete_DpmTable *table)
{
	int result = 0;
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;

	SMU7_Discrete_VoltageLevel voltage_level;
	/* Start from the clock register values captured at init time. */
	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;


	/* The ACPI state should not do DPM on DC (or ever).*/
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	/* Use the VBIOS ACPI VDDC if available, else the pptable minimum. */
	if (data->acpi_vddc)
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
	/* assign zero for now*/
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* divider ID for required SCLK*/
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power off and reset the SPLL, and mux SCLK away from it. */
	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
							CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
							CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
	spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
							CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* For various features to be enabled/disabled while this level is active.*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);


	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	/* VDDCI: tie to VDDC when not independently controlled. */
	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
		table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
	else {
		if (data->acpi_vddci != 0)
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
	}

	/* mclk = 0: lowest MVDD entry; fall back to 0 when lookup fails. */
	if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on DLL*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal*/
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	table->MemoryACPILevel.DllCntl            =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl     =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl       =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl_1     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
	table->MemoryACPILevel.MpllFuncCntl_2     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
	table->MemoryACPILevel.MpllSs1            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
	table->MemoryACPILevel.MpllSs2            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	/* Indicates maximum activity level for this performance level.*/
	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = 0;
	table->MemoryACPILevel.StrobeEnable = 0;
	table->MemoryACPILevel.EdcReadEnable = 0;
	table->MemoryACPILevel.EdcWriteEnable = 0;
	table->MemoryACPILevel.RttEnable = 0;

	return result;
}
1520
1521 static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1522                                         SMU7_Discrete_DpmTable *table)
1523 {
1524         int result = 0;
1525         uint8_t count;
1526         struct pp_atomctrl_clock_dividers_vi dividers;
1527         struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1528                 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1529
1530         table->UvdLevelCount = (uint8_t)(uvd_table->count);
1531
1532         for (count = 0; count < table->UvdLevelCount; count++) {
1533                 table->UvdLevel[count].VclkFrequency =
1534                                         uvd_table->entries[count].vclk;
1535                 table->UvdLevel[count].DclkFrequency =
1536                                         uvd_table->entries[count].dclk;
1537                 table->UvdLevel[count].MinVddc =
1538                                         uvd_table->entries[count].v * VOLTAGE_SCALE;
1539                 table->UvdLevel[count].MinVddcPhases = 1;
1540
1541                 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1542                                 table->UvdLevel[count].VclkFrequency, &dividers);
1543                 PP_ASSERT_WITH_CODE((0 == result),
1544                                 "can not find divide id for Vclk clock", return result);
1545
1546                 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1547
1548                 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1549                                 table->UvdLevel[count].DclkFrequency, &dividers);
1550                 PP_ASSERT_WITH_CODE((0 == result),
1551                                 "can not find divide id for Dclk clock", return result);
1552
1553                 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1554                 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1555                 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1556                 CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
1557         }
1558
1559         return result;
1560 }
1561
1562 static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1563                 SMU7_Discrete_DpmTable *table)
1564 {
1565         int result = -EINVAL;
1566         uint8_t count;
1567         struct pp_atomctrl_clock_dividers_vi dividers;
1568         struct phm_vce_clock_voltage_dependency_table *vce_table =
1569                                 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1570
1571         table->VceLevelCount = (uint8_t)(vce_table->count);
1572         table->VceBootLevel = 0;
1573
1574         for (count = 0; count < table->VceLevelCount; count++) {
1575                 table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
1576                 table->VceLevel[count].MinVoltage =
1577                                 vce_table->entries[count].v * VOLTAGE_SCALE;
1578                 table->VceLevel[count].MinPhases = 1;
1579
1580                 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1581                                 table->VceLevel[count].Frequency, &dividers);
1582                 PP_ASSERT_WITH_CODE((0 == result),
1583                                 "can not find divide id for VCE engine clock",
1584                                 return result);
1585
1586                 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1587
1588                 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1589                 CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
1590         }
1591         return result;
1592 }
1593
1594 static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1595                                         SMU7_Discrete_DpmTable *table)
1596 {
1597         int result = -EINVAL;
1598         uint8_t count;
1599         struct pp_atomctrl_clock_dividers_vi dividers;
1600         struct phm_acp_clock_voltage_dependency_table *acp_table =
1601                                 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
1602
1603         table->AcpLevelCount = (uint8_t)(acp_table->count);
1604         table->AcpBootLevel = 0;
1605
1606         for (count = 0; count < table->AcpLevelCount; count++) {
1607                 table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
1608                 table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
1609                 table->AcpLevel[count].MinPhases = 1;
1610
1611                 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1612                                 table->AcpLevel[count].Frequency, &dividers);
1613                 PP_ASSERT_WITH_CODE((0 == result),
1614                                 "can not find divide id for engine clock", return result);
1615
1616                 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1617
1618                 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1619                 CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
1620         }
1621         return result;
1622 }
1623
1624 static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1625                                         SMU7_Discrete_DpmTable *table)
1626 {
1627         int result = -EINVAL;
1628         uint8_t count;
1629         struct pp_atomctrl_clock_dividers_vi dividers;
1630         struct phm_samu_clock_voltage_dependency_table *samu_table =
1631                                 hwmgr->dyn_state.samu_clock_voltage_dependency_table;
1632
1633         table->SamuBootLevel = 0;
1634         table->SamuLevelCount = (uint8_t)(samu_table->count);
1635
1636         for (count = 0; count < table->SamuLevelCount; count++) {
1637                 table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
1638                 table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
1639                 table->SamuLevel[count].MinPhases = 1;
1640
1641                 /* retrieve divider value for VBIOS */
1642                 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1643                                 table->SamuLevel[count].Frequency, &dividers);
1644                 PP_ASSERT_WITH_CODE((0 == result),
1645                                 "can not find divide id for samu clock", return result);
1646
1647                 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1648
1649                 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1650                 CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
1651         }
1652         return result;
1653 }
1654
/*
 * Compute the MC arbiter DRAM timing entry for one (engine clock, memory
 * clock) pair. Asks the VBIOS to program the DRAM timings for the pair,
 * then reads the resulting values back from the MC_ARB registers and
 * stores them (endian-converted) into @arb_regs.
 */
static int ci_populate_memory_timing_parameters(
		struct pp_hwmgr *hwmgr,
		uint32_t engine_clock,
		uint32_t memory_clock,
		struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
		)
{
	uint32_t dramTiming;
	uint32_t dramTiming2;
	uint32_t burstTime;
	int result;

	/* Let the VBIOS program DRAM timings for this clock pair first. */
	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
				engine_clock, memory_clock);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error calling VBIOS to set DRAM_TIMING.", return result);

	/* Read back what the VBIOS programmed. */
	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);

	/* Byte-swap for the SMC; burst time fits in a single byte. */
	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;

	return 0;
}
1683
1684 static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1685 {
1686         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1687         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1688         int result = 0;
1689         SMU7_Discrete_MCArbDramTimingTable  arb_regs;
1690         uint32_t i, j;
1691
1692         memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
1693
1694         for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1695                 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1696                         result = ci_populate_memory_timing_parameters
1697                                 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1698                                  data->dpm_table.mclk_table.dpm_levels[j].value,
1699                                  &arb_regs.entries[i][j]);
1700
1701                         if (0 != result)
1702                                 break;
1703                 }
1704         }
1705
1706         if (0 == result) {
1707                 result = ci_copy_bytes_to_smc(
1708                                 hwmgr,
1709                                 smu_data->arb_table_start,
1710                                 (uint8_t *)&arb_regs,
1711                                 sizeof(SMU7_Discrete_MCArbDramTimingTable),
1712                                 SMC_RAM_END
1713                                 );
1714         }
1715
1716         return result;
1717 }
1718
1719 static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1720                         SMU7_Discrete_DpmTable *table)
1721 {
1722         int result = 0;
1723         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1724         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1725
1726         table->GraphicsBootLevel = 0;
1727         table->MemoryBootLevel = 0;
1728
1729         /* find boot level from dpm table*/
1730         result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1731                         data->vbios_boot_state.sclk_bootup_value,
1732                         (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1733
1734         if (0 != result) {
1735                 smu_data->smc_state_table.GraphicsBootLevel = 0;
1736                 pr_err("VBIOS did not find boot engine clock value \
1737                         in dependency table. Using Graphics DPM level 0!");
1738                 result = 0;
1739         }
1740
1741         result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1742                 data->vbios_boot_state.mclk_bootup_value,
1743                 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1744
1745         if (0 != result) {
1746                 smu_data->smc_state_table.MemoryBootLevel = 0;
1747                 pr_err("VBIOS did not find boot engine clock value \
1748                         in dependency table. Using Memory DPM level 0!");
1749                 result = 0;
1750         }
1751
1752         table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
1753         table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
1754         table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1755
1756         return result;
1757 }
1758
1759 static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
1760                                  SMU7_Discrete_MCRegisters *mc_reg_table)
1761 {
1762         const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
1763
1764         uint32_t i, j;
1765
1766         for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
1767                 if (smu_data->mc_reg_table.validflag & 1<<j) {
1768                         PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
1769                                 "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
1770                         mc_reg_table->address[i].s0 =
1771                                 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
1772                         mc_reg_table->address[i].s1 =
1773                                 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
1774                         i++;
1775                 }
1776         }
1777
1778         mc_reg_table->last = (uint8_t)i;
1779
1780         return 0;
1781 }
1782
1783 static void ci_convert_mc_registers(
1784         const struct ci_mc_reg_entry *entry,
1785         SMU7_Discrete_MCRegisterSet *data,
1786         uint32_t num_entries, uint32_t valid_flag)
1787 {
1788         uint32_t i, j;
1789
1790         for (i = 0, j = 0; j < num_entries; j++) {
1791                 if (valid_flag & 1<<j) {
1792                         data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
1793                         i++;
1794                 }
1795         }
1796 }
1797
1798 static int ci_convert_mc_reg_table_entry_to_smc(
1799                 struct pp_hwmgr *hwmgr,
1800                 const uint32_t memory_clock,
1801                 SMU7_Discrete_MCRegisterSet *mc_reg_table_data
1802                 )
1803 {
1804         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1805         uint32_t i = 0;
1806
1807         for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
1808                 if (memory_clock <=
1809                         smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
1810                         break;
1811                 }
1812         }
1813
1814         if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
1815                 --i;
1816
1817         ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
1818                                 mc_reg_table_data, smu_data->mc_reg_table.last,
1819                                 smu_data->mc_reg_table.validflag);
1820
1821         return 0;
1822 }
1823
1824 static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
1825                 SMU7_Discrete_MCRegisters *mc_regs)
1826 {
1827         int result = 0;
1828         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1829         int res;
1830         uint32_t i;
1831
1832         for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1833                 res = ci_convert_mc_reg_table_entry_to_smc(
1834                                 hwmgr,
1835                                 data->dpm_table.mclk_table.dpm_levels[i].value,
1836                                 &mc_regs->data[i]
1837                                 );
1838
1839                 if (0 != res)
1840                         result = res;
1841         }
1842
1843         return result;
1844 }
1845
1846 static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
1847 {
1848         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1849         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1850         uint32_t address;
1851         int32_t result;
1852
1853         if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
1854                 return 0;
1855
1856
1857         memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
1858
1859         result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
1860
1861         if (result != 0)
1862                 return result;
1863
1864         address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
1865
1866         return  ci_copy_bytes_to_smc(hwmgr, address,
1867                                  (uint8_t *)&smu_data->mc_regs.data[0],
1868                                 sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
1869                                 SMC_RAM_END);
1870 }
1871
/*
 * Build the full initial MC register table (address header plus the data
 * for every MCLK level) and upload it to SMC RAM in one piece.
 */
static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
	/* First the compacted register address pairs... */
	result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize MCRegTable for the MC register addresses!", return result;);

	/* ...then one value set per MCLK DPM level. */
	result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize MCRegTable for driver state!", return result;);

	return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
			(uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
}
1889
1890 static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1891 {
1892         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1893         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1894         uint8_t count, level;
1895
1896         count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
1897
1898         for (level = 0; level < count; level++) {
1899                 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
1900                          >= data->vbios_boot_state.sclk_bootup_value) {
1901                         smu_data->smc_state_table.GraphicsBootLevel = level;
1902                         break;
1903                 }
1904         }
1905
1906         count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
1907
1908         for (level = 0; level < count; level++) {
1909                 if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
1910                         >= data->vbios_boot_state.mclk_bootup_value) {
1911                         smu_data->smc_state_table.MemoryBootLevel = level;
1912                         break;
1913                 }
1914         }
1915
1916         return 0;
1917 }
1918
1919 static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1920                                             SMU7_Discrete_DpmTable *table)
1921 {
1922         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1923
1924         if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1925                 table->SVI2Enable = 1;
1926         else
1927                 table->SVI2Enable = 0;
1928         return 0;
1929 }
1930
/*
 * Release the SMC from reset and start it running: point execution at
 * address 0, ungate the SMC clock, deassert reset, then wait for the
 * firmware to signal that its interrupts are enabled.
 */
static int ci_start_smc(struct pp_hwmgr *hwmgr)
{
	/* set smc instruct start point at 0x0 */
	ci_program_jump_on_start(hwmgr);

	/* enable smc clock */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);

	/* take the SMC out of reset */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);

	/* poll until the firmware reports it is up (interrupts enabled) */
	SMUM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
				 INTERRUPTS_ENABLED, 1);

	return 0;
}
1946
1947 int ci_init_smc_table(struct pp_hwmgr *hwmgr)
1948 {
1949         int result;
1950         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1951         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1952         SMU7_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
1953         struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1954         u32 i;
1955
1956         ci_initialize_power_tune_defaults(hwmgr);
1957         memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
1958
1959         if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
1960                 ci_populate_smc_voltage_tables(hwmgr, table);
1961
1962         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1963                         PHM_PlatformCaps_AutomaticDCTransition))
1964                 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1965
1966
1967         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1968                         PHM_PlatformCaps_StepVddc))
1969                 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1970
1971         if (data->is_memory_gddr5)
1972                 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1973
1974         if (data->ulv_supported) {
1975                 result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
1976                 PP_ASSERT_WITH_CODE(0 == result,
1977                         "Failed to initialize ULV state!", return result);
1978
1979                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1980                         ixCG_ULV_PARAMETER, 0x40035);
1981         }
1982
1983         result = ci_populate_all_graphic_levels(hwmgr);
1984         PP_ASSERT_WITH_CODE(0 == result,
1985                 "Failed to initialize Graphics Level!", return result);
1986
1987         result = ci_populate_all_memory_levels(hwmgr);
1988         PP_ASSERT_WITH_CODE(0 == result,
1989                 "Failed to initialize Memory Level!", return result);
1990
1991         result = ci_populate_smc_link_level(hwmgr, table);
1992         PP_ASSERT_WITH_CODE(0 == result,
1993                 "Failed to initialize Link Level!", return result);
1994
1995         result = ci_populate_smc_acpi_level(hwmgr, table);
1996         PP_ASSERT_WITH_CODE(0 == result,
1997                 "Failed to initialize ACPI Level!", return result);
1998
1999         result = ci_populate_smc_vce_level(hwmgr, table);
2000         PP_ASSERT_WITH_CODE(0 == result,
2001                 "Failed to initialize VCE Level!", return result);
2002
2003         result = ci_populate_smc_acp_level(hwmgr, table);
2004         PP_ASSERT_WITH_CODE(0 == result,
2005                 "Failed to initialize ACP Level!", return result);
2006
2007         result = ci_populate_smc_samu_level(hwmgr, table);
2008         PP_ASSERT_WITH_CODE(0 == result,
2009                 "Failed to initialize SAMU Level!", return result);
2010
2011         /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
2012         /* need to populate the  ARB settings for the initial state. */
2013         result = ci_program_memory_timing_parameters(hwmgr);
2014         PP_ASSERT_WITH_CODE(0 == result,
2015                 "Failed to Write ARB settings for the initial state.", return result);
2016
2017         result = ci_populate_smc_uvd_level(hwmgr, table);
2018         PP_ASSERT_WITH_CODE(0 == result,
2019                 "Failed to initialize UVD Level!", return result);
2020
2021         table->UvdBootLevel  = 0;
2022         table->VceBootLevel  = 0;
2023         table->AcpBootLevel  = 0;
2024         table->SamuBootLevel  = 0;
2025
2026         table->GraphicsBootLevel = 0;
2027         table->MemoryBootLevel = 0;
2028
2029         result = ci_populate_smc_boot_level(hwmgr, table);
2030         PP_ASSERT_WITH_CODE(0 == result,
2031                 "Failed to initialize Boot Level!", return result);
2032
2033         result = ci_populate_smc_initial_state(hwmgr);
2034         PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
2035
2036         result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
2037         PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
2038
2039         table->UVDInterval = 1;
2040         table->VCEInterval = 1;
2041         table->ACPInterval = 1;
2042         table->SAMUInterval = 1;
2043         table->GraphicsVoltageChangeEnable  = 1;
2044         table->GraphicsThermThrottleEnable  = 1;
2045         table->GraphicsInterval = 1;
2046         table->VoltageInterval  = 1;
2047         table->ThermalInterval  = 1;
2048
2049         table->TemperatureLimitHigh =
2050                 (data->thermal_temp_setting.temperature_high *
2051                  SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2052         table->TemperatureLimitLow =
2053                 (data->thermal_temp_setting.temperature_low *
2054                 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2055
2056         table->MemoryVoltageChangeEnable  = 1;
2057         table->MemoryInterval  = 1;
2058         table->VoltageResponseTime  = 0;
2059         table->VddcVddciDelta = 4000;
2060         table->PhaseResponseTime  = 0;
2061         table->MemoryThermThrottleEnable  = 1;
2062
2063         PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
2064                         "There must be 1 or more PCIE levels defined in PPTable.",
2065                         return -EINVAL);
2066
2067         table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
2068         table->PCIeGenInterval = 1;
2069
2070         ci_populate_smc_svi2_config(hwmgr, table);
2071
2072         for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
2073                 CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
2074
2075         table->ThermGpio  = 17;
2076         table->SclkStepSize = 0x4000;
2077         if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
2078                 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
2079                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2080                                 PHM_PlatformCaps_RegulatorHot);
2081         } else {
2082                 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2083                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2084                                 PHM_PlatformCaps_RegulatorHot);
2085         }
2086
2087         table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2088
2089         CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2090         CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
2091         CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
2092         CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
2093         CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
2094         CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2095         CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2096         CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2097         table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
2098         CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2099         CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2100
2101         table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
2102         table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
2103         table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
2104
2105         /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
2106         result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
2107                                         offsetof(SMU7_Discrete_DpmTable, SystemFlags),
2108                                         (uint8_t *)&(table->SystemFlags),
2109                                         sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController),
2110                                         SMC_RAM_END);
2111
2112         PP_ASSERT_WITH_CODE(0 == result,
2113                 "Failed to upload dpm data to SMC memory!", return result;);
2114
2115         result = ci_populate_initial_mc_reg_table(hwmgr);
2116         PP_ASSERT_WITH_CODE((0 == result),
2117                 "Failed to populate initialize MC Reg table!", return result);
2118
2119         result = ci_populate_pm_fuses(hwmgr);
2120         PP_ASSERT_WITH_CODE(0 == result,
2121                         "Failed to  populate PM fuses to SMC memory!", return result);
2122
2123         ci_start_smc(hwmgr);
2124
2125         return 0;
2126 }
2127
2128 int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2129 {
2130         struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2131         SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2132         uint32_t duty100;
2133         uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2134         uint16_t fdo_min, slope1, slope2;
2135         uint32_t reference_clock;
2136         int res;
2137         uint64_t tmp64;
2138
2139         if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
2140                 return 0;
2141
2142         if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2143                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2144                         PHM_PlatformCaps_MicrocodeFanControl);
2145                 return 0;
2146         }
2147
2148         if (0 == ci_data->fan_table_start) {
2149                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2150                 return 0;
2151         }
2152
2153         duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
2154
2155         if (0 == duty100) {
2156                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2157                 return 0;
2158         }
2159
2160         tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2161         do_div(tmp64, 10000);
2162         fdo_min = (uint16_t)tmp64;
2163
2164         t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2165         t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2166
2167         pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2168         pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2169
2170         slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2171         slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2172
2173         fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2174         fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2175         fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2176
2177         fan_table.Slope1 = cpu_to_be16(slope1);
2178         fan_table.Slope2 = cpu_to_be16(slope2);
2179
2180         fan_table.FdoMin = cpu_to_be16(fdo_min);
2181
2182         fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2183
2184         fan_table.HystUp = cpu_to_be16(1);
2185
2186         fan_table.HystSlope = cpu_to_be16(1);
2187
2188         fan_table.TempRespLim = cpu_to_be16(5);
2189
2190         reference_clock = smu7_get_xclk(hwmgr);
2191
2192         fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2193
2194         fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2195
2196         fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2197
2198         res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
2199
2200         return 0;
2201 }
2202
2203 static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2204 {
2205         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2206
2207         if (data->need_update_smu7_dpm_table &
2208                         (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2209                 return ci_program_memory_timing_parameters(hwmgr);
2210
2211         return 0;
2212 }
2213
2214 int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2215 {
2216         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2217         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2218
2219         int result = 0;
2220         uint32_t low_sclk_interrupt_threshold = 0;
2221
2222         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2223                         PHM_PlatformCaps_SclkThrottleLowNotification)
2224                 && (hwmgr->gfx_arbiter.sclk_threshold !=
2225                                 data->low_sclk_interrupt_threshold)) {
2226                 data->low_sclk_interrupt_threshold =
2227                                 hwmgr->gfx_arbiter.sclk_threshold;
2228                 low_sclk_interrupt_threshold =
2229                                 data->low_sclk_interrupt_threshold;
2230
2231                 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2232
2233                 result = ci_copy_bytes_to_smc(
2234                                 hwmgr,
2235                                 smu_data->dpm_table_start +
2236                                 offsetof(SMU7_Discrete_DpmTable,
2237                                         LowSclkInterruptT),
2238                                 (uint8_t *)&low_sclk_interrupt_threshold,
2239                                 sizeof(uint32_t),
2240                                 SMC_RAM_END);
2241         }
2242
2243         result = ci_update_and_upload_mc_reg_table(hwmgr);
2244
2245         PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
2246
2247         result = ci_program_mem_timing_parameters(hwmgr);
2248         PP_ASSERT_WITH_CODE((result == 0),
2249                         "Failed to program memory timing parameters!",
2250                         );
2251
2252         return result;
2253 }
2254
2255 uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
2256 {
2257         switch (type) {
2258         case SMU_SoftRegisters:
2259                 switch (member) {
2260                 case HandshakeDisables:
2261                         return offsetof(SMU7_SoftRegisters, HandshakeDisables);
2262                 case VoltageChangeTimeout:
2263                         return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
2264                 case AverageGraphicsActivity:
2265                         return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
2266                 case PreVBlankGap:
2267                         return offsetof(SMU7_SoftRegisters, PreVBlankGap);
2268                 case VBlankTimeout:
2269                         return offsetof(SMU7_SoftRegisters, VBlankTimeout);
2270                 }
2271         case SMU_Discrete_DpmTable:
2272                 switch (member) {
2273                 case LowSclkInterruptThreshold:
2274                         return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
2275                 }
2276         }
2277         pr_debug("can't get the offset of type %x member %x\n", type, member);
2278         return 0;
2279 }
2280
2281 uint32_t ci_get_mac_definition(uint32_t value)
2282 {
2283         switch (value) {
2284         case SMU_MAX_LEVELS_GRAPHICS:
2285                 return SMU7_MAX_LEVELS_GRAPHICS;
2286         case SMU_MAX_LEVELS_MEMORY:
2287                 return SMU7_MAX_LEVELS_MEMORY;
2288         case SMU_MAX_LEVELS_LINK:
2289                 return SMU7_MAX_LEVELS_LINK;
2290         case SMU_MAX_ENTRIES_SMIO:
2291                 return SMU7_MAX_ENTRIES_SMIO;
2292         case SMU_MAX_LEVELS_VDDC:
2293                 return SMU7_MAX_LEVELS_VDDC;
2294         case SMU_MAX_LEVELS_VDDCI:
2295                 return SMU7_MAX_LEVELS_VDDCI;
2296         case SMU_MAX_LEVELS_MVDD:
2297                 return SMU7_MAX_LEVELS_MVDD;
2298         }
2299
2300         pr_debug("can't get the mac of %x\n", value);
2301         return 0;
2302 }
2303
2304 static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
2305 {
2306         uint32_t byte_count, start_addr;
2307         uint8_t *src;
2308         uint32_t data;
2309
2310         struct cgs_firmware_info info = {0};
2311
2312         cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
2313
2314         hwmgr->is_kicker = info.is_kicker;
2315         byte_count = info.image_size;
2316         src = (uint8_t *)info.kptr;
2317         start_addr = info.ucode_start_address;
2318
2319         if  (byte_count > SMC_RAM_END) {
2320                 pr_err("SMC address is beyond the SMC RAM area.\n");
2321                 return -EINVAL;
2322         }
2323
2324         cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
2325         PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
2326
2327         for (; byte_count >= 4; byte_count -= 4) {
2328                 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
2329                 cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
2330                 src += 4;
2331         }
2332         PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
2333
2334         if (0 != byte_count) {
2335                 pr_err("SMC size must be dividable by 4\n");
2336                 return -EINVAL;
2337         }
2338
2339         return 0;
2340 }
2341
/*
 * ci_upload_firmware - prepare the SMC and load its firmware if needed.
 *
 * The register accesses below form a fixed bring-up sequence; do not
 * reorder them: wait for boot, enable prefetch, gate the clock, then hold
 * the SMC in reset before streaming in the ucode.
 */
static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
{
	/* Already executing from SMC RAM: nothing to load. */
	if (ci_is_smc_ram_running(hwmgr)) {
		pr_info("smc is running, no need to load smc firmware\n");
		return 0;
	}
	/* Wait for the SMC boot ROM sequence to complete. */
	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
			boot_seq_done, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
			pre_fetcher_en, 1);

	/* Gate the SMC clock and assert reset while the ucode is written. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	return ci_load_smc_ucode(hwmgr);
}
2357
2358 int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
2359 {
2360         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2361         struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2362
2363         uint32_t tmp = 0;
2364         int result;
2365         bool error = false;
2366
2367         if (ci_upload_firmware(hwmgr))
2368                 return -EINVAL;
2369
2370         result = ci_read_smc_sram_dword(hwmgr,
2371                                 SMU7_FIRMWARE_HEADER_LOCATION +
2372                                 offsetof(SMU7_Firmware_Header, DpmTable),
2373                                 &tmp, SMC_RAM_END);
2374
2375         if (0 == result)
2376                 ci_data->dpm_table_start = tmp;
2377
2378         error |= (0 != result);
2379
2380         result = ci_read_smc_sram_dword(hwmgr,
2381                                 SMU7_FIRMWARE_HEADER_LOCATION +
2382                                 offsetof(SMU7_Firmware_Header, SoftRegisters),
2383                                 &tmp, SMC_RAM_END);
2384
2385         if (0 == result) {
2386                 data->soft_regs_start = tmp;
2387                 ci_data->soft_regs_start = tmp;
2388         }
2389
2390         error |= (0 != result);
2391
2392         result = ci_read_smc_sram_dword(hwmgr,
2393                                 SMU7_FIRMWARE_HEADER_LOCATION +
2394                                 offsetof(SMU7_Firmware_Header, mcRegisterTable),
2395                                 &tmp, SMC_RAM_END);
2396
2397         if (0 == result)
2398                 ci_data->mc_reg_table_start = tmp;
2399
2400         result = ci_read_smc_sram_dword(hwmgr,
2401                                 SMU7_FIRMWARE_HEADER_LOCATION +
2402                                 offsetof(SMU7_Firmware_Header, FanTable),
2403                                 &tmp, SMC_RAM_END);
2404
2405         if (0 == result)
2406                 ci_data->fan_table_start = tmp;
2407
2408         error |= (0 != result);
2409
2410         result = ci_read_smc_sram_dword(hwmgr,
2411                                 SMU7_FIRMWARE_HEADER_LOCATION +
2412                                 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
2413                                 &tmp, SMC_RAM_END);
2414
2415         if (0 == result)
2416                 ci_data->arb_table_start = tmp;
2417
2418         error |= (0 != result);
2419
2420         result = ci_read_smc_sram_dword(hwmgr,
2421                                 SMU7_FIRMWARE_HEADER_LOCATION +
2422                                 offsetof(SMU7_Firmware_Header, Version),
2423                                 &tmp, SMC_RAM_END);
2424
2425         if (0 == result)
2426                 hwmgr->microcode_version_info.SMC = tmp;
2427
2428         error |= (0 != result);
2429
2430         return error ? 1 : 0;
2431 }
2432
2433 static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2434 {
2435         return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2436 }
2437
2438 static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2439 {
2440         bool result = true;
2441
2442         switch (in_reg) {
2443         case  mmMC_SEQ_RAS_TIMING:
2444                 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2445                 break;
2446
2447         case  mmMC_SEQ_DLL_STBY:
2448                 *out_reg = mmMC_SEQ_DLL_STBY_LP;
2449                 break;
2450
2451         case  mmMC_SEQ_G5PDX_CMD0:
2452                 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2453                 break;
2454
2455         case  mmMC_SEQ_G5PDX_CMD1:
2456                 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2457                 break;
2458
2459         case  mmMC_SEQ_G5PDX_CTRL:
2460                 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2461                 break;
2462
2463         case mmMC_SEQ_CAS_TIMING:
2464                 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2465                 break;
2466
2467         case mmMC_SEQ_MISC_TIMING:
2468                 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2469                 break;
2470
2471         case mmMC_SEQ_MISC_TIMING2:
2472                 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2473                 break;
2474
2475         case mmMC_SEQ_PMG_DVS_CMD:
2476                 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2477                 break;
2478
2479         case mmMC_SEQ_PMG_DVS_CTL:
2480                 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2481                 break;
2482
2483         case mmMC_SEQ_RD_CTL_D0:
2484                 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2485                 break;
2486
2487         case mmMC_SEQ_RD_CTL_D1:
2488                 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2489                 break;
2490
2491         case mmMC_SEQ_WR_CTL_D0:
2492                 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2493                 break;
2494
2495         case mmMC_SEQ_WR_CTL_D1:
2496                 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2497                 break;
2498
2499         case mmMC_PMG_CMD_EMRS:
2500                 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2501                 break;
2502
2503         case mmMC_PMG_CMD_MRS:
2504                 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2505                 break;
2506
2507         case mmMC_PMG_CMD_MRS1:
2508                 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2509                 break;
2510
2511         case mmMC_SEQ_PMG_TIMING:
2512                 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
2513                 break;
2514
2515         case mmMC_PMG_CMD_MRS2:
2516                 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2517                 break;
2518
2519         case mmMC_SEQ_WR_CTL_2:
2520                 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
2521                 break;
2522
2523         default:
2524                 result = false;
2525                 break;
2526         }
2527
2528         return result;
2529 }
2530
2531 static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
2532 {
2533         uint32_t i;
2534         uint16_t address;
2535
2536         for (i = 0; i < table->last; i++) {
2537                 table->mc_reg_address[i].s0 =
2538                         ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
2539                         ? address : table->mc_reg_address[i].s1;
2540         }
2541         return 0;
2542 }
2543
2544 static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2545                                         struct ci_mc_reg_table *ni_table)
2546 {
2547         uint8_t i, j;
2548
2549         PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2550                 "Invalid VramInfo table.", return -EINVAL);
2551         PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2552                 "Invalid VramInfo table.", return -EINVAL);
2553
2554         for (i = 0; i < table->last; i++)
2555                 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2556
2557         ni_table->last = table->last;
2558
2559         for (i = 0; i < table->num_entries; i++) {
2560                 ni_table->mc_reg_table_entry[i].mclk_max =
2561                         table->mc_reg_table_entry[i].mclk_max;
2562                 for (j = 0; j < table->last; j++) {
2563                         ni_table->mc_reg_table_entry[i].mc_data[j] =
2564                                 table->mc_reg_table_entry[i].mc_data[j];
2565                 }
2566         }
2567
2568         ni_table->num_entries = table->num_entries;
2569
2570         return 0;
2571 }
2572
/*
 * ci_set_mc_special_registers - synthesize extra MC register entries.
 *
 * Some MC registers (EMRS/MRS command registers) are not listed by the
 * VBIOS table but must be derived from MC_SEQ_MISC1 / MC_SEQ_RESERVE_M.
 * This appends those derived registers at the end of the table: i walks
 * the original entries while j appends new ones, so the table grows
 * in place and table->last is updated at the end.
 */
static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
					struct ci_mc_reg_table *table)
{
	uint8_t i, j, k;
	uint32_t temp_reg;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	for (i = 0, j = table->last; i < table->last; i++) {
		/* j must still leave room for at least one appended entry. */
		PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
			"Invalid VramInfo table.", return -EINVAL);

		switch (table->mc_reg_address[i].s1) {

		case mmMC_SEQ_MISC1:
			/* Derive an EMRS entry: keep the current EMRS high
			 * half, take the low half from MISC1's high half.
			 */
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) |
					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;
			PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
				"Invalid VramInfo table.", return -EINVAL);

			/* Derive an MRS entry from MISC1's low half; non-GDDR5
			 * memory additionally needs bit 8 set.
			 */
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);

				if (!data->is_memory_gddr5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;
			PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
				"Invalid VramInfo table.", return -EINVAL);

			/* Non-GDDR5 also needs an auto-command entry built
			 * from MISC1's high half.
			 */
			if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++) {
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				}
				j++;
				PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
					"Invalid VramInfo table.", return -EINVAL);
			}

			break;

		case mmMC_SEQ_RESERVE_M:
			/* Derive an MRS1 entry from RESERVE_M's low half. */
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
				"Invalid VramInfo table.", return -EINVAL);
			break;

		default:
			break;
		}

	}

	/* Commit the appended entries. */
	table->last = j;

	return 0;
}
2652
2653 static int ci_set_valid_flag(struct ci_mc_reg_table *table)
2654 {
2655         uint8_t i, j;
2656
2657         for (i = 0; i < table->last; i++) {
2658                 for (j = 1; j < table->num_entries; j++) {
2659                         if (table->mc_reg_table_entry[j-1].mc_data[i] !=
2660                                 table->mc_reg_table_entry[j].mc_data[i]) {
2661                                 table->validflag |= (1 << i);
2662                                 break;
2663                         }
2664                 }
2665         }
2666
2667         return 0;
2668 }
2669
2670 int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2671 {
2672         int result;
2673         struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2674         pp_atomctrl_mc_reg_table *table;
2675         struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
2676         uint8_t module_index = ci_get_memory_modile_index(hwmgr);
2677
2678         table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
2679
2680         if (NULL == table)
2681                 return -ENOMEM;
2682
2683         /* Program additional LP registers that are no longer programmed by VBIOS */
2684         cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2685         cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2686         cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
2687         cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
2688         cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
2689         cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
2690         cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
2691         cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
2692         cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
2693         cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2694         cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
2695         cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
2696         cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
2697         cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
2698         cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2699         cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2700         cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2701         cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2702         cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
2703         cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
2704
2705         memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
2706
2707         result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
2708
2709         if (0 == result)
2710                 result = ci_copy_vbios_smc_reg_table(table, ni_table);
2711
2712         if (0 == result) {
2713                 ci_set_s0_mc_reg_index(ni_table);
2714                 result = ci_set_mc_special_registers(hwmgr, ni_table);
2715         }
2716
2717         if (0 == result)
2718                 ci_set_valid_flag(ni_table);
2719
2720         kfree(table);
2721
2722         return result;
2723 }
2724
2725 bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
2726 {
2727         return ci_is_smc_ram_running(hwmgr);
2728 }
2729
2730 int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
2731                 struct amd_pp_profile *request)
2732 {
2733         struct ci_smumgr *smu_data = (struct ci_smumgr *)
2734                         (hwmgr->smu_backend);
2735         struct SMU7_Discrete_GraphicsLevel *levels =
2736                         smu_data->smc_state_table.GraphicsLevel;
2737         uint32_t array = smu_data->dpm_table_start +
2738                         offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2739         uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
2740                         SMU7_MAX_LEVELS_GRAPHICS;
2741         uint32_t i;
2742
2743         for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2744                 levels[i].ActivityLevel =
2745                                 cpu_to_be16(request->activity_threshold);
2746                 levels[i].EnabledForActivity = 1;
2747                 levels[i].UpH = request->up_hyst;
2748                 levels[i].DownH = request->down_hyst;
2749         }
2750
2751         return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
2752                                 array_size, SMC_RAM_END);
2753 }