Merge tag 'trace-v4.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
[muen/linux.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 #include "dm_services_types.h"
27 #include "dc.h"
28 #include "dc/inc/core_types.h"
29
30 #include "vid.h"
31 #include "amdgpu.h"
32 #include "amdgpu_display.h"
33 #include "atom.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
36
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
42
43 #include "ivsrcid/ivsrcid_vislands30.h"
44
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
49
50 #include <drm/drmP.h>
51 #include <drm/drm_atomic.h>
52 #include <drm/drm_atomic_helper.h>
53 #include <drm/drm_dp_mst_helper.h>
54 #include <drm/drm_fb_helper.h>
55 #include <drm/drm_edid.h>
56
57 #include "modules/inc/mod_freesync.h"
58
59 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60 #include "ivsrcid/irqsrcs_dcn_1_0.h"
61
62 #include "dcn/dcn_1_0_offset.h"
63 #include "dcn/dcn_1_0_sh_mask.h"
64 #include "soc15_hw_ip.h"
65 #include "vega10_ip_offset.h"
66
67 #include "soc15_common.h"
68 #endif
69
70 #include "modules/inc/mod_freesync.h"
71
72 #include "i2caux_interface.h"
73
74 /* basic init/fini API */
75 static int amdgpu_dm_init(struct amdgpu_device *adev);
76 static void amdgpu_dm_fini(struct amdgpu_device *adev);
77
/* initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
84 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
85 /* removes and deallocates the drm structures, created by the above function */
86 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
87
88 static void
89 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
90
91 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
92                                 struct amdgpu_plane *aplane,
93                                 unsigned long possible_crtcs);
94 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
95                                struct drm_plane *plane,
96                                uint32_t link_index);
97 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
98                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
99                                     uint32_t link_index,
100                                     struct amdgpu_encoder *amdgpu_encoder);
101 static int amdgpu_dm_encoder_init(struct drm_device *dev,
102                                   struct amdgpu_encoder *aencoder,
103                                   uint32_t link_index);
104
105 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
106
107 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
108                                    struct drm_atomic_state *state,
109                                    bool nonblock);
110
111 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
112
113 static int amdgpu_dm_atomic_check(struct drm_device *dev,
114                                   struct drm_atomic_state *state);
115
116
117
118
/* Default per-CRTC plane layout: every plane slot is a primary plane. */
static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};
127
/* Carrizo plane layout: three primaries plus one YUV-capable underlay
 * exposed as an overlay plane. (Identifier spelling "carizzo" is
 * historical; keep it — other code references this name.) */
static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
};
134
/* Stoney plane layout: two primaries plus one YUV-capable underlay. */
static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};
140
141 /*
142  * dm_vblank_get_counter
143  *
144  * @brief
145  * Get counter for number of vertical blanks
146  *
147  * @param
148  * struct amdgpu_device *adev - [in] desired amdgpu device
149  * int disp_idx - [in] which CRTC to get the counter from
150  *
151  * @return
152  * Counter for vertical blanks
153  */
154 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
155 {
156         if (crtc >= adev->mode_info.num_crtc)
157                 return 0;
158         else {
159                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
160                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
161                                 acrtc->base.state);
162
163
164                 if (acrtc_state->stream == NULL) {
165                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
166                                   crtc);
167                         return 0;
168                 }
169
170                 return dc_stream_get_vblank_counter(acrtc_state->stream);
171         }
172 }
173
174 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
175                                   u32 *vbl, u32 *position)
176 {
177         uint32_t v_blank_start, v_blank_end, h_position, v_position;
178
179         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
180                 return -EINVAL;
181         else {
182                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
183                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
184                                                 acrtc->base.state);
185
186                 if (acrtc_state->stream ==  NULL) {
187                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
188                                   crtc);
189                         return 0;
190                 }
191
192                 /*
193                  * TODO rework base driver to use values directly.
194                  * for now parse it back into reg-format
195                  */
196                 dc_stream_get_scanoutpos(acrtc_state->stream,
197                                          &v_blank_start,
198                                          &v_blank_end,
199                                          &h_position,
200                                          &v_position);
201
202                 *position = v_position | (h_position << 16);
203                 *vbl = v_blank_start | (v_blank_end << 16);
204         }
205
206         return 0;
207 }
208
/* Report whether the DM IP block is idle. No idle tracking exists yet,
 * so the block always claims to be idle. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
214
/* Wait for the DM IP block to go idle. Nothing to wait on yet, so this
 * unconditionally reports success. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
220
/* Soft-reset check hook: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
225
/* Soft-reset hook: no reset procedure is implemented, report success. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
231
232 static struct amdgpu_crtc *
233 get_crtc_by_otg_inst(struct amdgpu_device *adev,
234                      int otg_inst)
235 {
236         struct drm_device *dev = adev->ddev;
237         struct drm_crtc *crtc;
238         struct amdgpu_crtc *amdgpu_crtc;
239
240         /*
241          * following if is check inherited from both functions where this one is
242          * used now. Need to be checked why it could happen.
243          */
244         if (otg_inst == -1) {
245                 WARN_ON(1);
246                 return adev->mode_info.crtcs[0];
247         }
248
249         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
250                 amdgpu_crtc = to_amdgpu_crtc(crtc);
251
252                 if (amdgpu_crtc->otg_inst == otg_inst)
253                         return amdgpu_crtc;
254         }
255
256         return NULL;
257 }
258
/* Page-flip completion IRQ handler. Sends the pending vblank event for the
 * flipped CRTC (if one was submitted) and resets the flip state to
 * AMDGPU_FLIP_NONE. Runs in interrupt context under ddev->event_lock. */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/*TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	/* Ignore stray pflip IRQs: only act when a flip was actually submitted. */
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}


	/* wakeup userspace */
	if (amdgpu_crtc->event) {
		/* Update to correct count/ts if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;

	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
					__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	/* Drop the vblank reference held for this flip (NOTE(review): the
	 * matching drm_crtc_vblank_get is in the submission path, not in view
	 * here — confirm pairing). */
	drm_crtc_vblank_put(&amdgpu_crtc->base);
}
309
310 static void dm_crtc_high_irq(void *interrupt_params)
311 {
312         struct common_irq_params *irq_params = interrupt_params;
313         struct amdgpu_device *adev = irq_params->adev;
314         uint8_t crtc_index = 0;
315         struct amdgpu_crtc *acrtc;
316
317         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
318
319         if (acrtc)
320                 crtc_index = acrtc->crtc_id;
321
322         drm_handle_vblank(adev->ddev, crtc_index);
323         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
324 }
325
326 static int dm_set_clockgating_state(void *handle,
327                   enum amd_clockgating_state state)
328 {
329         return 0;
330 }
331
332 static int dm_set_powergating_state(void *handle,
333                   enum amd_powergating_state state)
334 {
335         return 0;
336 }
337
338 /* Prototypes of private functions */
339 static int dm_early_init(void* handle);
340
341 static void hotplug_notify_work_func(struct work_struct *work)
342 {
343         struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
344         struct drm_device *dev = dm->ddev;
345
346         drm_kms_helper_hotplug_event(dev);
347 }
348
349 #if defined(CONFIG_DRM_AMD_DC_FBC)
350 /* Allocate memory for FBC compressed data  */
351 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
352 {
353         struct drm_device *dev = connector->dev;
354         struct amdgpu_device *adev = dev->dev_private;
355         struct dm_comressor_info *compressor = &adev->dm.compressor;
356         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
357         struct drm_display_mode *mode;
358         unsigned long max_size = 0;
359
360         if (adev->dm.dc->fbc_compressor == NULL)
361                 return;
362
363         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
364                 return;
365
366         if (compressor->bo_ptr)
367                 return;
368
369
370         list_for_each_entry(mode, &connector->modes, head) {
371                 if (max_size < mode->htotal * mode->vtotal)
372                         max_size = mode->htotal * mode->vtotal;
373         }
374
375         if (max_size) {
376                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
377                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
378                             &compressor->gpu_addr, &compressor->cpu_addr);
379
380                 if (r)
381                         DRM_ERROR("DM: Failed to initialize FBC\n");
382                 else {
383                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
384                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
385                 }
386
387         }
388
389 }
390 #endif
391
392
/* Init display KMS
 *
 * Creates the Display Core (DC) instance, the freesync module, and the DRM
 * display structures, and initializes DM IRQ handling. On any failure it
 * unwinds via amdgpu_dm_fini().
 *
 * Returns 0 on success, -1 on failure.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	/* Describe the ASIC to DC. */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Log verbosity is selected by the amdgpu_dc_log module parameter. */
	if (amdgpu_dc_log)
		init_data.log_mask = DC_DEFAULT_LOG_MASK;
	else
		init_data.log_mask = DC_MIN_LOG_MASK;

	/*
	 * TODO debug why this doesn't work on Raven
	 */
	if (adev->flags & AMD_IS_APU &&
	    adev->asic_type >= CHIP_CARRIZO &&
	    adev->asic_type < CHIP_RAVEN)
		init_data.flags.gpu_vm_support = true;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	/* Freesync failure is logged but not fatal. */
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	/* amdgpu_dm_fini() tolerates partially-initialized state. */
	amdgpu_dm_fini(adev);

	return -1;
}
500
501 static void amdgpu_dm_fini(struct amdgpu_device *adev)
502 {
503         amdgpu_dm_destroy_drm_device(&adev->dm);
504         /*
505          * TODO: pageflip, vlank interrupt
506          *
507          * amdgpu_dm_irq_fini(adev);
508          */
509
510         if (adev->dm.cgs_device) {
511                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
512                 adev->dm.cgs_device = NULL;
513         }
514         if (adev->dm.freesync_module) {
515                 mod_freesync_destroy(adev->dm.freesync_module);
516                 adev->dm.freesync_module = NULL;
517         }
518         /* DC Destroy TODO: Replace destroy DAL */
519         if (adev->dm.dc)
520                 dc_destroy(&adev->dm.dc);
521         return;
522 }
523
/* Software-init hook: DM does all its setup in dm_hw_init(), nothing here. */
static int dm_sw_init(void *handle)
{
	return 0;
}
528
/* Software-fini hook: nothing to release here; see dm_hw_fini(). */
static int dm_sw_fini(void *handle)
{
	return 0;
}
533
534 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
535 {
536         struct amdgpu_dm_connector *aconnector;
537         struct drm_connector *connector;
538         int ret = 0;
539
540         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
541
542         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
543                 aconnector = to_amdgpu_dm_connector(connector);
544                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
545                     aconnector->mst_mgr.aux) {
546                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
547                                         aconnector, aconnector->base.base.id);
548
549                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
550                         if (ret < 0) {
551                                 DRM_ERROR("DM_MST: Failed to start MST\n");
552                                 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
553                                 return ret;
554                                 }
555                         }
556         }
557
558         drm_modeset_unlock(&dev->mode_config.connection_mutex);
559         return ret;
560 }
561
562 static int dm_late_init(void *handle)
563 {
564         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
565
566         return detect_mst_link_for_all_connectors(adev->ddev);
567 }
568
569 static void s3_handle_mst(struct drm_device *dev, bool suspend)
570 {
571         struct amdgpu_dm_connector *aconnector;
572         struct drm_connector *connector;
573
574         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
575
576         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
577                    aconnector = to_amdgpu_dm_connector(connector);
578                    if (aconnector->dc_link->type == dc_connection_mst_branch &&
579                                    !aconnector->mst_port) {
580
581                            if (suspend)
582                                    drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
583                            else
584                                    drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
585                    }
586         }
587
588         drm_modeset_unlock(&dev->mode_config.connection_mutex);
589 }
590
/* Hardware-init hook: bring up the display manager and HPD handling. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	/* NOTE(review): amdgpu_dm_init() returns -1 on failure but the result
	 * is discarded here — confirm whether the error should propagate. */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
600
/* Hardware-fini hook: disable HPD, then tear down DM IRQs and the display
 * manager itself. */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}
611
612 static int dm_suspend(void *handle)
613 {
614         struct amdgpu_device *adev = handle;
615         struct amdgpu_display_manager *dm = &adev->dm;
616         int ret = 0;
617
618         s3_handle_mst(adev->ddev, true);
619
620         amdgpu_dm_irq_suspend(adev);
621
622         WARN_ON(adev->dm.cached_state);
623         adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
624
625         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
626
627         return ret;
628 }
629
630 static struct amdgpu_dm_connector *
631 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
632                                              struct drm_crtc *crtc)
633 {
634         uint32_t i;
635         struct drm_connector_state *new_con_state;
636         struct drm_connector *connector;
637         struct drm_crtc *crtc_from_state;
638
639         for_each_new_connector_in_state(state, connector, new_con_state, i) {
640                 crtc_from_state = new_con_state->crtc;
641
642                 if (crtc_from_state == crtc)
643                         return to_amdgpu_dm_connector(connector);
644         }
645
646         return NULL;
647 }
648
649 static int dm_resume(void *handle)
650 {
651         struct amdgpu_device *adev = handle;
652         struct amdgpu_display_manager *dm = &adev->dm;
653         int ret = 0;
654
655         /* power on hardware */
656         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
657
658         ret = amdgpu_dm_display_resume(adev);
659         return ret;
660 }
661
/* Restore the display pipeline after S3 resume: re-enable MST and HPD,
 * re-detect every link, release the DC objects duplicated at suspend time,
 * and replay the cached atomic state.
 *
 * Returns 0 on success or the error from drm_atomic_helper_resume().
 */
int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;

	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		/* Drop the stale sink; detection repopulates it below. */
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	adev->dm.cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}
748
/* amd_ip_funcs vtable registering the DM lifecycle hooks with the amdgpu
 * IP-block framework (init/fini, suspend/resume, idle/reset and gating
 * stubs defined above). */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
766
/* IP block descriptor exposing DM as the display (DCE) IP, version 1.0. */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
775
776
777 static struct drm_atomic_state *
778 dm_atomic_state_alloc(struct drm_device *dev)
779 {
780         struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
781
782         if (!state)
783                 return NULL;
784
785         if (drm_atomic_state_init(dev, &state->base) < 0)
786                 goto fail;
787
788         return &state->base;
789
790 fail:
791         kfree(state);
792         return NULL;
793 }
794
795 static void
796 dm_atomic_state_clear(struct drm_atomic_state *state)
797 {
798         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
799
800         if (dm_state->context) {
801                 dc_release_state(dm_state->context);
802                 dm_state->context = NULL;
803         }
804
805         drm_atomic_state_default_clear(state);
806 }
807
/* Free hook for the DM atomic state: release the base state, then the
 * wrapper allocated in dm_atomic_state_alloc(). */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	drm_atomic_state_default_release(state);
	kfree(dm_state);
}
815
/* KMS mode-config hooks: framebuffer creation, atomic check/commit, and
 * the DM-specific atomic state alloc/clear/free defined above. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
	.atomic_state_alloc = dm_atomic_state_alloc,
	.atomic_state_clear = dm_atomic_state_clear,
	.atomic_state_free = dm_atomic_state_alloc_free
};
825
826 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
827         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
828 };
829
830 static void
831 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
832 {
833         struct drm_connector *connector = &aconnector->base;
834         struct drm_device *dev = connector->dev;
835         struct dc_sink *sink;
836
837         /* MST handled by drm_mst framework */
838         if (aconnector->mst_mgr.mst_state == true)
839                 return;
840
841
842         sink = aconnector->dc_link->local_sink;
843
844         /* Edid mgmt connector gets first update only in mode_valid hook and then
845          * the connector sink is set to either fake or physical sink depends on link status.
846          * don't do it here if u are during boot
847          */
848         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
849                         && aconnector->dc_em_sink) {
850
851                 /* For S3 resume with headless use eml_sink to fake stream
852                  * because on resume connecotr->sink is set ti NULL
853                  */
854                 mutex_lock(&dev->mode_config.mutex);
855
856                 if (sink) {
857                         if (aconnector->dc_sink) {
858                                 amdgpu_dm_remove_sink_from_freesync_module(
859                                                                 connector);
860                                 /* retain and release bellow are used for
861                                  * bump up refcount for sink because the link don't point
862                                  * to it anymore after disconnect so on next crtc to connector
863                                  * reshuffle by UMD we will get into unwanted dc_sink release
864                                  */
865                                 if (aconnector->dc_sink != aconnector->dc_em_sink)
866                                         dc_sink_release(aconnector->dc_sink);
867                         }
868                         aconnector->dc_sink = sink;
869                         amdgpu_dm_add_sink_to_freesync_module(
870                                                 connector, aconnector->edid);
871                 } else {
872                         amdgpu_dm_remove_sink_from_freesync_module(connector);
873                         if (!aconnector->dc_sink)
874                                 aconnector->dc_sink = aconnector->dc_em_sink;
875                         else if (aconnector->dc_sink != aconnector->dc_em_sink)
876                                 dc_sink_retain(aconnector->dc_sink);
877                 }
878
879                 mutex_unlock(&dev->mode_config.mutex);
880                 return;
881         }
882
883         /*
884          * TODO: temporary guard to look for proper fix
885          * if this sink is MST sink, we should not do anything
886          */
887         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
888                 return;
889
890         if (aconnector->dc_sink == sink) {
891                 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
892                  * Do nothing!! */
893                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
894                                 aconnector->connector_id);
895                 return;
896         }
897
898         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
899                 aconnector->connector_id, aconnector->dc_sink, sink);
900
901         mutex_lock(&dev->mode_config.mutex);
902
903         /* 1. Update status of the drm connector
904          * 2. Send an event and let userspace tell us what to do */
905         if (sink) {
906                 /* TODO: check if we still need the S3 mode update workaround.
907                  * If yes, put it here. */
908                 if (aconnector->dc_sink)
909                         amdgpu_dm_remove_sink_from_freesync_module(
910                                                         connector);
911
912                 aconnector->dc_sink = sink;
913                 if (sink->dc_edid.length == 0) {
914                         aconnector->edid = NULL;
915                 } else {
916                         aconnector->edid =
917                                 (struct edid *) sink->dc_edid.raw_edid;
918
919
920                         drm_mode_connector_update_edid_property(connector,
921                                         aconnector->edid);
922                 }
923                 amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
924
925         } else {
926                 amdgpu_dm_remove_sink_from_freesync_module(connector);
927                 drm_mode_connector_update_edid_property(connector, NULL);
928                 aconnector->num_modes = 0;
929                 aconnector->dc_sink = NULL;
930         }
931
932         mutex_unlock(&dev->mode_config.mutex);
933 }
934
/*
 * Long-pulse HPD handler, registered per connector in register_hpd_handlers()
 * (runs in INTERRUPT_LOW_IRQ_CONTEXT). @param is the amdgpu_dm_connector the
 * interrupt source was registered for.
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or notify
	 * the OS since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	/* A real hotplug supersedes any previously faked enable. */
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		/* Only tell userspace when the connector state is not forced. */
		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
963
/*
 * Service MST sideband IRQs signaled via a DP short pulse: read the ESI/sink
 * status vector from DPCD, hand it to the DRM MST manager, ACK it back to the
 * sink, and repeat while new IRQs keep arriving. The loop is bounded by
 * max_process_count so a misbehaving sink cannot wedge us here.
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	/* Sinks older than DPCD 1.2 expose the IRQ vector at the legacy address. */
	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* AUX writes can fail transiently; retry a few times. */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
1041
/*
 * Short-pulse (hpd_rx) handler registered per connector. Lets DC handle link
 * loss/retraining, re-runs detection when a downstream port status change is
 * reported, and forwards MST sideband IRQs to dm_handle_hpd_rx_irq().
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO: Temporarily add a mutex so the hpd interrupt does not have a
	 * gpio conflict; after the i2c helper is implemented, this mutex
	 * should be retired. Note it is deliberately NOT taken for MST branch
	 * links.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	/* A trained link or an MST root may still carry sideband IRQs to ACK. */
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}
1082
1083 static void register_hpd_handlers(struct amdgpu_device *adev)
1084 {
1085         struct drm_device *dev = adev->ddev;
1086         struct drm_connector *connector;
1087         struct amdgpu_dm_connector *aconnector;
1088         const struct dc_link *dc_link;
1089         struct dc_interrupt_params int_params = {0};
1090
1091         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1092         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1093
1094         list_for_each_entry(connector,
1095                         &dev->mode_config.connector_list, head) {
1096
1097                 aconnector = to_amdgpu_dm_connector(connector);
1098                 dc_link = aconnector->dc_link;
1099
1100                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1101                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1102                         int_params.irq_source = dc_link->irq_source_hpd;
1103
1104                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
1105                                         handle_hpd_irq,
1106                                         (void *) aconnector);
1107                 }
1108
1109                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1110
1111                         /* Also register for DP short pulse (hpd_rx). */
1112                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1113                         int_params.irq_source = dc_link->irq_source_hpd_rx;
1114
1115                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
1116                                         handle_hpd_rx_irq,
1117                                         (void *) aconnector);
1118                 }
1119         }
1120 }
1121
/* Register IRQ sources and initialize IRQ callbacks (DCE 8/10/11/12 parts). */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	/* Soc15-generation ASICs route DCE interrupts through their own
	 * IH client id instead of the legacy client. */
	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-source params slot, indexed relative to the first vblank source. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt; pflip src ids are spaced two apart, hence i += 2. */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1206
1207 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks (DCN 1.0 / Raven). */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 * */

	/* Use VSTARTUP interrupt (DCN's vblank-equivalent), one per CRTC. */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt, one per pipe (HUBP). */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1291 #endif
1292
1293 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1294 {
1295         int r;
1296
1297         adev->mode_info.mode_config_initialized = true;
1298
1299         adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1300         adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1301
1302         adev->ddev->mode_config.max_width = 16384;
1303         adev->ddev->mode_config.max_height = 16384;
1304
1305         adev->ddev->mode_config.preferred_depth = 24;
1306         adev->ddev->mode_config.prefer_shadow = 1;
1307         /* indicate support of immediate flip */
1308         adev->ddev->mode_config.async_page_flip = true;
1309
1310         adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
1311
1312         r = amdgpu_display_modeset_create_props(adev);
1313         if (r)
1314                 return r;
1315
1316         return 0;
1317 }
1318
1319 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1320         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1321
1322 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1323 {
1324         struct amdgpu_display_manager *dm = bl_get_data(bd);
1325
1326         if (dc_link_set_backlight_level(dm->backlight_link,
1327                         bd->props.brightness, 0, 0))
1328                 return 0;
1329         else
1330                 return 1;
1331 }
1332
/*
 * backlight_ops.get_brightness hook: no hardware readback is done here;
 * simply echo the cached brightness from the backlight core.
 */
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}
1337
/* Backlight class ops for the DM-registered backlight device. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status  = amdgpu_dm_backlight_update_status,
};
1342
1343 static void
1344 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1345 {
1346         char bl_name[16];
1347         struct backlight_properties props = { 0 };
1348
1349         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1350         props.type = BACKLIGHT_RAW;
1351
1352         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1353                         dm->adev->ddev->primary->index);
1354
1355         dm->backlight_dev = backlight_device_register(bl_name,
1356                         dm->adev->ddev->dev,
1357                         dm,
1358                         &amdgpu_dm_backlight_ops,
1359                         &props);
1360
1361         if (IS_ERR(dm->backlight_dev))
1362                 DRM_ERROR("DM: Backlight registration failed!\n");
1363         else
1364                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1365 }
1366
1367 #endif
1368
1369 static int initialize_plane(struct amdgpu_display_manager *dm,
1370                              struct amdgpu_mode_info *mode_info,
1371                              int plane_id)
1372 {
1373         struct amdgpu_plane *plane;
1374         unsigned long possible_crtcs;
1375         int ret = 0;
1376
1377         plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
1378         mode_info->planes[plane_id] = plane;
1379
1380         if (!plane) {
1381                 DRM_ERROR("KMS: Failed to allocate plane\n");
1382                 return -ENOMEM;
1383         }
1384         plane->base.type = mode_info->plane_type[plane_id];
1385
1386         /*
1387          * HACK: IGT tests expect that each plane can only have one
1388          * one possible CRTC. For now, set one CRTC for each
1389          * plane that is not an underlay, but still allow multiple
1390          * CRTCs for underlay planes.
1391          */
1392         possible_crtcs = 1 << plane_id;
1393         if (plane_id >= dm->dc->caps.max_streams)
1394                 possible_crtcs = 0xff;
1395
1396         ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
1397
1398         if (ret) {
1399                 DRM_ERROR("KMS: Failed to initialize plane\n");
1400                 return ret;
1401         }
1402
1403         return ret;
1404 }
1405
/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
1413 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1414 {
1415         struct amdgpu_display_manager *dm = &adev->dm;
1416         int32_t i;
1417         struct amdgpu_dm_connector *aconnector = NULL;
1418         struct amdgpu_encoder *aencoder = NULL;
1419         struct amdgpu_mode_info *mode_info = &adev->mode_info;
1420         uint32_t link_cnt;
1421         int32_t total_overlay_planes, total_primary_planes;
1422
1423         link_cnt = dm->dc->caps.max_links;
1424         if (amdgpu_dm_mode_config_init(dm->adev)) {
1425                 DRM_ERROR("DM: Failed to initialize mode config\n");
1426                 return -1;
1427         }
1428
1429         /* Identify the number of planes to be initialized */
1430         total_overlay_planes = dm->dc->caps.max_slave_planes;
1431         total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
1432
1433         /* First initialize overlay planes, index starting after primary planes */
1434         for (i = (total_overlay_planes - 1); i >= 0; i--) {
1435                 if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
1436                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1437                         goto fail;
1438                 }
1439         }
1440
1441         /* Initialize primary planes */
1442         for (i = (total_primary_planes - 1); i >= 0; i--) {
1443                 if (initialize_plane(dm, mode_info, i)) {
1444                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
1445                         goto fail;
1446                 }
1447         }
1448
1449         for (i = 0; i < dm->dc->caps.max_streams; i++)
1450                 if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
1451                         DRM_ERROR("KMS: Failed to initialize crtc\n");
1452                         goto fail;
1453                 }
1454
1455         dm->display_indexes_num = dm->dc->caps.max_streams;
1456
1457         /* loops over all connectors on the board */
1458         for (i = 0; i < link_cnt; i++) {
1459
1460                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1461                         DRM_ERROR(
1462                                 "KMS: Cannot support more than %d display indexes\n",
1463                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
1464                         continue;
1465                 }
1466
1467                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1468                 if (!aconnector)
1469                         goto fail;
1470
1471                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1472                 if (!aencoder)
1473                         goto fail;
1474
1475                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1476                         DRM_ERROR("KMS: Failed to initialize encoder\n");
1477                         goto fail;
1478                 }
1479
1480                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1481                         DRM_ERROR("KMS: Failed to initialize connector\n");
1482                         goto fail;
1483                 }
1484
1485                 if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
1486                                 DETECT_REASON_BOOT))
1487                         amdgpu_dm_update_connector_after_detect(aconnector);
1488         }
1489
1490         /* Software is initialized. Now we can register interrupt handlers. */
1491         switch (adev->asic_type) {
1492         case CHIP_BONAIRE:
1493         case CHIP_HAWAII:
1494         case CHIP_KAVERI:
1495         case CHIP_KABINI:
1496         case CHIP_MULLINS:
1497         case CHIP_TONGA:
1498         case CHIP_FIJI:
1499         case CHIP_CARRIZO:
1500         case CHIP_STONEY:
1501         case CHIP_POLARIS11:
1502         case CHIP_POLARIS10:
1503         case CHIP_POLARIS12:
1504         case CHIP_VEGA10:
1505         case CHIP_VEGA12:
1506                 if (dce110_register_irq_handlers(dm->adev)) {
1507                         DRM_ERROR("DM: Failed to initialize IRQ\n");
1508                         goto fail;
1509                 }
1510                 break;
1511 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1512         case CHIP_RAVEN:
1513                 if (dcn10_register_irq_handlers(dm->adev)) {
1514                         DRM_ERROR("DM: Failed to initialize IRQ\n");
1515                         goto fail;
1516                 }
1517                 /*
1518                  * Temporary disable until pplib/smu interaction is implemented
1519                  */
1520                 dm->dc->debug.disable_stutter = true;
1521                 break;
1522 #endif
1523         default:
1524                 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1525                 goto fail;
1526         }
1527
1528         return 0;
1529 fail:
1530         kfree(aencoder);
1531         kfree(aconnector);
1532         for (i = 0; i < dm->dc->caps.max_planes; i++)
1533                 kfree(mode_info->planes[i]);
1534         return -1;
1535 }
1536
1537 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1538 {
1539         drm_mode_config_cleanup(dm->ddev);
1540         return;
1541 }
1542
1543 /******************************************************************************
1544  * amdgpu_display_funcs functions
1545  *****************************************************************************/
1546
1547 /**
1548  * dm_bandwidth_update - program display watermarks
1549  *
1550  * @adev: amdgpu_device pointer
1551  *
1552  * Calculate and program the display watermarks and line buffer allocation.
1553  */
1554 static void dm_bandwidth_update(struct amdgpu_device *adev)
1555 {
1556         /* TODO: implement later */
1557 }
1558
/* amdgpu_display_funcs.backlight_set_level hook — intentionally a stub;
 * backlight control goes through the backlight class device instead. */
static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				     u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}
1564
/* amdgpu_display_funcs.backlight_get_level hook — stub; always reports 0. */
static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}
1570
1571 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1572                                 struct drm_file *filp)
1573 {
1574         struct mod_freesync_params freesync_params;
1575         uint8_t num_streams;
1576         uint8_t i;
1577
1578         struct amdgpu_device *adev = dev->dev_private;
1579         int r = 0;
1580
1581         /* Get freesync enable flag from DRM */
1582
1583         num_streams = dc_get_current_stream_count(adev->dm.dc);
1584
1585         for (i = 0; i < num_streams; i++) {
1586                 struct dc_stream_state *stream;
1587                 stream = dc_get_stream_at_index(adev->dm.dc, i);
1588
1589                 mod_freesync_update_state(adev->dm.freesync_module,
1590                                           &stream, 1, &freesync_params);
1591         }
1592
1593         return r;
1594 }
1595
/* Base-driver display hooks implemented (or deliberately stubbed) by DM.
 * NULL entries are cases DC/DAL handles internally (e.g. VBIOS parsing). */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,

};
1613
1614 #if defined(CONFIG_DEBUG_KERNEL_DC)
1615
1616 static ssize_t s3_debug_store(struct device *device,
1617                               struct device_attribute *attr,
1618                               const char *buf,
1619                               size_t count)
1620 {
1621         int ret;
1622         int s3_state;
1623         struct pci_dev *pdev = to_pci_dev(device);
1624         struct drm_device *drm_dev = pci_get_drvdata(pdev);
1625         struct amdgpu_device *adev = drm_dev->dev_private;
1626
1627         ret = kstrtoint(buf, 0, &s3_state);
1628
1629         if (ret == 0) {
1630                 if (s3_state) {
1631                         dm_resume(adev);
1632                         amdgpu_dm_display_resume(adev);
1633                         drm_kms_helper_hotplug_event(adev->ddev);
1634                 } else
1635                         dm_suspend(adev);
1636         }
1637
1638         return ret == 0 ? count : 0;
1639 }
1640
1641 DEVICE_ATTR_WO(s3_debug);
1642
1643 #endif
1644
1645 static int dm_early_init(void *handle)
1646 {
1647         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1648
1649         switch (adev->asic_type) {
1650         case CHIP_BONAIRE:
1651         case CHIP_HAWAII:
1652                 adev->mode_info.num_crtc = 6;
1653                 adev->mode_info.num_hpd = 6;
1654                 adev->mode_info.num_dig = 6;
1655                 adev->mode_info.plane_type = dm_plane_type_default;
1656                 break;
1657         case CHIP_KAVERI:
1658                 adev->mode_info.num_crtc = 4;
1659                 adev->mode_info.num_hpd = 6;
1660                 adev->mode_info.num_dig = 7;
1661                 adev->mode_info.plane_type = dm_plane_type_default;
1662                 break;
1663         case CHIP_KABINI:
1664         case CHIP_MULLINS:
1665                 adev->mode_info.num_crtc = 2;
1666                 adev->mode_info.num_hpd = 6;
1667                 adev->mode_info.num_dig = 6;
1668                 adev->mode_info.plane_type = dm_plane_type_default;
1669                 break;
1670         case CHIP_FIJI:
1671         case CHIP_TONGA:
1672                 adev->mode_info.num_crtc = 6;
1673                 adev->mode_info.num_hpd = 6;
1674                 adev->mode_info.num_dig = 7;
1675                 adev->mode_info.plane_type = dm_plane_type_default;
1676                 break;
1677         case CHIP_CARRIZO:
1678                 adev->mode_info.num_crtc = 3;
1679                 adev->mode_info.num_hpd = 6;
1680                 adev->mode_info.num_dig = 9;
1681                 adev->mode_info.plane_type = dm_plane_type_carizzo;
1682                 break;
1683         case CHIP_STONEY:
1684                 adev->mode_info.num_crtc = 2;
1685                 adev->mode_info.num_hpd = 6;
1686                 adev->mode_info.num_dig = 9;
1687                 adev->mode_info.plane_type = dm_plane_type_stoney;
1688                 break;
1689         case CHIP_POLARIS11:
1690         case CHIP_POLARIS12:
1691                 adev->mode_info.num_crtc = 5;
1692                 adev->mode_info.num_hpd = 5;
1693                 adev->mode_info.num_dig = 5;
1694                 adev->mode_info.plane_type = dm_plane_type_default;
1695                 break;
1696         case CHIP_POLARIS10:
1697                 adev->mode_info.num_crtc = 6;
1698                 adev->mode_info.num_hpd = 6;
1699                 adev->mode_info.num_dig = 6;
1700                 adev->mode_info.plane_type = dm_plane_type_default;
1701                 break;
1702         case CHIP_VEGA10:
1703         case CHIP_VEGA12:
1704                 adev->mode_info.num_crtc = 6;
1705                 adev->mode_info.num_hpd = 6;
1706                 adev->mode_info.num_dig = 6;
1707                 adev->mode_info.plane_type = dm_plane_type_default;
1708                 break;
1709 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1710         case CHIP_RAVEN:
1711                 adev->mode_info.num_crtc = 4;
1712                 adev->mode_info.num_hpd = 4;
1713                 adev->mode_info.num_dig = 4;
1714                 adev->mode_info.plane_type = dm_plane_type_default;
1715                 break;
1716 #endif
1717         default:
1718                 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1719                 return -EINVAL;
1720         }
1721
1722         amdgpu_dm_set_irq_funcs(adev);
1723
1724         if (adev->mode_info.funcs == NULL)
1725                 adev->mode_info.funcs = &dm_display_funcs;
1726
1727         /* Note: Do NOT change adev->audio_endpt_rreg and
1728          * adev->audio_endpt_wreg because they are initialised in
1729          * amdgpu_device_init() */
1730 #if defined(CONFIG_DEBUG_KERNEL_DC)
1731         device_create_file(
1732                 adev->ddev->dev,
1733                 &dev_attr_s3_debug);
1734 #endif
1735
1736         return 0;
1737 }
1738
1739 static bool modeset_required(struct drm_crtc_state *crtc_state,
1740                              struct dc_stream_state *new_stream,
1741                              struct dc_stream_state *old_stream)
1742 {
1743         if (!drm_atomic_crtc_needs_modeset(crtc_state))
1744                 return false;
1745
1746         if (!crtc_state->enable)
1747                 return false;
1748
1749         return crtc_state->active;
1750 }
1751
1752 static bool modereset_required(struct drm_crtc_state *crtc_state)
1753 {
1754         if (!drm_atomic_crtc_needs_modeset(crtc_state))
1755                 return false;
1756
1757         return !crtc_state->enable || !crtc_state->active;
1758 }
1759
/* drm_encoder_funcs.destroy callback: tear down the DRM encoder state and
 * free the encoder allocation itself.
 */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
1765
/* Encoder vtable: only destruction is handled here; DC drives the rest. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
1769
1770 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
1771                                         struct dc_plane_state *plane_state)
1772 {
1773         plane_state->src_rect.x = state->src_x >> 16;
1774         plane_state->src_rect.y = state->src_y >> 16;
1775         /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1776         plane_state->src_rect.width = state->src_w >> 16;
1777
1778         if (plane_state->src_rect.width == 0)
1779                 return false;
1780
1781         plane_state->src_rect.height = state->src_h >> 16;
1782         if (plane_state->src_rect.height == 0)
1783                 return false;
1784
1785         plane_state->dst_rect.x = state->crtc_x;
1786         plane_state->dst_rect.y = state->crtc_y;
1787
1788         if (state->crtc_w == 0)
1789                 return false;
1790
1791         plane_state->dst_rect.width = state->crtc_w;
1792
1793         if (state->crtc_h == 0)
1794                 return false;
1795
1796         plane_state->dst_rect.height = state->crtc_h;
1797
1798         plane_state->clip_rect = plane_state->dst_rect;
1799
1800         switch (state->rotation & DRM_MODE_ROTATE_MASK) {
1801         case DRM_MODE_ROTATE_0:
1802                 plane_state->rotation = ROTATION_ANGLE_0;
1803                 break;
1804         case DRM_MODE_ROTATE_90:
1805                 plane_state->rotation = ROTATION_ANGLE_90;
1806                 break;
1807         case DRM_MODE_ROTATE_180:
1808                 plane_state->rotation = ROTATION_ANGLE_180;
1809                 break;
1810         case DRM_MODE_ROTATE_270:
1811                 plane_state->rotation = ROTATION_ANGLE_270;
1812                 break;
1813         default:
1814                 plane_state->rotation = ROTATION_ANGLE_0;
1815                 break;
1816         }
1817
1818         return true;
1819 }
1820 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1821                        uint64_t *tiling_flags)
1822 {
1823         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1824         int r = amdgpu_bo_reserve(rbo, false);
1825
1826         if (unlikely(r)) {
1827                 // Don't show error msg. when return -ERESTARTSYS
1828                 if (r != -ERESTARTSYS)
1829                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
1830                 return r;
1831         }
1832
1833         if (tiling_flags)
1834                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
1835
1836         amdgpu_bo_unreserve(rbo);
1837
1838         return r;
1839 }
1840
/*
 * Fill the dc_plane_state's pixel format, surface/plane sizes, color space
 * and tiling info from the framebuffer backing the plane.
 *
 * Returns 0 on success, -EINVAL for an unsupported pixel format, or the
 * error from reserving the framebuffer BO.
 */
static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
					 struct dc_plane_state *plane_state,
					 const struct amdgpu_framebuffer *amdgpu_fb)
{
	uint64_t tiling_flags;
	unsigned int awidth;
	const struct drm_framebuffer *fb = &amdgpu_fb->base;
	int ret = 0;
	struct drm_format_name_buf format_name;

	/* Reserve the BO and read back its tiling flags. */
	ret = get_fb_info(
		amdgpu_fb,
		&tiling_flags);

	if (ret)
		return ret;

	/* Map the DRM fourcc onto DC's surface pixel format. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_NV21:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		/* Single-plane RGB surface: size comes straight from the fb. */
		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
		plane_state->plane_size.grph.surface_size.x = 0;
		plane_state->plane_size.grph.surface_size.y = 0;
		plane_state->plane_size.grph.surface_size.width = fb->width;
		plane_state->plane_size.grph.surface_size.height = fb->height;
		/* Pitch in pixels, not bytes. */
		plane_state->plane_size.grph.surface_pitch =
				fb->pitches[0] / fb->format->cpp[0];
		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_SRGB;

	} else {
		/* Semi-planar video (NV12/NV21): luma plus half-pitch chroma,
		 * with the width aligned to 64 pixels.
		 */
		awidth = ALIGN(fb->width, 64);
		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		plane_state->plane_size.video.luma_size.x = 0;
		plane_state->plane_size.video.luma_size.y = 0;
		plane_state->plane_size.video.luma_size.width = awidth;
		plane_state->plane_size.video.luma_size.height = fb->height;
		/* TODO: unhardcode */
		plane_state->plane_size.video.luma_pitch = awidth;

		plane_state->plane_size.video.chroma_size.x = 0;
		plane_state->plane_size.video.chroma_size.y = 0;
		plane_state->plane_size.video.chroma_size.width = awidth;
		plane_state->plane_size.video.chroma_size.height = fb->height;
		plane_state->plane_size.video.chroma_pitch = awidth / 2;

		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_YCBCR709;
	}

	memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		plane_state->tiling_info.gfx8.num_banks = num_banks;
		plane_state->tiling_info.gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		plane_state->tiling_info.gfx8.tile_split = tile_split;
		plane_state->tiling_info.gfx8.bank_width = bankw;
		plane_state->tiling_info.gfx8.bank_height = bankh;
		plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
		plane_state->tiling_info.gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	plane_state->tiling_info.gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		plane_state->tiling_info.gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		plane_state->tiling_info.gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		plane_state->tiling_info.gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		plane_state->tiling_info.gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		plane_state->tiling_info.gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		plane_state->tiling_info.gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		plane_state->tiling_info.gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		plane_state->tiling_info.gfx9.shaderEnable = 1;
	}

	plane_state->visible = true;
	plane_state->scaling_quality.h_taps_c = 0;
	plane_state->scaling_quality.v_taps_c = 0;

	/* is this needed? is plane_state zeroed at allocation? */
	plane_state->scaling_quality.h_taps = 0;
	plane_state->scaling_quality.v_taps = 0;
	plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;

	return ret;

}
1983
/*
 * Populate a dc_plane_state from DRM plane and CRTC state: source/dest
 * rectangles, framebuffer-derived attributes, and the input (degamma)
 * transfer function.
 *
 * Returns 0 on success or a negative errno.
 */
static int fill_plane_attributes(struct amdgpu_device *adev,
				 struct dc_plane_state *dc_plane_state,
				 struct drm_plane_state *plane_state,
				 struct drm_crtc_state *crtc_state)
{
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(plane_state->fb);
	const struct drm_crtc *crtc = plane_state->crtc;
	struct dc_transfer_func *input_tf;
	int ret = 0;

	/* Zero-sized rectangles make the plane state invalid. */
	if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
		return -EINVAL;

	ret = fill_plane_attributes_from_fb(
		crtc->dev->dev_private,
		dc_plane_state,
		amdgpu_fb);

	if (ret)
		return ret;

	input_tf = dc_create_transfer_func();

	if (input_tf == NULL)
		return -ENOMEM;

	dc_plane_state->in_transfer_func = input_tf;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
	if (ret) {
		/* Drop the reference taken above so the func is not leaked. */
		dc_transfer_func_release(dc_plane_state->in_transfer_func);
		dc_plane_state->in_transfer_func = NULL;
	}

	return ret;
}
2025
2026 /*****************************************************************************/
2027
/*
 * Compute the stream's source viewport and destination rectangle from the
 * requested mode and the connector's scaling/underscan properties.
 * With no dm_state, the default is full-screen scaling.
 */
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space*/
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			/* Shrink one destination axis so src and dst keep
			 * the same aspect ratio (cross-multiplied compare
			 * avoids division).
			 */
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			/* No scaling: display the source 1:1, centered. */
			dst = src;
		}

		/* Center the (possibly shrunk) destination rectangle. */
		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			dst.x, dst.y, dst.width, dst.height);

}
2082
2083 static enum dc_color_depth
2084 convert_color_depth_from_display_info(const struct drm_connector *connector)
2085 {
2086         uint32_t bpc = connector->display_info.bpc;
2087
2088         /* Limited color depth to 8bit
2089          * TODO: Still need to handle deep color
2090          */
2091         if (bpc > 8)
2092                 bpc = 8;
2093
2094         switch (bpc) {
2095         case 0:
2096                 /* Temporary Work around, DRM don't parse color depth for
2097                  * EDID revision before 1.4
2098                  * TODO: Fix edid parsing
2099                  */
2100                 return COLOR_DEPTH_888;
2101         case 6:
2102                 return COLOR_DEPTH_666;
2103         case 8:
2104                 return COLOR_DEPTH_888;
2105         case 10:
2106                 return COLOR_DEPTH_101010;
2107         case 12:
2108                 return COLOR_DEPTH_121212;
2109         case 14:
2110                 return COLOR_DEPTH_141414;
2111         case 16:
2112                 return COLOR_DEPTH_161616;
2113         default:
2114                 return COLOR_DEPTH_UNDEFINED;
2115         }
2116 }
2117
2118 static enum dc_aspect_ratio
2119 get_aspect_ratio(const struct drm_display_mode *mode_in)
2120 {
2121         int32_t width = mode_in->crtc_hdisplay * 9;
2122         int32_t height = mode_in->crtc_vdisplay * 16;
2123
2124         if ((width - height) < 10 && (width - height) > -10)
2125                 return ASPECT_RATIO_16_9;
2126         else
2127                 return ASPECT_RATIO_4_3;
2128 }
2129
2130 static enum dc_color_space
2131 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2132 {
2133         enum dc_color_space color_space = COLOR_SPACE_SRGB;
2134
2135         switch (dc_crtc_timing->pixel_encoding) {
2136         case PIXEL_ENCODING_YCBCR422:
2137         case PIXEL_ENCODING_YCBCR444:
2138         case PIXEL_ENCODING_YCBCR420:
2139         {
2140                 /*
2141                  * 27030khz is the separation point between HDTV and SDTV
2142                  * according to HDMI spec, we use YCbCr709 and YCbCr601
2143                  * respectively
2144                  */
2145                 if (dc_crtc_timing->pix_clk_khz > 27030) {
2146                         if (dc_crtc_timing->flags.Y_ONLY)
2147                                 color_space =
2148                                         COLOR_SPACE_YCBCR709_LIMITED;
2149                         else
2150                                 color_space = COLOR_SPACE_YCBCR709;
2151                 } else {
2152                         if (dc_crtc_timing->flags.Y_ONLY)
2153                                 color_space =
2154                                         COLOR_SPACE_YCBCR601_LIMITED;
2155                         else
2156                                 color_space = COLOR_SPACE_YCBCR601;
2157                 }
2158
2159         }
2160         break;
2161         case PIXEL_ENCODING_RGB:
2162                 color_space = COLOR_SPACE_SRGB;
2163                 break;
2164
2165         default:
2166                 WARN_ON(1);
2167                 break;
2168         }
2169
2170         return color_space;
2171 }
2172
2173 /*****************************************************************************/
2174
2175 static void
2176 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2177                                              const struct drm_display_mode *mode_in,
2178                                              const struct drm_connector *connector)
2179 {
2180         struct dc_crtc_timing *timing_out = &stream->timing;
2181         struct dc_transfer_func *tf = dc_create_transfer_func();
2182
2183         memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2184
2185         timing_out->h_border_left = 0;
2186         timing_out->h_border_right = 0;
2187         timing_out->v_border_top = 0;
2188         timing_out->v_border_bottom = 0;
2189         /* TODO: un-hardcode */
2190
2191         if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2192                         && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2193                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2194         else
2195                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2196
2197         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2198         timing_out->display_color_depth = convert_color_depth_from_display_info(
2199                         connector);
2200         timing_out->scan_type = SCANNING_TYPE_NODATA;
2201         timing_out->hdmi_vic = 0;
2202         timing_out->vic = drm_match_cea_mode(mode_in);
2203
2204         timing_out->h_addressable = mode_in->crtc_hdisplay;
2205         timing_out->h_total = mode_in->crtc_htotal;
2206         timing_out->h_sync_width =
2207                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2208         timing_out->h_front_porch =
2209                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2210         timing_out->v_total = mode_in->crtc_vtotal;
2211         timing_out->v_addressable = mode_in->crtc_vdisplay;
2212         timing_out->v_front_porch =
2213                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2214         timing_out->v_sync_width =
2215                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2216         timing_out->pix_clk_khz = mode_in->crtc_clock;
2217         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2218         if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2219                 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2220         if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2221                 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2222
2223         stream->output_color_space = get_output_color_space(timing_out);
2224
2225         tf->type = TF_TYPE_PREDEFINED;
2226         tf->tf = TRANSFER_FUNCTION_SRGB;
2227         stream->out_transfer_func = tf;
2228 }
2229
2230 static void fill_audio_info(struct audio_info *audio_info,
2231                             const struct drm_connector *drm_connector,
2232                             const struct dc_sink *dc_sink)
2233 {
2234         int i = 0;
2235         int cea_revision = 0;
2236         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
2237
2238         audio_info->manufacture_id = edid_caps->manufacturer_id;
2239         audio_info->product_id = edid_caps->product_id;
2240
2241         cea_revision = drm_connector->display_info.cea_rev;
2242
2243         strncpy(audio_info->display_name,
2244                 edid_caps->display_name,
2245                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
2246
2247         if (cea_revision >= 3) {
2248                 audio_info->mode_count = edid_caps->audio_mode_count;
2249
2250                 for (i = 0; i < audio_info->mode_count; ++i) {
2251                         audio_info->modes[i].format_code =
2252                                         (enum audio_format_code)
2253                                         (edid_caps->audio_modes[i].format_code);
2254                         audio_info->modes[i].channel_count =
2255                                         edid_caps->audio_modes[i].channel_count;
2256                         audio_info->modes[i].sample_rates.all =
2257                                         edid_caps->audio_modes[i].sample_rate;
2258                         audio_info->modes[i].sample_size =
2259                                         edid_caps->audio_modes[i].sample_size;
2260                 }
2261         }
2262
2263         audio_info->flags.all = edid_caps->speaker_flags;
2264
2265         /* TODO: We only check for the progressive mode, check for interlace mode too */
2266         if (drm_connector->latency_present[0]) {
2267                 audio_info->video_latency = drm_connector->video_latency[0];
2268                 audio_info->audio_latency = drm_connector->audio_latency[0];
2269         }
2270
2271         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2272
2273 }
2274
/*
 * Copy only the crtc_* (hardware) timing fields from src_mode into
 * dst_mode, leaving the user-visible mode fields untouched.
 */
static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}
2294
2295 static void
2296 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2297                                         const struct drm_display_mode *native_mode,
2298                                         bool scale_enabled)
2299 {
2300         if (scale_enabled) {
2301                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2302         } else if (native_mode->clock == drm_mode->clock &&
2303                         native_mode->htotal == drm_mode->htotal &&
2304                         native_mode->vtotal == drm_mode->vtotal) {
2305                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2306         } else {
2307                 /* no scaling nor amdgpu inserted, no need to patch */
2308         }
2309 }
2310
2311 static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
2312 {
2313         struct dc_sink *sink = NULL;
2314         struct dc_sink_init_data sink_init_data = { 0 };
2315
2316         sink_init_data.link = aconnector->dc_link;
2317         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2318
2319         sink = dc_sink_create(&sink_init_data);
2320         if (!sink) {
2321                 DRM_ERROR("Failed to create sink!\n");
2322                 return -ENOMEM;
2323         }
2324
2325         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2326         aconnector->fake_enable = true;
2327
2328         aconnector->dc_sink = sink;
2329         aconnector->dc_link->local_sink = sink;
2330
2331         return 0;
2332 }
2333
2334 static void set_multisync_trigger_params(
2335                 struct dc_stream_state *stream)
2336 {
2337         if (stream->triggered_crtc_reset.enabled) {
2338                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
2339                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
2340         }
2341 }
2342
2343 static void set_master_stream(struct dc_stream_state *stream_set[],
2344                               int stream_count)
2345 {
2346         int j, highest_rfr = 0, master_stream = 0;
2347
2348         for (j = 0;  j < stream_count; j++) {
2349                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
2350                         int refresh_rate = 0;
2351
2352                         refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
2353                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
2354                         if (refresh_rate > highest_rfr) {
2355                                 highest_rfr = refresh_rate;
2356                                 master_stream = j;
2357                         }
2358                 }
2359         }
2360         for (j = 0;  j < stream_count; j++) {
2361                 if (stream_set[j])
2362                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
2363         }
2364 }
2365
2366 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
2367 {
2368         int i = 0;
2369
2370         if (context->stream_count < 2)
2371                 return;
2372         for (i = 0; i < context->stream_count ; i++) {
2373                 if (!context->streams[i])
2374                         continue;
2375                 /* TODO: add a function to read AMD VSDB bits and will set
2376                  * crtc_sync_master.multi_sync_enabled flag
2377                  * For now its set to false
2378                  */
2379                 set_multisync_trigger_params(context->streams[i]);
2380         }
2381         set_master_stream(context->streams, context->stream_count);
2382 }
2383
/*
 * create_stream_for_sink() - build a dc_stream_state for @aconnector's sink.
 * @aconnector: source connector (must be non-NULL).
 * @drm_mode:   requested display mode; copied locally so timing decisions
 *              never mutate the caller's mode.
 * @dm_state:   connector state, or NULL (e.g. from the mode_valid path).
 *
 * Returns the new stream, or NULL if no sink is available or stream
 * creation fails. MST connectors without a dc_sink get a remote sink
 * created asynchronously and return NULL for now.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		/*
		 * Create dc_sink when necessary to MST
		 * Don't apply fake_sink to MST
		 */
		if (aconnector->mst_port) {
			dm_dp_mst_dc_sink_create(drm_connector);
			return stream;
		}

		/* Non-MST with no sink: fall back to a fake sink. */
		if (create_fake_sink(aconnector))
			return stream;
	}

	stream = dc_create_stream_for_sink(aconnector->dc_sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		return stream;
	}

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	/* No preferred mode: fall back to the first mode in the list, if any. */
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	if (preferred_mode == NULL) {
		/* This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
	}

	/* Without connector state, let DRM derive the CRTC timing fields. */
	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base);
	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		aconnector->dc_sink);

	update_stream_signal(stream);

	return stream;
}
2465
/* drm_crtc_funcs.destroy: release DRM core CRTC state and free the CRTC. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
2471
2472 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2473                                   struct drm_crtc_state *state)
2474 {
2475         struct dm_crtc_state *cur = to_dm_crtc_state(state);
2476
2477         /* TODO Destroy dc_stream objects are stream object is flattened */
2478         if (cur->stream)
2479                 dc_stream_release(cur->stream);
2480
2481
2482         __drm_atomic_helper_crtc_destroy_state(state);
2483
2484
2485         kfree(state);
2486 }
2487
2488 static void dm_crtc_reset_state(struct drm_crtc *crtc)
2489 {
2490         struct dm_crtc_state *state;
2491
2492         if (crtc->state)
2493                 dm_crtc_destroy_state(crtc, crtc->state);
2494
2495         state = kzalloc(sizeof(*state), GFP_KERNEL);
2496         if (WARN_ON(!state))
2497                 return;
2498
2499         crtc->state = &state->base;
2500         crtc->state->crtc = crtc;
2501
2502 }
2503
2504 static struct drm_crtc_state *
2505 dm_crtc_duplicate_state(struct drm_crtc *crtc)
2506 {
2507         struct dm_crtc_state *state, *cur;
2508
2509         cur = to_dm_crtc_state(crtc->state);
2510
2511         if (WARN_ON(!crtc->state))
2512                 return NULL;
2513
2514         state = kzalloc(sizeof(*state), GFP_KERNEL);
2515         if (!state)
2516                 return NULL;
2517
2518         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
2519
2520         if (cur->stream) {
2521                 state->stream = cur->stream;
2522                 dc_stream_retain(state->stream);
2523         }
2524
2525         /* TODO Duplicate dc_stream after objects are stream object is flattened */
2526
2527         return &state->base;
2528 }
2529
2530
2531 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
2532 {
2533         enum dc_irq_source irq_source;
2534         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2535         struct amdgpu_device *adev = crtc->dev->dev_private;
2536
2537         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
2538         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2539 }
2540
/* drm_crtc_funcs.enable_vblank: turn on the vblank interrupt for @crtc. */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
2545
/* drm_crtc_funcs.disable_vblank: turn off the vblank interrupt for @crtc. */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
2550
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
};
2564
2565 static enum drm_connector_status
2566 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
2567 {
2568         bool connected;
2569         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2570
2571         /* Notes:
2572          * 1. This interface is NOT called in context of HPD irq.
2573          * 2. This interface *is called* in context of user-mode ioctl. Which
2574          * makes it a bad place for *any* MST-related activit. */
2575
2576         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
2577             !aconnector->fake_enable)
2578                 connected = (aconnector->dc_sink != NULL);
2579         else
2580                 connected = (aconnector->base.force == DRM_FORCE_ON);
2581
2582         return (connected ? connector_status_connected :
2583                         connector_status_disconnected);
2584 }
2585
2586 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
2587                                             struct drm_connector_state *connector_state,
2588                                             struct drm_property *property,
2589                                             uint64_t val)
2590 {
2591         struct drm_device *dev = connector->dev;
2592         struct amdgpu_device *adev = dev->dev_private;
2593         struct dm_connector_state *dm_old_state =
2594                 to_dm_connector_state(connector->state);
2595         struct dm_connector_state *dm_new_state =
2596                 to_dm_connector_state(connector_state);
2597
2598         int ret = -EINVAL;
2599
2600         if (property == dev->mode_config.scaling_mode_property) {
2601                 enum amdgpu_rmx_type rmx_type;
2602
2603                 switch (val) {
2604                 case DRM_MODE_SCALE_CENTER:
2605                         rmx_type = RMX_CENTER;
2606                         break;
2607                 case DRM_MODE_SCALE_ASPECT:
2608                         rmx_type = RMX_ASPECT;
2609                         break;
2610                 case DRM_MODE_SCALE_FULLSCREEN:
2611                         rmx_type = RMX_FULL;
2612                         break;
2613                 case DRM_MODE_SCALE_NONE:
2614                 default:
2615                         rmx_type = RMX_OFF;
2616                         break;
2617                 }
2618
2619                 if (dm_old_state->scaling == rmx_type)
2620                         return 0;
2621
2622                 dm_new_state->scaling = rmx_type;
2623                 ret = 0;
2624         } else if (property == adev->mode_info.underscan_hborder_property) {
2625                 dm_new_state->underscan_hborder = val;
2626                 ret = 0;
2627         } else if (property == adev->mode_info.underscan_vborder_property) {
2628                 dm_new_state->underscan_vborder = val;
2629                 ret = 0;
2630         } else if (property == adev->mode_info.underscan_property) {
2631                 dm_new_state->underscan_enable = val;
2632                 ret = 0;
2633         }
2634
2635         return ret;
2636 }
2637
2638 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2639                                             const struct drm_connector_state *state,
2640                                             struct drm_property *property,
2641                                             uint64_t *val)
2642 {
2643         struct drm_device *dev = connector->dev;
2644         struct amdgpu_device *adev = dev->dev_private;
2645         struct dm_connector_state *dm_state =
2646                 to_dm_connector_state(state);
2647         int ret = -EINVAL;
2648
2649         if (property == dev->mode_config.scaling_mode_property) {
2650                 switch (dm_state->scaling) {
2651                 case RMX_CENTER:
2652                         *val = DRM_MODE_SCALE_CENTER;
2653                         break;
2654                 case RMX_ASPECT:
2655                         *val = DRM_MODE_SCALE_ASPECT;
2656                         break;
2657                 case RMX_FULL:
2658                         *val = DRM_MODE_SCALE_FULLSCREEN;
2659                         break;
2660                 case RMX_OFF:
2661                 default:
2662                         *val = DRM_MODE_SCALE_NONE;
2663                         break;
2664                 }
2665                 ret = 0;
2666         } else if (property == adev->mode_info.underscan_hborder_property) {
2667                 *val = dm_state->underscan_hborder;
2668                 ret = 0;
2669         } else if (property == adev->mode_info.underscan_vborder_property) {
2670                 *val = dm_state->underscan_vborder;
2671                 ret = 0;
2672         } else if (property == adev->mode_info.underscan_property) {
2673                 *val = dm_state->underscan_enable;
2674                 ret = 0;
2675         }
2676         return ret;
2677 }
2678
/*
 * drm_connector_funcs.destroy: unregister and free a DM connector.
 *
 * For eDP/LVDS links (signal types are bit flags, hence the bitwise AND)
 * the DM backlight device is torn down first.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		/* NOTE(review): register is called immediately before the
		 * unregister below -- presumably to ensure dm->backlight_dev
		 * is populated before tearing it down; confirm intent. */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev) {
			backlight_device_unregister(dm->backlight_dev);
			dm->backlight_dev = NULL;
		}

	}
#endif
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
2702
/*
 * drm_connector_funcs.reset: free the current dm_connector_state and
 * install a zeroed one with scaling and underscan disabled.
 *
 * NOTE(review): if connector->state could ever be NULL here,
 * to_dm_connector_state() would hand kfree() a bogus pointer unless
 * 'base' is the struct's first member -- confirm callers guarantee a
 * non-NULL state.
 */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		/* Driver defaults: no scaling, no underscan. */
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;

		connector->state = &state->base;
		connector->state->connector = connector;
	}
}
2722
2723 struct drm_connector_state *
2724 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
2725 {
2726         struct dm_connector_state *state =
2727                 to_dm_connector_state(connector->state);
2728
2729         struct dm_connector_state *new_state =
2730                         kmemdup(state, sizeof(*state), GFP_KERNEL);
2731
2732         if (new_state) {
2733                 __drm_atomic_helper_connector_duplicate_state(connector,
2734                                                               &new_state->base);
2735                 return &new_state->base;
2736         }
2737
2738         return NULL;
2739 }
2740
/* Connector vtable: DM-specific state handling plus stock DRM helpers. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
2751
2752 static struct drm_encoder *best_encoder(struct drm_connector *connector)
2753 {
2754         int enc_id = connector->encoder_ids[0];
2755         struct drm_mode_object *obj;
2756         struct drm_encoder *encoder;
2757
2758         DRM_DEBUG_DRIVER("Finding the best encoder\n");
2759
2760         /* pick the encoder ids */
2761         if (enc_id) {
2762                 obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
2763                 if (!obj) {
2764                         DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2765                         return NULL;
2766                 }
2767                 encoder = obj_to_encoder(obj);
2768                 return encoder;
2769         }
2770         DRM_ERROR("No encoder id\n");
2771         return NULL;
2772 }
2773
/* drm_connector_helper_funcs.get_modes: thin wrapper over the DM probe. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
2778
/*
 * create_eml_sink() - create an emulated (remote) sink from a forced EDID.
 *
 * Used for forced connectors: reads the EDID from the connector's EDID
 * blob, registers it as a remote sink on the dc_link, and -- if the
 * connector is forced ON -- selects the local sink when present, else the
 * emulated one. Forces the connector OFF if no EDID blob exists.
 */
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
				aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	/* EDID size is the base block plus all extension blocks. */
	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	/* Prefer a real local sink over the emulated one when forced ON. */
	if (aconnector->base.force == DRM_FORCE_ON)
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
}
2811
/*
 * handle_edid_mgmt() - one-time EDID setup for forced connectors.
 *
 * Seeds non-zero link capabilities for DP (required for the initial
 * modeset in headless/forced-on scenarios), then creates the emulated
 * sink from the override EDID.
 */
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/* In case of headless boot with force on for DP managed connector
	 * Those settings have to be != 0 to get initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}


	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
2828
2829 int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
2830                                    struct drm_display_mode *mode)
2831 {
2832         int result = MODE_ERROR;
2833         struct dc_sink *dc_sink;
2834         struct amdgpu_device *adev = connector->dev->dev_private;
2835         /* TODO: Unhardcode stream count */
2836         struct dc_stream_state *stream;
2837         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2838         enum dc_status dc_result = DC_OK;
2839
2840         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
2841                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
2842                 return result;
2843
2844         /* Only run this the first time mode_valid is called to initilialize
2845          * EDID mgmt
2846          */
2847         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
2848                 !aconnector->dc_em_sink)
2849                 handle_edid_mgmt(aconnector);
2850
2851         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
2852
2853         if (dc_sink == NULL) {
2854                 DRM_ERROR("dc_sink is NULL!\n");
2855                 goto fail;
2856         }
2857
2858         stream = create_stream_for_sink(aconnector, mode, NULL);
2859         if (stream == NULL) {
2860                 DRM_ERROR("Failed to create stream for sink!\n");
2861                 goto fail;
2862         }
2863
2864         dc_result = dc_validate_stream(adev->dm.dc, stream);
2865
2866         if (dc_result == DC_OK)
2867                 result = MODE_OK;
2868         else
2869                 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
2870                               mode->vdisplay,
2871                               mode->hdisplay,
2872                               mode->clock,
2873                               dc_result);
2874
2875         dc_stream_release(stream);
2876
2877 fail:
2878         /* TODO: error handling*/
2879         return result;
2880 }
2881
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplug a second bigger display in FB Con mode, bigger resolution
	 * modes will be filtered by drm_mode_validate_size(), and those modes
	 * are missing after user starts lightdm. So we need to renew modes list
	 * in the get_modes callback, not just return the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.best_encoder = best_encoder
};
2894
/* Intentionally empty stub required by drm_crtc_helper_funcs.disable. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
2898
2899 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
2900                                        struct drm_crtc_state *state)
2901 {
2902         struct amdgpu_device *adev = crtc->dev->dev_private;
2903         struct dc *dc = adev->dm.dc;
2904         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
2905         int ret = -EINVAL;
2906
2907         if (unlikely(!dm_crtc_state->stream &&
2908                      modeset_required(state, NULL, dm_crtc_state->stream))) {
2909                 WARN_ON(1);
2910                 return ret;
2911         }
2912
2913         /* In some use cases, like reset, no stream  is attached */
2914         if (!dm_crtc_state->stream)
2915                 return 0;
2916
2917         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
2918                 return 0;
2919
2920         return ret;
2921 }
2922
/* drm_crtc_helper_funcs.mode_fixup: no adjustment needed, accept as-is. */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
2929
/* CRTC helper vtable used by the DRM atomic helpers. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup
};
2935
/* Intentionally empty stub required by drm_encoder_helper_funcs.disable. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
2940
/* drm_encoder_helper_funcs.atomic_check: nothing to validate, accept. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}
2947
/* Encoder helper vtable; both callbacks are effectively no-ops. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
2952
2953 static void dm_drm_plane_reset(struct drm_plane *plane)
2954 {
2955         struct dm_plane_state *amdgpu_state = NULL;
2956
2957         if (plane->state)
2958                 plane->funcs->atomic_destroy_state(plane, plane->state);
2959
2960         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
2961         WARN_ON(amdgpu_state == NULL);
2962         
2963         if (amdgpu_state) {
2964                 plane->state = &amdgpu_state->base;
2965                 plane->state->plane = plane;
2966                 plane->state->rotation = DRM_MODE_ROTATE_0;
2967         }
2968 }
2969
2970 static struct drm_plane_state *
2971 dm_drm_plane_duplicate_state(struct drm_plane *plane)
2972 {
2973         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
2974
2975         old_dm_plane_state = to_dm_plane_state(plane->state);
2976         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
2977         if (!dm_plane_state)
2978                 return NULL;
2979
2980         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
2981
2982         if (old_dm_plane_state->dc_state) {
2983                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
2984                 dc_plane_state_retain(dm_plane_state->dc_state);
2985         }
2986
2987         return &dm_plane_state->base;
2988 }
2989
2990 void dm_drm_plane_destroy_state(struct drm_plane *plane,
2991                                 struct drm_plane_state *state)
2992 {
2993         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
2994
2995         if (dm_plane_state->dc_state)
2996                 dc_plane_state_release(dm_plane_state->dc_state);
2997
2998         drm_atomic_helper_plane_destroy_state(plane, state);
2999 }
3000
/* Plane vtable: DM-specific state handling plus stock atomic helpers. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
3009
/*
 * drm_plane_helper_funcs.prepare_fb: pin the framebuffer BO so it has a
 * stable GPU address before the commit, and write that address into the
 * new dc_plane_state (graphics or video-progressive layout depending on
 * the surface pixel format).
 *
 * On success a reference on the BO is held (dropped in cleanup_fb).
 * Returns 0 on success or a negative errno from reserve/pin.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	uint64_t chroma_addr = 0;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	unsigned int awidth;
	uint32_t domain;
	int r;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	/* Nothing to pin when the plane has no framebuffer. */
	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);

	obj = afb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	/* Cursor BOs are pinned in VRAM; scanout FBs use the display domains. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_framebuffer_domains(adev);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain, &afb->address);

	amdgpu_bo_unreserve(rbo);

	if (unlikely(r != 0)) {
		/* -ERESTARTSYS is a normal signal interruption, don't log it. */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		return r;
	}

	/* Hold a reference while the FB is in use; released in cleanup_fb. */
	amdgpu_bo_ref(rbo);

	if (dm_plane_state_new->dc_state &&
			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
			/* Graphics surface: single base address. */
			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
		} else {
			/* Video surface: luma plane followed by chroma plane,
			 * with the pitch aligned to 64 pixels. */
			awidth = ALIGN(new_state->fb->width, 64);
			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
			plane_state->address.video_progressive.luma_addr.low_part
							= lower_32_bits(afb->address);
			plane_state->address.video_progressive.luma_addr.high_part
							= upper_32_bits(afb->address);
			chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
			plane_state->address.video_progressive.chroma_addr.low_part
							= lower_32_bits(chroma_addr);
			plane_state->address.video_progressive.chroma_addr.high_part
							= upper_32_bits(chroma_addr);
		}
	}

	/* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
	 * prepare and cleanup in drm_atomic_helper_prepare_planes
	 * and drm_atomic_helper_cleanup_planes because fb doesn't exist in s3.
	 * In 4.10 kernel this code should be removed and amdgpu_device_suspend
	 * code touching frame buffers should be avoided for DC.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);

		acrtc->cursor_bo = obj;
	}
	return 0;
}
3092
3093 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
3094                                        struct drm_plane_state *old_state)
3095 {
3096         struct amdgpu_bo *rbo;
3097         struct amdgpu_framebuffer *afb;
3098         int r;
3099
3100         if (!old_state->fb)
3101                 return;
3102
3103         afb = to_amdgpu_framebuffer(old_state->fb);
3104         rbo = gem_to_amdgpu_bo(afb->obj);
3105         r = amdgpu_bo_reserve(rbo, false);
3106         if (unlikely(r)) {
3107                 DRM_ERROR("failed to reserve rbo before unpin\n");
3108                 return;
3109         }
3110
3111         amdgpu_bo_unpin(rbo);
3112         amdgpu_bo_unreserve(rbo);
3113         amdgpu_bo_unref(&rbo);
3114 }
3115
3116 static int dm_plane_atomic_check(struct drm_plane *plane,
3117                                  struct drm_plane_state *state)
3118 {
3119         struct amdgpu_device *adev = plane->dev->dev_private;
3120         struct dc *dc = adev->dm.dc;
3121         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3122
3123         if (!dm_plane_state->dc_state)
3124                 return 0;
3125
3126         if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
3127                 return -EINVAL;
3128
3129         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3130                 return 0;
3131
3132         return -EINVAL;
3133 }
3134
/* Plane helper vtable used by the DRM atomic helpers. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
};
3140
3141 /*
3142  * TODO: these are currently initialized to rgb formats only.
3143  * For future use cases we should either initialize them dynamically based on
3144  * plane capabilities, or initialize this array to all formats, so internal drm
3145  * check will succeed, and let DC to implement proper check
3146  */
3147 static const uint32_t rgb_formats[] = {
3148         DRM_FORMAT_RGB888,
3149         DRM_FORMAT_XRGB8888,
3150         DRM_FORMAT_ARGB8888,