scsi: ufs: unexport descriptor reading functions
1 /*
2  * Universal Flash Storage Host controller driver Core
3  *
4  * This code is based on drivers/scsi/ufs/ufshcd.c
5  * Copyright (C) 2011-2013 Samsung India Software Operations
6  * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
7  *
8  * Authors:
9  *      Santosh Yaraganavi <santosh.sy@samsung.com>
10  *      Vinayak Holikatti <h.vinayak@samsung.com>
11  *
12  * This program is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU General Public License
14  * as published by the Free Software Foundation; either version 2
15  * of the License, or (at your option) any later version.
16  * See the COPYING file in the top-level directory or visit
17  * <http://www.gnu.org/licenses/gpl-2.0.html>
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  * GNU General Public License for more details.
23  *
24  * This program is provided "AS IS" and "WITH ALL FAULTS" and
25  * without warranty of any kind. You are solely responsible for
26  * determining the appropriateness of using and distributing
27  * the program and assume all risks associated with your exercise
28  * of rights with respect to the program, including but not limited
29  * to infringement of third party rights, the risks and costs of
30  * program errors, damage to or loss of data, programs or equipment,
31  * and unavailability or interruption of operations. Under no
32  * circumstances will the contributor of this Program be liable for
33  * any damages of any kind arising from your use or distribution of
34  * this program.
35  *
36  * The Linux Foundation chooses to take subject only to the GPLv2
37  * license terms, and distributes only under these terms.
38  */
39
40 #include <linux/async.h>
41 #include <linux/devfreq.h>
42 #include <linux/nls.h>
43 #include <linux/of.h>
44 #include "ufshcd.h"
45 #include "ufs_quirks.h"
46 #include "unipro.h"
47
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/ufs.h>
50
51 #define UFSHCD_REQ_SENSE_SIZE   18
52
53 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
54                                  UTP_TASK_REQ_COMPL |\
55                                  UFSHCD_ERROR_MASK)
56 /* UIC command timeout, unit: ms */
57 #define UIC_CMD_TIMEOUT 500
58
59 /* NOP OUT retries waiting for NOP IN response */
60 #define NOP_OUT_RETRIES    10
61 /* Timeout after 30 msecs if NOP OUT hangs without response */
62 #define NOP_OUT_TIMEOUT    30 /* msecs */
63
64 /* Query request retries */
65 #define QUERY_REQ_RETRIES 3
66 /* Query request timeout */
67 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
68
69 /* Task management command timeout */
70 #define TM_CMD_TIMEOUT  100 /* msecs */
71
72 /* maximum number of retries for a general UIC command  */
73 #define UFS_UIC_COMMAND_RETRIES 3
74
75 /* maximum number of link-startup retries */
76 #define DME_LINKSTARTUP_RETRIES 3
77
78 /* Maximum retries for Hibern8 enter */
79 #define UIC_HIBERN8_ENTER_RETRIES 3
80
81 /* maximum number of reset retries before giving up */
82 #define MAX_HOST_RESET_RETRIES 5
83
84 /* Expose the flag value from utp_upiu_query.value */
85 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
86
87 /* Interrupt aggregation default timeout, unit: 40us */
88 #define INT_AGGR_DEF_TO 0x02
89
90 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
91         ({                                                              \
92                 int _ret;                                               \
93                 if (_on)                                                \
94                         _ret = ufshcd_enable_vreg(_dev, _vreg);         \
95                 else                                                    \
96                         _ret = ufshcd_disable_vreg(_dev, _vreg);        \
97                 _ret;                                                   \
98         })
99
100 #define ufshcd_hex_dump(prefix_str, buf, len) \
101 print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
102
103 static u32 ufs_query_desc_max_size[] = {
104         QUERY_DESC_DEVICE_MAX_SIZE,
105         QUERY_DESC_CONFIGURAION_MAX_SIZE,
106         QUERY_DESC_UNIT_MAX_SIZE,
107         QUERY_DESC_RFU_MAX_SIZE,
108         QUERY_DESC_INTERCONNECT_MAX_SIZE,
109         QUERY_DESC_STRING_MAX_SIZE,
110         QUERY_DESC_RFU_MAX_SIZE,
111         QUERY_DESC_GEOMETRY_MAX_SIZE,
112         QUERY_DESC_POWER_MAX_SIZE,
113         QUERY_DESC_RFU_MAX_SIZE,
114 };
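/*
 * Illustrative note: this table is indexed by the descriptor IDN carried in
 * Query UPIUs (the QUERY_DESC_IDN_* values), e.g. entry 2 holds the maximum
 * size the driver will read for a Unit descriptor.
 */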
115
116 enum {
117         UFSHCD_MAX_CHANNEL      = 0,
118         UFSHCD_MAX_ID           = 1,
119         UFSHCD_CMD_PER_LUN      = 32,
120         UFSHCD_CAN_QUEUE        = 32,
121 };
122
123 /* UFSHCD states */
124 enum {
125         UFSHCD_STATE_RESET,
126         UFSHCD_STATE_ERROR,
127         UFSHCD_STATE_OPERATIONAL,
128         UFSHCD_STATE_EH_SCHEDULED,
129 };
130
131 /* UFSHCD error handling flags */
132 enum {
133         UFSHCD_EH_IN_PROGRESS = (1 << 0),
134 };
135
136 /* UFSHCD UIC layer error flags */
137 enum {
138         UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
139         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
140         UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
141         UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
142         UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
143         UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
144 };
145
146 /* Interrupt configuration options */
147 enum {
148         UFSHCD_INT_DISABLE,
149         UFSHCD_INT_ENABLE,
150         UFSHCD_INT_CLEAR,
151 };
152
153 #define ufshcd_set_eh_in_progress(h) \
154         (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
155 #define ufshcd_eh_in_progress(h) \
156         (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
157 #define ufshcd_clear_eh_in_progress(h) \
158         (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
159
160 #define ufshcd_set_ufs_dev_active(h) \
161         ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
162 #define ufshcd_set_ufs_dev_sleep(h) \
163         ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
164 #define ufshcd_set_ufs_dev_poweroff(h) \
165         ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
166 #define ufshcd_is_ufs_dev_active(h) \
167         ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
168 #define ufshcd_is_ufs_dev_sleep(h) \
169         ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
170 #define ufshcd_is_ufs_dev_poweroff(h) \
171         ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
172
173 static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
174         {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
175         {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
176         {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
177         {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
178         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
179         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
180 };
181
182 static inline enum ufs_dev_pwr_mode
183 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
184 {
185         return ufs_pm_lvl_states[lvl].dev_state;
186 }
187
188 static inline enum uic_link_state
189 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
190 {
191         return ufs_pm_lvl_states[lvl].link_state;
192 }
193
194 static inline enum ufs_pm_level
195 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
196                                         enum uic_link_state link_state)
197 {
198         enum ufs_pm_level lvl;
199
200         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
201                 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
202                         (ufs_pm_lvl_states[lvl].link_state == link_state))
203                         return lvl;
204         }
205
206         /* if no match found, return level 0 */
207         return UFS_PM_LVL_0;
208 }
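/*
 * Worked example, based on the table above:
 * ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 * UIC_LINK_HIBERN8_STATE) returns UFS_PM_LVL_3, since index 3 of
 * ufs_pm_lvl_states[] holds that (device state, link state) pair.
 */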
209
210 static struct ufs_dev_fix ufs_fixups[] = {
211         /* UFS cards deviations table */
212         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
213                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
214         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
215         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
216                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
217         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
218                 UFS_DEVICE_NO_FASTAUTO),
219         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
220                 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
221         UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
222                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
223         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
224                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
225         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
226                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
227         UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
228         UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
229                 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
230
231         END_FIX
232 };
233
234 static void ufshcd_tmc_handler(struct ufs_hba *hba);
235 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
236 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
237 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
238 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
239 static void ufshcd_hba_exit(struct ufs_hba *hba);
240 static int ufshcd_probe_hba(struct ufs_hba *hba);
241 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
242                                  bool skip_ref_clk);
243 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
244 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
245 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
246 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
247 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
248 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
249 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
250 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
251 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
252 static irqreturn_t ufshcd_intr(int irq, void *__hba);
253 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
254                 struct ufs_pa_layer_attr *desired_pwr_mode);
255 static int ufshcd_change_power_mode(struct ufs_hba *hba,
256                              struct ufs_pa_layer_attr *pwr_mode);
257 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
258 {
259         return tag >= 0 && tag < hba->nutrs;
260 }
261
262 static inline int ufshcd_enable_irq(struct ufs_hba *hba)
263 {
264         int ret = 0;
265
266         if (!hba->is_irq_enabled) {
267                 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
268                                 hba);
269                 if (ret)
270                         dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
271                                 __func__, ret);
272                 hba->is_irq_enabled = true;
273         }
274
275         return ret;
276 }
277
278 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
279 {
280         if (hba->is_irq_enabled) {
281                 free_irq(hba->irq, hba);
282                 hba->is_irq_enabled = false;
283         }
284 }
285
286 /* replace non-printable or non-ASCII characters with spaces */
287 static inline void ufshcd_remove_non_printable(char *val)
288 {
289         if (!val)
290                 return;
291
292         if (*val < 0x20 || *val > 0x7e)
293                 *val = ' ';
294 }
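/*
 * Illustrative sketch (not code from this driver): callers apply this helper
 * one character at a time over a descriptor string, e.g.
 *
 *	for (i = 0; i < len; i++)
 *		ufshcd_remove_non_printable(&str[i]);
 */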
295
296 static void ufshcd_add_command_trace(struct ufs_hba *hba,
297                 unsigned int tag, const char *str)
298 {
299         sector_t lba = -1;
300         u8 opcode = 0;
301         u32 intr, doorbell;
302         struct ufshcd_lrb *lrbp;
303         int transfer_len = -1;
304
305         if (!trace_ufshcd_command_enabled())
306                 return;
307
308         lrbp = &hba->lrb[tag];
309
310         if (lrbp->cmd) { /* data phase exists */
311                 opcode = (u8)(*lrbp->cmd->cmnd);
312                 if ((opcode == READ_10) || (opcode == WRITE_10)) {
313                         /*
314                          * Currently we only fully trace read(10) and write(10)
315                          * commands
316                          */
317                         if (lrbp->cmd->request && lrbp->cmd->request->bio)
318                                 lba =
319                                   lrbp->cmd->request->bio->bi_iter.bi_sector;
320                         transfer_len = be32_to_cpu(
321                                 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
322                 }
323         }
324
325         intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
326         doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
327         trace_ufshcd_command(dev_name(hba->dev), str, tag,
328                                 doorbell, transfer_len, intr, lba, opcode);
329 }
330
331 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
332 {
333         struct ufs_clk_info *clki;
334         struct list_head *head = &hba->clk_list_head;
335
336         if (!head || list_empty(head))
337                 return;
338
339         list_for_each_entry(clki, head, list) {
340                 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
341                                 clki->max_freq)
342                         dev_err(hba->dev, "clk: %s, rate: %u\n",
343                                         clki->name, clki->curr_freq);
344         }
345 }
346
347 static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
348                 struct ufs_uic_err_reg_hist *err_hist, char *err_name)
349 {
350         int i;
351
352         for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
353                 int p = (i + err_hist->pos) % UIC_ERR_REG_HIST_LENGTH;
354
355                 if (err_hist->reg[p] == 0)
356                         continue;
357                 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
358                         err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
359         }
360 }
361
362 static void ufshcd_print_host_regs(struct ufs_hba *hba)
363 {
364         /*
365          * hex_dump reads its data without the readl macro. This might
366          * cause inconsistency issues on some platforms, as the printed
367          * values may come from a cache rather than the most recent value.
368          * To know whether you are looking at an un-cached version, verify
369          * that the IORESOURCE_MEM flag is set when xxx_get_resource() is
370          * invoked during the platform/pci probe function.
371          */
372         ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
373         dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
374                 hba->ufs_version, hba->capabilities);
375         dev_err(hba->dev,
376                 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
377                 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
378         dev_err(hba->dev,
379                 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
380                 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
381                 hba->ufs_stats.hibern8_exit_cnt);
382
383         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
384         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
385         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
386         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
387         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
388
389         ufshcd_print_clk_freqs(hba);
390
391         if (hba->vops && hba->vops->dbg_register_dump)
392                 hba->vops->dbg_register_dump(hba);
393 }
394
395 static
396 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
397 {
398         struct ufshcd_lrb *lrbp;
399         int tag;
400
401         for_each_set_bit(tag, &bitmap, hba->nutrs) {
402                 lrbp = &hba->lrb[tag];
403
404                 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
405                                 tag, ktime_to_us(lrbp->issue_time_stamp));
406                 dev_err(hba->dev,
407                         "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
408                         tag, (u64)lrbp->utrd_dma_addr);
409
410                 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
411                                 sizeof(struct utp_transfer_req_desc));
412                 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
413                         (u64)lrbp->ucd_req_dma_addr);
414                 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
415                                 sizeof(struct utp_upiu_req));
416                 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
417                         (u64)lrbp->ucd_rsp_dma_addr);
418                 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
419                                 sizeof(struct utp_upiu_rsp));
420                 if (pr_prdt) {
421                         int prdt_length = le16_to_cpu(
422                                 lrbp->utr_descriptor_ptr->prd_table_length);
423
424                         dev_err(hba->dev,
425                                 "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
426                                 tag, prdt_length,
427                                 (u64)lrbp->ucd_prdt_dma_addr);
428                         ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
429                                         sizeof(struct ufshcd_sg_entry) *
430                                         prdt_length);
431                 }
432         }
433 }
434
435 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
436 {
437         struct utp_task_req_desc *tmrdp;
438         int tag;
439
440         for_each_set_bit(tag, &bitmap, hba->nutmrs) {
441                 tmrdp = &hba->utmrdl_base_addr[tag];
442                 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
443                 ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
444                                 sizeof(struct request_desc_header));
445                 dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
446                                 tag);
447                 ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
448                                 sizeof(struct utp_upiu_req));
449                 dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
450                                 tag);
451                 ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
452                                 sizeof(struct utp_task_req_desc));
453         }
454 }
455
456 /**
457  * ufshcd_print_pwr_info - print power params as saved in hba
458  * power info
459  * @hba: per-adapter instance
460  */
461 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
462 {
463         static const char * const names[] = {
464                 "INVALID MODE",
465                 "FAST MODE",
466                 "SLOW_MODE",
467                 "INVALID MODE",
468                 "FASTAUTO_MODE",
469                 "SLOWAUTO_MODE",
470                 "INVALID MODE",
471         };
472
473         dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
474                  __func__,
475                  hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
476                  hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
477                  names[hba->pwr_info.pwr_rx],
478                  names[hba->pwr_info.pwr_tx],
479                  hba->pwr_info.hs_rate);
480 }
481
482 /*
483  * ufshcd_wait_for_register - wait for register value to change
484  * @hba - per-adapter interface
485  * @reg - mmio register offset
486  * @mask - mask to apply to read register value
487  * @val - wait condition
488  * @interval_us - polling interval in microsecs
489  * @timeout_ms - timeout in millisecs
490  * @can_sleep - perform sleep or just spin
491  *
492  * Returns -ETIMEDOUT on error, zero on success
493  */
494 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
495                                 u32 val, unsigned long interval_us,
496                                 unsigned long timeout_ms, bool can_sleep)
497 {
498         int err = 0;
499         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
500
501         /* ignore bits that we don't intend to wait on */
502         val = val & mask;
503
504         while ((ufshcd_readl(hba, reg) & mask) != val) {
505                 if (can_sleep)
506                         usleep_range(interval_us, interval_us + 50);
507                 else
508                         udelay(interval_us);
509                 if (time_after(jiffies, timeout)) {
510                         if ((ufshcd_readl(hba, reg) & mask) != val)
511                                 err = -ETIMEDOUT;
512                         break;
513                 }
514         }
515
516         return err;
517 }
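/*
 * Illustrative usage sketch (values are examples only): poll until a transfer
 * request doorbell bit clears, sleeping between reads, with a one second
 * timeout:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1 << tag, 0, 1000, 1000, true);
 */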
518
519 /**
520  * ufshcd_get_intr_mask - Get the interrupt bit mask
521  * @hba - Pointer to adapter instance
522  *
523  * Returns interrupt bit mask per version
524  */
525 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
526 {
527         u32 intr_mask = 0;
528
529         switch (hba->ufs_version) {
530         case UFSHCI_VERSION_10:
531                 intr_mask = INTERRUPT_MASK_ALL_VER_10;
532                 break;
533         /* allow fall through */
534         case UFSHCI_VERSION_11:
535         case UFSHCI_VERSION_20:
536                 intr_mask = INTERRUPT_MASK_ALL_VER_11;
537                 break;
538         /* allow fall through */
539         case UFSHCI_VERSION_21:
540         default:
541                 intr_mask = INTERRUPT_MASK_ALL_VER_21;
542         }
543
544         return intr_mask;
545 }
546
547 /**
548  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
549  * @hba - Pointer to adapter instance
550  *
551  * Returns UFSHCI version supported by the controller
552  */
553 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
554 {
555         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
556                 return ufshcd_vops_get_ufs_hci_version(hba);
557
558         return ufshcd_readl(hba, REG_UFS_VERSION);
559 }
560
561 /**
562  * ufshcd_is_device_present - Check if any device is connected to
563  *                            the host controller
564  * @hba: pointer to adapter instance
565  *
566  * Returns 1 if device present, 0 if no device detected
567  */
568 static inline int ufshcd_is_device_present(struct ufs_hba *hba)
569 {
570         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
571                                                 DEVICE_PRESENT) ? 1 : 0;
572 }
573
574 /**
575  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
576  * @lrbp: pointer to local command reference block
577  *
578  * This function is used to get the OCS field from UTRD
579  * Returns the OCS field in the UTRD
580  */
581 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
582 {
583         return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
584 }
585
586 /**
587  * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
588  * @task_req_descp: pointer to utp_task_req_desc structure
589  *
590  * This function is used to get the OCS field from UTMRD
591  * Returns the OCS field in the UTMRD
592  */
593 static inline int
594 ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
595 {
596         return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
597 }
598
599 /**
600  * ufshcd_get_tm_free_slot - get a free slot for task management request
601  * @hba: per adapter instance
602  * @free_slot: pointer to variable with available slot value
603  *
604  * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
605  * Returns false if a free slot is not available, else returns true with the
606  * tag value in @free_slot.
607  */
608 static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
609 {
610         int tag;
611         bool ret = false;
612
613         if (!free_slot)
614                 goto out;
615
616         do {
617                 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
618                 if (tag >= hba->nutmrs)
619                         goto out;
620         } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
621
622         *free_slot = tag;
623         ret = true;
624 out:
625         return ret;
626 }
627
628 static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
629 {
630         clear_bit_unlock(slot, &hba->tm_slots_in_use);
631 }
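/*
 * Illustrative pairing sketch, simplified from the task management path:
 * a caller grabs a free slot, issues the TM request, then releases the slot:
 *
 *	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
 *	... build the TM request and ring the doorbell for free_slot ...
 *	ufshcd_put_tm_slot(hba, free_slot);
 */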
632
633 /**
634  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
635  * @hba: per adapter instance
636  * @pos: position of the bit to be cleared
637  */
638 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
639 {
640         ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
641 }
642
643 /**
644  * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
645  * @hba: per adapter instance
646  * @tag: position of the bit to be cleared
647  */
648 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
649 {
650         __clear_bit(tag, &hba->outstanding_reqs);
651 }
652
653 /**
654  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
655  * @reg: Register value of host controller status
656  *
657  * Returns 0 on success and a positive value on failure
658  */
659 static inline int ufshcd_get_lists_status(u32 reg)
660 {
661         /*
662          * The mask 0xFF is for the following HCS register bits
663          * Bit          Description
664          *  0           Device Present
665          *  1           UTRLRDY
666          *  2           UTMRLRDY
667          *  3           UCRDY
668          * 4-7          reserved
669          */
670         return ((reg & 0xFF) >> 1) ^ 0x07;
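        /*
         * Worked example: with UTRLRDY, UTMRLRDY and UCRDY all set,
         * (reg & 0xFF) >> 1 = 0x07 and 0x07 ^ 0x07 = 0 (success); the
         * Device Present bit is shifted out and does not affect the result.
         */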
671 }
672
673 /**
674  * ufshcd_get_uic_cmd_result - Get the UIC command result
675  * @hba: Pointer to adapter instance
676  *
677  * This function gets the result of UIC command completion
678  * Returns 0 on success, non zero value on error
679  */
680 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
681 {
682         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
683                MASK_UIC_COMMAND_RESULT;
684 }
685
686 /**
687  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
688  * @hba: Pointer to adapter instance
689  *
690  * This function gets UIC command argument3
691  * Returns the attribute value read back in UIC command argument3
692  */
693 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
694 {
695         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
696 }
697
698 /**
699  * ufshcd_get_req_rsp - returns the TR response transaction type
700  * @ucd_rsp_ptr: pointer to response UPIU
701  */
702 static inline int
703 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
704 {
705         return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
706 }
707
708 /**
709  * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
710  * @ucd_rsp_ptr: pointer to response UPIU
711  *
712  * This function gets the response status and scsi_status from response UPIU
713  * Returns the response result code.
714  */
715 static inline int
716 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
717 {
718         return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
719 }
720
721 /*
722  * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
723  *                              from response UPIU
724  * @ucd_rsp_ptr: pointer to response UPIU
725  *
726  * Return the data segment length.
727  */
728 static inline unsigned int
729 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
730 {
731         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
732                 MASK_RSP_UPIU_DATA_SEG_LEN;
733 }
734
735 /**
736  * ufshcd_is_exception_event - Check if the device raised an exception event
737  * @ucd_rsp_ptr: pointer to response UPIU
738  *
739  * The function checks if the device raised an exception event indicated in
740  * the Device Information field of response UPIU.
741  *
742  * Returns true if exception is raised, false otherwise.
743  */
744 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
745 {
746         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
747                         MASK_RSP_EXCEPTION_EVENT ? true : false;
748 }
749
750 /**
751  * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
752  * @hba: per adapter instance
753  */
754 static inline void
755 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
756 {
757         ufshcd_writel(hba, INT_AGGR_ENABLE |
758                       INT_AGGR_COUNTER_AND_TIMER_RESET,
759                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
760 }
761
762 /**
763  * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
764  * @hba: per adapter instance
765  * @cnt: Interrupt aggregation counter threshold
766  * @tmout: Interrupt aggregation timeout value
767  */
768 static inline void
769 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
770 {
771         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
772                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
773                       INT_AGGR_TIMEOUT_VAL(tmout),
774                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
775 }
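/*
 * Illustrative example: the driver typically programs the counter threshold to
 * the maximum number of transfer requests and uses the default timeout, e.g.
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */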
776
777 /**
778  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
779  * @hba: per adapter instance
780  */
781 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
782 {
783         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
784 }
785
786 /**
787  * ufshcd_enable_run_stop_reg - Enable run-stop registers,
788  *                      When the run-stop registers are set to 1, it indicates to the
789  *                      host controller that it can process requests
790  * @hba: per adapter instance
791  */
792 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
793 {
794         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
795                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
796         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
797                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
798 }
799
800 /**
801  * ufshcd_hba_start - Start controller initialization sequence
802  * @hba: per adapter instance
803  */
804 static inline void ufshcd_hba_start(struct ufs_hba *hba)
805 {
806         ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
807 }
808
809 /**
810  * ufshcd_is_hba_active - Get controller state
811  * @hba: per adapter instance
812  *
813  * Returns zero if controller is active, 1 otherwise
814  */
815 static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
816 {
817         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
818 }
819
820 static const char *ufschd_uic_link_state_to_string(
821                         enum uic_link_state state)
822 {
823         switch (state) {
824         case UIC_LINK_OFF_STATE:        return "OFF";
825         case UIC_LINK_ACTIVE_STATE:     return "ACTIVE";
826         case UIC_LINK_HIBERN8_STATE:    return "HIBERN8";
827         default:                        return "UNKNOWN";
828         }
829 }
830
831 static const char *ufschd_ufs_dev_pwr_mode_to_string(
832                         enum ufs_dev_pwr_mode state)
833 {
834         switch (state) {
835         case UFS_ACTIVE_PWR_MODE:       return "ACTIVE";
836         case UFS_SLEEP_PWR_MODE:        return "SLEEP";
837         case UFS_POWERDOWN_PWR_MODE:    return "POWERDOWN";
838         default:                        return "UNKNOWN";
839         }
840 }
841
842 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
843 {
844         /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
845         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
846             (hba->ufs_version == UFSHCI_VERSION_11))
847                 return UFS_UNIPRO_VER_1_41;
848         else
849                 return UFS_UNIPRO_VER_1_6;
850 }
851 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
852
853 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
854 {
855         /*
856          * If both host and device support UniPro ver1.6 or later, PA layer
857          * parameters tuning happens during link startup itself.
858          *
859          * We can manually tune PA layer parameters if either host or device
860          * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
861          * logic simple, we will only do manual tuning if local unipro version
862          * doesn't support ver1.6 or later.
863          */
864         if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
865                 return true;
866         else
867                 return false;
868 }
869
870 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
871 {
872         if (!ufshcd_is_clkscaling_supported(hba))
873                 return;
874
875         devfreq_suspend_device(hba->devfreq);
876         hba->clk_scaling.window_start_t = 0;
877 }
878
879 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
880 {
881         devfreq_resume_device(hba->devfreq);
882 }
883
884 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
885                 struct device_attribute *attr, char *buf)
886 {
887         struct ufs_hba *hba = dev_get_drvdata(dev);
888
889         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
890 }
891
892 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
893                 struct device_attribute *attr, const char *buf, size_t count)
894 {
895         struct ufs_hba *hba = dev_get_drvdata(dev);
896         u32 value;
897         int err;
898
899         if (kstrtou32(buf, 0, &value))
900                 return -EINVAL;
901
902         value = !!value;
903         if (value == hba->clk_scaling.is_allowed)
904                 goto out;
905
906         pm_runtime_get_sync(hba->dev);
907         ufshcd_hold(hba, false);
908
909         if (value) {
910                 ufshcd_resume_clkscaling(hba);
911         } else {
912                 ufshcd_suspend_clkscaling(hba);
913                 err = ufshcd_scale_clks(hba, true);
914                 if (err)
915                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
916                                         __func__, err);
917         }
918         hba->clk_scaling.is_allowed = value;
919
920         ufshcd_release(hba);
921         pm_runtime_put_sync(hba->dev);
922 out:
923         return count;
924 }
925
926 static void ufshcd_ungate_work(struct work_struct *work)
927 {
928         int ret;
929         unsigned long flags;
930         struct ufs_hba *hba = container_of(work, struct ufs_hba,
931                         clk_gating.ungate_work);
932
933         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
934
935         spin_lock_irqsave(hba->host->host_lock, flags);
936         if (hba->clk_gating.state == CLKS_ON) {
937                 spin_unlock_irqrestore(hba->host->host_lock, flags);
938                 goto unblock_reqs;
939         }
940
941         spin_unlock_irqrestore(hba->host->host_lock, flags);
942         ufshcd_setup_clocks(hba, true);
943
944         /* Exit from hibern8 */
945         if (ufshcd_can_hibern8_during_gating(hba)) {
946                 /* Prevent gating in this path */
947                 hba->clk_gating.is_suspended = true;
948                 if (ufshcd_is_link_hibern8(hba)) {
949                         ret = ufshcd_uic_hibern8_exit(hba);
950                         if (ret)
951                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
952                                         __func__, ret);
953                         else
954                                 ufshcd_set_link_active(hba);
955                 }
956                 hba->clk_gating.is_suspended = false;
957         }
958 unblock_reqs:
959         if (hba->clk_scaling.is_allowed)
960                 ufshcd_resume_clkscaling(hba);
961         scsi_unblock_requests(hba->host);
962 }
963
964 /**
965  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
966  * Also, exit from hibern8 mode and set the link as active.
967  * @hba: per adapter instance
968  * @async: This indicates whether caller should ungate clocks asynchronously.
969  */
970 int ufshcd_hold(struct ufs_hba *hba, bool async)
971 {
972         int rc = 0;
973         unsigned long flags;
974
975         if (!ufshcd_is_clkgating_allowed(hba))
976                 goto out;
977         spin_lock_irqsave(hba->host->host_lock, flags);
978         hba->clk_gating.active_reqs++;
979
980         if (ufshcd_eh_in_progress(hba)) {
981                 spin_unlock_irqrestore(hba->host->host_lock, flags);
982                 return 0;
983         }
984
985 start:
986         switch (hba->clk_gating.state) {
987         case CLKS_ON:
988                 /*
989                  * Wait for the ungate work to complete if in progress.
990                  * Though the clocks may be in ON state, the link could
991                  * still be in hibern8 state if hibern8 is allowed
992                  * during clock gating.
993                  * Make sure we exit hibern8 state also in addition to
994                  * clocks being ON.
995                  */
996                 if (ufshcd_can_hibern8_during_gating(hba) &&
997                     ufshcd_is_link_hibern8(hba)) {
998                         spin_unlock_irqrestore(hba->host->host_lock, flags);
999                         flush_work(&hba->clk_gating.ungate_work);
1000                         spin_lock_irqsave(hba->host->host_lock, flags);
1001                         goto start;
1002                 }
1003                 break;
1004         case REQ_CLKS_OFF:
1005                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1006                         hba->clk_gating.state = CLKS_ON;
1007                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1008                                                 hba->clk_gating.state);
1009                         break;
1010                 }
1011                 /*
1012          * If we are here, it means gating work is either done or
1013                  * currently running. Hence, fall through to cancel gating
1014                  * work and to enable clocks.
1015                  */
1016         case CLKS_OFF:
1017                 scsi_block_requests(hba->host);
1018                 hba->clk_gating.state = REQ_CLKS_ON;
1019                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1020                                         hba->clk_gating.state);
1021                 schedule_work(&hba->clk_gating.ungate_work);
1022                 /*
1023                  * fall through to check if we should wait for this
1024                  * work to be done or not.
1025                  */
1026         case REQ_CLKS_ON:
1027                 if (async) {
1028                         rc = -EAGAIN;
1029                         hba->clk_gating.active_reqs--;
1030                         break;
1031                 }
1032
1033                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1034                 flush_work(&hba->clk_gating.ungate_work);
1035                 /* Make sure state is CLKS_ON before returning */
1036                 spin_lock_irqsave(hba->host->host_lock, flags);
1037                 goto start;
1038         default:
1039                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1040                                 __func__, hba->clk_gating.state);
1041                 break;
1042         }
1043         spin_unlock_irqrestore(hba->host->host_lock, flags);
1044 out:
1045         return rc;
1046 }
1047 EXPORT_SYMBOL_GPL(ufshcd_hold);
1048
1049 static void ufshcd_gate_work(struct work_struct *work)
1050 {
1051         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1052                         clk_gating.gate_work.work);
1053         unsigned long flags;
1054
1055         spin_lock_irqsave(hba->host->host_lock, flags);
1056         /*
1057          * In case you are here to cancel this work the gating state
1058          * would be marked as REQ_CLKS_ON. In this case save time by
1059          * skipping the gating work and exit after changing the clock
1060          * state to CLKS_ON.
1061          */
1062         if (hba->clk_gating.is_suspended ||
1063                 (hba->clk_gating.state == REQ_CLKS_ON)) {
1064                 hba->clk_gating.state = CLKS_ON;
1065                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1066                                         hba->clk_gating.state);
1067                 goto rel_lock;
1068         }
1069
1070         if (hba->clk_gating.active_reqs
1071                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1072                 || hba->lrb_in_use || hba->outstanding_tasks
1073                 || hba->active_uic_cmd || hba->uic_async_done)
1074                 goto rel_lock;
1075
1076         spin_unlock_irqrestore(hba->host->host_lock, flags);
1077
1078         /* put the link into hibern8 mode before turning off clocks */
1079         if (ufshcd_can_hibern8_during_gating(hba)) {
1080                 if (ufshcd_uic_hibern8_enter(hba)) {
1081                         hba->clk_gating.state = CLKS_ON;
1082                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1083                                                 hba->clk_gating.state);
1084                         goto out;
1085                 }
1086                 ufshcd_set_link_hibern8(hba);
1087         }
1088
1089         ufshcd_suspend_clkscaling(hba);
1090
1091         if (!ufshcd_is_link_active(hba))
1092                 ufshcd_setup_clocks(hba, false);
1093         else
1094                 /* If link is active, device ref_clk can't be switched off */
1095                 __ufshcd_setup_clocks(hba, false, true);
1096
1097         /*
1098          * In case you are here to cancel this work the gating state
1099          * would be marked as REQ_CLKS_ON. In this case keep the state
1100          * as REQ_CLKS_ON which would anyway imply that clocks are off
1101          * and a request to turn them on is pending. By doing this way,
1102          * we keep the state machine intact and this would ultimately
1103          * prevent doing the cancel work multiple times when there are
1104          * new requests arriving before the current cancel work is done.
1105          */
1106         spin_lock_irqsave(hba->host->host_lock, flags);
1107         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1108                 hba->clk_gating.state = CLKS_OFF;
1109                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1110                                         hba->clk_gating.state);
1111         }
1112 rel_lock:
1113         spin_unlock_irqrestore(hba->host->host_lock, flags);
1114 out:
1115         return;
1116 }
1117
1118 /* host lock must be held before calling this variant */
1119 static void __ufshcd_release(struct ufs_hba *hba)
1120 {
1121         if (!ufshcd_is_clkgating_allowed(hba))
1122                 return;
1123
1124         hba->clk_gating.active_reqs--;
1125
1126         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1127                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1128                 || hba->lrb_in_use || hba->outstanding_tasks
1129                 || hba->active_uic_cmd || hba->uic_async_done
1130                 || ufshcd_eh_in_progress(hba))
1131                 return;
1132
1133         hba->clk_gating.state = REQ_CLKS_OFF;
1134         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1135         schedule_delayed_work(&hba->clk_gating.gate_work,
1136                         msecs_to_jiffies(hba->clk_gating.delay_ms));
1137 }
1138
1139 void ufshcd_release(struct ufs_hba *hba)
1140 {
1141         unsigned long flags;
1142
1143         spin_lock_irqsave(hba->host->host_lock, flags);
1144         __ufshcd_release(hba);
1145         spin_unlock_irqrestore(hba->host->host_lock, flags);
1146 }
1147 EXPORT_SYMBOL_GPL(ufshcd_release);
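/*
 * Illustrative pairing sketch: ufshcd_hold() and ufshcd_release() bracket any
 * activity that needs the clocks (and link) up, e.g.
 *
 *	ufshcd_hold(hba, false);
 *	... issue commands / access host registers ...
 *	ufshcd_release(hba);
 */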
1148
1149 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1150                 struct device_attribute *attr, char *buf)
1151 {
1152         struct ufs_hba *hba = dev_get_drvdata(dev);
1153
1154         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1155 }
1156
1157 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1158                 struct device_attribute *attr, const char *buf, size_t count)
1159 {
1160         struct ufs_hba *hba = dev_get_drvdata(dev);
1161         unsigned long flags, value;
1162
1163         if (kstrtoul(buf, 0, &value))
1164                 return -EINVAL;
1165
1166         spin_lock_irqsave(hba->host->host_lock, flags);
1167         hba->clk_gating.delay_ms = value;
1168         spin_unlock_irqrestore(hba->host->host_lock, flags);
1169         return count;
1170 }
1171
1172 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1173                 struct device_attribute *attr, char *buf)
1174 {
1175         struct ufs_hba *hba = dev_get_drvdata(dev);
1176
1177         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1178 }
1179
1180 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1181                 struct device_attribute *attr, const char *buf, size_t count)
1182 {
1183         struct ufs_hba *hba = dev_get_drvdata(dev);
1184         unsigned long flags;
1185         u32 value;
1186
1187         if (kstrtou32(buf, 0, &value))
1188                 return -EINVAL;
1189
1190         value = !!value;
1191         if (value == hba->clk_gating.is_enabled)
1192                 goto out;
1193
1194         if (value) {
1195                 ufshcd_release(hba);
1196         } else {
1197                 spin_lock_irqsave(hba->host->host_lock, flags);
1198                 hba->clk_gating.active_reqs++;
1199                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1200         }
1201
1202         hba->clk_gating.is_enabled = value;
1203 out:
1204         return count;
1205 }
1206
1207 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1208 {
1209         if (!ufshcd_is_clkgating_allowed(hba))
1210                 return;
1211
1212         hba->clk_gating.delay_ms = 150;
1213         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1214         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1215
1216         hba->clk_gating.is_enabled = true;
1217
1218         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1219         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1220         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1221         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1222         hba->clk_gating.delay_attr.attr.mode = 0644;
1223         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1224                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1225
1226         hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1227         hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1228         sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1229         hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1230         hba->clk_gating.enable_attr.attr.mode = 0644;
1231         if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1232                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1233 }
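/*
 * The two attributes created above are controlled from user space; the sysfs
 * path depends on how the host device is named and is shown here only as an
 * illustration:
 *
 *	echo 200 > /sys/.../clkgate_delay_ms	(gate clocks after 200 ms idle)
 *	echo 0 > /sys/.../clkgate_enable	(disable clock gating)
 */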
1234
1235 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1236 {
1237         if (!ufshcd_is_clkgating_allowed(hba))
1238                 return;
1239         device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1240         device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1241         cancel_work_sync(&hba->clk_gating.ungate_work);
1242         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1243 }
1244
1245 /* Must be called with host lock acquired */
1246 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1247 {
1248         if (!ufshcd_is_clkscaling_supported(hba))
1249                 return;
1250
1251         if (!hba->clk_scaling.is_busy_started) {
1252                 hba->clk_scaling.busy_start_t = ktime_get();
1253                 hba->clk_scaling.is_busy_started = true;
1254         }
1255 }
1256
1257 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1258 {
1259         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1260
1261         if (!ufshcd_is_clkscaling_supported(hba))
1262                 return;
1263
1264         if (!hba->outstanding_reqs && scaling->is_busy_started) {
1265                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1266                                         scaling->busy_start_t));
1267                 scaling->busy_start_t = 0;
1268                 scaling->is_busy_started = false;
1269         }
1270 }
1271 /**
1272  * ufshcd_send_command - Send SCSI or device management commands
1273  * @hba: per adapter instance
1274  * @task_tag: Task tag of the command
1275  */
1276 static inline
1277 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1278 {
1279         hba->lrb[task_tag].issue_time_stamp = ktime_get();
1280         ufshcd_clk_scaling_start_busy(hba);
1281         __set_bit(task_tag, &hba->outstanding_reqs);
1282         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1283         /* Make sure that doorbell is committed immediately */
1284         wmb();
1285         ufshcd_add_command_trace(hba, task_tag, "send");
1286 }
1287
1288 /**
1289  * ufshcd_copy_sense_data - Copy sense data in case of check condition
1290  * @lrbp - pointer to local reference block
1291  */
1292 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1293 {
1294         int len;
1295         if (lrbp->sense_buffer &&
1296             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1297                 int len_to_copy;
1298
1299                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1300                 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
1301
1302                 memcpy(lrbp->sense_buffer,
1303                         lrbp->ucd_rsp_ptr->sr.sense_data,
1304                         min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
1305         }
1306 }
1307
1308 /**
1309  * ufshcd_copy_query_response() - Copy the Query Response and the data
1310  * descriptor
1311  * @hba: per adapter instance
1312  * @lrbp - pointer to local reference block
1313  */
1314 static
1315 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1316 {
1317         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1318
1319         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1320
1321         /* Get the descriptor */
1322         if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1323                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1324                                 GENERAL_UPIU_REQUEST_SIZE;
1325                 u16 resp_len;
1326                 u16 buf_len;
1327
1328                 /* data segment length */
1329                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
1330                                                 MASK_QUERY_DATA_SEG_LEN;
1331                 buf_len = be16_to_cpu(
1332                                 hba->dev_cmd.query.request.upiu_req.length);
1333                 if (likely(buf_len >= resp_len)) {
1334                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1335                 } else {
1336                         dev_warn(hba->dev,
1337                                 "%s: Response size is bigger than buffer",
1338                                 __func__);
1339                         return -EINVAL;
1340                 }
1341         }
1342
1343         return 0;
1344 }
1345
1346 /**
1347  * ufshcd_hba_capabilities - Read controller capabilities
1348  * @hba: per adapter instance
1349  */
1350 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1351 {
1352         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1353
1354         /* nutrs and nutmrs are 0 based values */
1355         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1356         hba->nutmrs =
1357         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1358 }
1359
1360 /**
1361  * ufshcd_ready_for_uic_cmd - Check if controller is ready
1362  *                            to accept UIC commands
1363  * @hba: per adapter instance
1364  * Return true on success, else false
1365  */
1366 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1367 {
1368         if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
1369                 return true;
1370         else
1371                 return false;
1372 }
1373
1374 /**
1375  * ufshcd_get_upmcrs - Get the power mode change request status
1376  * @hba: Pointer to adapter instance
1377  *
1378  * This function gets the UPMCRS field of HCS register
1379  * Returns value of UPMCRS field
1380  */
1381 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
1382 {
1383         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1384 }
1385
1386 /**
1387  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
1388  * @hba: per adapter instance
1389  * @uic_cmd: UIC command
1390  *
1391  * Mutex must be held.
1392  */
1393 static inline void
1394 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1395 {
1396         WARN_ON(hba->active_uic_cmd);
1397
1398         hba->active_uic_cmd = uic_cmd;
1399
1400         /* Write Args */
1401         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
1402         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
1403         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
1404
1405         /* Write UIC Cmd */
1406         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
1407                       REG_UIC_COMMAND);
1408 }
1409
1410 /**
1411  * ufshcd_wait_for_uic_cmd - Wait for completion of UIC command
1412  * @hba: per adapter instance
1413  * @uic_command: UIC command
1414  *
1415  * Must be called with mutex held.
1416  * Returns 0 only if success.
1417  */
1418 static int
1419 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1420 {
1421         int ret;
1422         unsigned long flags;
1423
1424         if (wait_for_completion_timeout(&uic_cmd->done,
1425                                         msecs_to_jiffies(UIC_CMD_TIMEOUT)))
1426                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
1427         else
1428                 ret = -ETIMEDOUT;
1429
1430         spin_lock_irqsave(hba->host->host_lock, flags);
1431         hba->active_uic_cmd = NULL;
1432         spin_unlock_irqrestore(hba->host->host_lock, flags);
1433
1434         return ret;
1435 }
1436
1437 /**
1438  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
1439  * @hba: per adapter instance
1440  * @uic_cmd: UIC command
1441  * @completion: initialize the completion only if this is set to true
1442  *
1443  * Identical to ufshcd_send_uic_cmd() except for mutex handling. Must be called
1444  * with mutex held and host_lock locked.
1445  * Returns 0 only if success.
1446  */
1447 static int
1448 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
1449                       bool completion)
1450 {
1451         if (!ufshcd_ready_for_uic_cmd(hba)) {
1452                 dev_err(hba->dev,
1453                         "Controller not ready to accept UIC commands\n");
1454                 return -EIO;
1455         }
1456
1457         if (completion)
1458                 init_completion(&uic_cmd->done);
1459
1460         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
1461
1462         return 0;
1463 }
1464
1465 /**
1466  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
1467  * @hba: per adapter instance
1468  * @uic_cmd: UIC command
1469  *
1470  * Returns 0 only on success.
1471  */
1472 static int
1473 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1474 {
1475         int ret;
1476         unsigned long flags;
1477
1478         ufshcd_hold(hba, false);
1479         mutex_lock(&hba->uic_cmd_mutex);
1480         ufshcd_add_delay_before_dme_cmd(hba);
1481
1482         spin_lock_irqsave(hba->host->host_lock, flags);
1483         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
1484         spin_unlock_irqrestore(hba->host->host_lock, flags);
1485         if (!ret)
1486                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
1487
1488         mutex_unlock(&hba->uic_cmd_mutex);
1489
1490         ufshcd_release(hba);
1491         return ret;
1492 }
1493
1494 /**
1495  * ufshcd_map_sg - Map scatter-gather list to prdt
1496  * @hba: per adapter instance
 * @lrbp: pointer to local reference block
1497  *
1498  * Returns 0 in case of success, non-zero value in case of failure
1499  */
1500 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1501 {
1502         struct ufshcd_sg_entry *prd_table;
1503         struct scatterlist *sg;
1504         struct scsi_cmnd *cmd;
1505         int sg_segments;
1506         int i;
1507
1508         cmd = lrbp->cmd;
1509         sg_segments = scsi_dma_map(cmd);
1510         if (sg_segments < 0)
1511                 return sg_segments;
1512
1513         if (sg_segments) {
1514                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
1515                         lrbp->utr_descriptor_ptr->prd_table_length =
1516                                 cpu_to_le16((u16)(sg_segments *
1517                                         sizeof(struct ufshcd_sg_entry)));
1518                 else
1519                         lrbp->utr_descriptor_ptr->prd_table_length =
1520                                 cpu_to_le16((u16) (sg_segments));
1521
1522                 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
1523
1524                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
1525                         prd_table[i].size  =
1526                                 cpu_to_le32(((u32) sg_dma_len(sg))-1);
1527                         prd_table[i].base_addr =
1528                                 cpu_to_le32(lower_32_bits(sg->dma_address));
1529                         prd_table[i].upper_addr =
1530                                 cpu_to_le32(upper_32_bits(sg->dma_address));
1531                         prd_table[i].reserved = 0;
1532                 }
1533         } else {
1534                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
1535         }
1536
1537         return 0;
1538 }
1539
1540 /**
1541  * ufshcd_enable_intr - enable interrupts
1542  * @hba: per adapter instance
1543  * @intrs: interrupt bits
1544  */
1545 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
1546 {
1547         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
1548
1549         if (hba->ufs_version == UFSHCI_VERSION_10) {
1550                 u32 rw;
1551                 rw = set & INTERRUPT_MASK_RW_VER_10;
1552                 set = rw | ((set ^ intrs) & intrs);
1553         } else {
1554                 set |= intrs;
1555         }
1556
1557         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
1558 }
1559
1560 /**
1561  * ufshcd_disable_intr - disable interrupts
1562  * @hba: per adapter instance
1563  * @intrs: interrupt bits
1564  */
1565 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
1566 {
1567         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
1568
1569         if (hba->ufs_version == UFSHCI_VERSION_10) {
1570                 u32 rw;
1571                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
1572                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
1573                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
1574
1575         } else {
1576                 set &= ~intrs;
1577         }
1578
1579         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
1580 }
1581
1582 /**
1583  * ufshcd_prepare_req_desc_hdr() - Fills the request's header
1584  * descriptor according to the request
1585  * @lrbp: pointer to local reference block
1586  * @upiu_flags: flags required in the header
1587  * @cmd_dir: request's data direction
1588  */
1589 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
1590                         u32 *upiu_flags, enum dma_data_direction cmd_dir)
1591 {
1592         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
1593         u32 data_direction;
1594         u32 dword_0;
1595
1596         if (cmd_dir == DMA_FROM_DEVICE) {
1597                 data_direction = UTP_DEVICE_TO_HOST;
1598                 *upiu_flags = UPIU_CMD_FLAGS_READ;
1599         } else if (cmd_dir == DMA_TO_DEVICE) {
1600                 data_direction = UTP_HOST_TO_DEVICE;
1601                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
1602         } else {
1603                 data_direction = UTP_NO_DATA_TRANSFER;
1604                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
1605         }
1606
1607         dword_0 = data_direction | (lrbp->command_type
1608                                 << UPIU_COMMAND_TYPE_OFFSET);
1609         if (lrbp->intr_cmd)
1610                 dword_0 |= UTP_REQ_DESC_INT_CMD;
1611
1612         /* Transfer request descriptor header fields */
1613         req_desc->header.dword_0 = cpu_to_le32(dword_0);
1614         /* dword_1 is reserved, hence it is set to 0 */
1615         req_desc->header.dword_1 = 0;
1616         /*
1617          * assigning invalid value for command status. Controller
1618          * updates OCS on command completion, with the command
1619          * status
1620          */
1621         req_desc->header.dword_2 =
1622                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
1623         /* dword_3 is reserved, hence it is set to 0 */
1624         req_desc->header.dword_3 = 0;
1625
1626         req_desc->prd_table_length = 0;
1627 }
1628
1629 /**
1630  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
1631  * for scsi commands
1632  * @lrbp: local reference block pointer
1633  * @upiu_flags: flags
1634  */
1635 static
1636 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
1637 {
1638         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1639         unsigned short cdb_len;
1640
1641         /* command descriptor fields */
1642         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
1643                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
1644                                 lrbp->lun, lrbp->task_tag);
1645         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
1646                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
1647
1648         /* Total EHS length and Data segment length will be zero */
1649         ucd_req_ptr->header.dword_2 = 0;
1650
1651         ucd_req_ptr->sc.exp_data_transfer_len =
1652                 cpu_to_be32(lrbp->cmd->sdb.length);
1653
1654         cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
1655         memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
1656         memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
1657
1658         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
1659 }
1660
1661 /**
1662  * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
1663  * for query requests
1664  * @hba: UFS hba
1665  * @lrbp: local reference block pointer
1666  * @upiu_flags: flags
1667  */
1668 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
1669                                 struct ufshcd_lrb *lrbp, u32 upiu_flags)
1670 {
1671         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1672         struct ufs_query *query = &hba->dev_cmd.query;
1673         u16 len = be16_to_cpu(query->request.upiu_req.length);
1674         u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
1675
1676         /* Query request header */
1677         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
1678                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
1679                         lrbp->lun, lrbp->task_tag);
1680         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
1681                         0, query->request.query_func, 0, 0);
1682
1683         /* Data segment length is only needed for WRITE_DESC */
1684         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
1685                 ucd_req_ptr->header.dword_2 =
1686                         UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
1687         else
1688                 ucd_req_ptr->header.dword_2 = 0;
1689
1690         /* Copy the Query Request buffer as is */
1691         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
1692                         QUERY_OSF_SIZE);
1693
1694         /* Copy the Descriptor */
1695         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
1696                 memcpy(descp, query->descriptor, len);
1697
1698         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
1699 }
1700
1701 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
1702 {
1703         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1704
1705         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
1706
1707         /* command descriptor fields */
1708         ucd_req_ptr->header.dword_0 =
1709                 UPIU_HEADER_DWORD(
1710                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
1711         /* clear rest of the fields of basic header */
1712         ucd_req_ptr->header.dword_1 = 0;
1713         ucd_req_ptr->header.dword_2 = 0;
1714
1715         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
1716 }
1717
1718 /**
1719  * ufshcd_comp_devman_upiu - prepare the UFS Protocol Information Unit (UPIU)
1720  *                           for device management purposes
1721  * @hba: per adapter instance
1722  * @lrbp: pointer to local reference block
1723  */
1724 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1725 {
1726         u32 upiu_flags;
1727         int ret = 0;
1728
1729         if (hba->ufs_version == UFSHCI_VERSION_20)
1730                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
1731         else
1732                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
1733
1734         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
1735         if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
1736                 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
1737         else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
1738                 ufshcd_prepare_utp_nop_upiu(lrbp);
1739         else
1740                 ret = -EINVAL;
1741
1742         return ret;
1743 }
1744
1745 /**
1746  * ufshcd_comp_scsi_upiu - prepare the UFS Protocol Information Unit (UPIU)
1747  *                         for SCSI purposes
1748  * @hba: per adapter instance
1749  * @lrbp: pointer to local reference block
1750  */
1751 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1752 {
1753         u32 upiu_flags;
1754         int ret = 0;
1755
1756         if (hba->ufs_version == UFSHCI_VERSION_20)
1757                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
1758         else
1759                 lrbp->command_type = UTP_CMD_TYPE_SCSI;
1760
1761         if (likely(lrbp->cmd)) {
1762                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
1763                                                 lrbp->cmd->sc_data_direction);
1764                 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
1765         } else {
1766                 ret = -EINVAL;
1767         }
1768
1769         return ret;
1770 }
1771
1772 /**
1773  * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
1774  * @scsi_lun: scsi LUN id
1775  *
1776  * Returns UPIU LUN id
1777  */
1778 static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
1779 {
1780         if (scsi_is_wlun(scsi_lun))
1781                 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
1782                         | UFS_UPIU_WLUN_ID;
1783         else
1784                 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
1785 }
1786
1787 /**
1788  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
1789  * @upiu_wlun_id: UPIU W-LUN id
1790  *
1791  * Returns SCSI W-LUN id
1792  */
1793 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
1794 {
1795         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
1796 }
1797
1798 /**
1799  * ufshcd_queuecommand - main entry point for SCSI requests
1800  * @host: SCSI host pointer
1801  * @cmd: command from SCSI Midlayer
1802  *
1803  * Returns 0 for success, non-zero in case of failure
1804  */
1805 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1806 {
1807         struct ufshcd_lrb *lrbp;
1808         struct ufs_hba *hba;
1809         unsigned long flags;
1810         int tag;
1811         int err = 0;
1812
1813         hba = shost_priv(host);
1814
1815         tag = cmd->request->tag;
1816         if (!ufshcd_valid_tag(hba, tag)) {
1817                 dev_err(hba->dev,
1818                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
1819                         __func__, tag, cmd, cmd->request);
1820                 BUG();
1821         }
1822
1823         spin_lock_irqsave(hba->host->host_lock, flags);
1824         switch (hba->ufshcd_state) {
1825         case UFSHCD_STATE_OPERATIONAL:
1826                 break;
1827         case UFSHCD_STATE_EH_SCHEDULED:
1828         case UFSHCD_STATE_RESET:
1829                 err = SCSI_MLQUEUE_HOST_BUSY;
1830                 goto out_unlock;
1831         case UFSHCD_STATE_ERROR:
1832                 set_host_byte(cmd, DID_ERROR);
1833                 cmd->scsi_done(cmd);
1834                 goto out_unlock;
1835         default:
1836                 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
1837                                 __func__, hba->ufshcd_state);
1838                 set_host_byte(cmd, DID_BAD_TARGET);
1839                 cmd->scsi_done(cmd);
1840                 goto out_unlock;
1841         }
1842
1843         /* if error handling is in progress, don't issue commands */
1844         if (ufshcd_eh_in_progress(hba)) {
1845                 set_host_byte(cmd, DID_ERROR);
1846                 cmd->scsi_done(cmd);
1847                 goto out_unlock;
1848         }
1849         spin_unlock_irqrestore(hba->host->host_lock, flags);
1850
1851         /* acquire the tag to make sure device cmds don't use it */
1852         if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
1853                 /*
1854                  * Dev manage command in progress, requeue the command.
1855                  * Requeuing the command helps in cases where the request *may*
1856                  * find different tag instead of waiting for dev manage command
1857                  * find a different tag instead of waiting for dev manage command
1858                  */
1859                 err = SCSI_MLQUEUE_HOST_BUSY;
1860                 goto out;
1861         }
1862
1863         err = ufshcd_hold(hba, true);
1864         if (err) {
1865                 err = SCSI_MLQUEUE_HOST_BUSY;
1866                 clear_bit_unlock(tag, &hba->lrb_in_use);
1867                 goto out;
1868         }
1869         WARN_ON(hba->clk_gating.state != CLKS_ON);
1870
1871         lrbp = &hba->lrb[tag];
1872
1873         WARN_ON(lrbp->cmd);
1874         lrbp->cmd = cmd;
1875         lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
1876         lrbp->sense_buffer = cmd->sense_buffer;
1877         lrbp->task_tag = tag;
1878         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
1879         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
1880
1881         ufshcd_comp_scsi_upiu(hba, lrbp);
1882
1883         err = ufshcd_map_sg(hba, lrbp);
1884         if (err) {
1885                 lrbp->cmd = NULL;
1886                 clear_bit_unlock(tag, &hba->lrb_in_use);
1887                 goto out;
1888         }
1889         /* Make sure descriptors are ready before ringing the doorbell */
1890         wmb();
1891
1892         /* issue command to the controller */
1893         spin_lock_irqsave(hba->host->host_lock, flags);
1894         ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
1895         ufshcd_send_command(hba, tag);
1896 out_unlock:
1897         spin_unlock_irqrestore(hba->host->host_lock, flags);
1898 out:
1899         return err;
1900 }
1901
1902 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
1903                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
1904 {
1905         lrbp->cmd = NULL;
1906         lrbp->sense_bufflen = 0;
1907         lrbp->sense_buffer = NULL;
1908         lrbp->task_tag = tag;
1909         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
1910         lrbp->intr_cmd = true; /* No interrupt aggregation */
1911         hba->dev_cmd.type = cmd_type;
1912
1913         return ufshcd_comp_devman_upiu(hba, lrbp);
1914 }
1915
1916 static int
1917 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
1918 {
1919         int err = 0;
1920         unsigned long flags;
1921         u32 mask = 1 << tag;
1922
1923         /* clear outstanding transaction before retry */
1924         spin_lock_irqsave(hba->host->host_lock, flags);
1925         ufshcd_utrl_clear(hba, tag);
1926         spin_unlock_irqrestore(hba->host->host_lock, flags);
1927
1928         /*
1929          * wait for h/w to clear the corresponding bit in the doorbell.
1930          * max. wait is 1 sec.
1931          */
1932         err = ufshcd_wait_for_register(hba,
1933                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
1934                         mask, ~mask, 1000, 1000, true);
1935
1936         return err;
1937 }
1938
1939 static int
1940 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1941 {
1942         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1943
1944         /* Get the UPIU response */
1945         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
1946                                 UPIU_RSP_CODE_OFFSET;
1947         return query_res->response;
1948 }
1949
1950 /**
1951  * ufshcd_dev_cmd_completion() - handles device management command responses
1952  * @hba: per adapter instance
1953  * @lrbp: pointer to local reference block
1954  */
1955 static int
1956 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1957 {
1958         int resp;
1959         int err = 0;
1960
1961         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
1962         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
1963
1964         switch (resp) {
1965         case UPIU_TRANSACTION_NOP_IN:
1966                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
1967                         err = -EINVAL;
1968                         dev_err(hba->dev, "%s: unexpected response %x\n",
1969                                         __func__, resp);
1970                 }
1971                 break;
1972         case UPIU_TRANSACTION_QUERY_RSP:
1973                 err = ufshcd_check_query_response(hba, lrbp);
1974                 if (!err)
1975                         err = ufshcd_copy_query_response(hba, lrbp);
1976                 break;
1977         case UPIU_TRANSACTION_REJECT_UPIU:
1978                 /* TODO: handle Reject UPIU Response */
1979                 err = -EPERM;
1980                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
1981                                 __func__);
1982                 break;
1983         default:
1984                 err = -EINVAL;
1985                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
1986                                 __func__, resp);
1987                 break;
1988         }
1989
1990         return err;
1991 }
1992
1993 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
1994                 struct ufshcd_lrb *lrbp, int max_timeout)
1995 {
1996         int err = 0;
1997         unsigned long time_left;
1998         unsigned long flags;
1999
2000         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2001                         msecs_to_jiffies(max_timeout));
2002
2003         /* Make sure descriptors are ready before ringing the doorbell */
2004         wmb();
2005         spin_lock_irqsave(hba->host->host_lock, flags);
2006         hba->dev_cmd.complete = NULL;
2007         if (likely(time_left)) {
2008                 err = ufshcd_get_tr_ocs(lrbp);
2009                 if (!err)
2010                         err = ufshcd_dev_cmd_completion(hba, lrbp);
2011         }
2012         spin_unlock_irqrestore(hba->host->host_lock, flags);
2013
2014         if (!time_left) {
2015                 err = -ETIMEDOUT;
2016                 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
2017                         __func__, lrbp->task_tag);
2018                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2019                         /* successfully cleared the command, retry if needed */
2020                         err = -EAGAIN;
2021                 /*
2022                  * in case of an error, after clearing the doorbell,
2023                  * we also need to clear the outstanding_request
2024                  * field in hba
2025                  */
2026                 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2027         }
2028
2029         return err;
2030 }
2031
2032 /**
2033  * ufshcd_get_dev_cmd_tag - Get device management command tag
2034  * @hba: per-adapter instance
2035  * @tag_out: pointer to variable with available slot value
2036  *
2037  * Get a free slot and lock it until device management command
2038  * completes.
2039  *
2040  * Returns false if a free slot is unavailable for locking, else
2041  * returns true with the tag value in @tag_out.
2042  */
2043 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2044 {
2045         int tag;
2046         bool ret = false;
2047         unsigned long tmp;
2048
2049         if (!tag_out)
2050                 goto out;
2051
2052         do {
2053                 tmp = ~hba->lrb_in_use;
2054                 tag = find_last_bit(&tmp, hba->nutrs);
2055                 if (tag >= hba->nutrs)
2056                         goto out;
2057         } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2058
2059         *tag_out = tag;
2060         ret = true;
2061 out:
2062         return ret;
2063 }
2064
2065 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2066 {
2067         clear_bit_unlock(tag, &hba->lrb_in_use);
2068 }
2069
2070 /**
2071  * ufshcd_exec_dev_cmd - API for sending device management requests
2072  * @hba: UFS hba
2073  * @cmd_type: specifies the type (NOP, Query...)
2074  * @timeout: timeout in milliseconds
2075  *
2076  * NOTE: Since there is only one available tag for device management commands,
2077  * the caller is expected to hold the hba->dev_cmd.lock mutex.
2078  */
2079 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2080                 enum dev_cmd_type cmd_type, int timeout)
2081 {
2082         struct ufshcd_lrb *lrbp;
2083         int err;
2084         int tag;
2085         struct completion wait;
2086         unsigned long flags;
2087
2088         /*
2089          * Get free slot, sleep if slots are unavailable.
2090          * Even though we use wait_event() which sleeps indefinitely,
2091          * the maximum wait time is bounded by SCSI request timeout.
2092          */
2093         wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2094
2095         init_completion(&wait);
2096         lrbp = &hba->lrb[tag];
2097         WARN_ON(lrbp->cmd);
2098         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2099         if (unlikely(err))
2100                 goto out_put_tag;
2101
2102         hba->dev_cmd.complete = &wait;
2103
2104         /* Make sure descriptors are ready before ringing the doorbell */
2105         wmb();
2106         spin_lock_irqsave(hba->host->host_lock, flags);
2107         ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2108         ufshcd_send_command(hba, tag);
2109         spin_unlock_irqrestore(hba->host->host_lock, flags);
2110
2111         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2112
2113 out_put_tag:
2114         ufshcd_put_dev_cmd_tag(hba, tag);
2115         wake_up(&hba->dev_cmd.tag_wq);
2116         return err;
2117 }
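
/*
 * Illustrative usage sketch (not part of the driver): callers elsewhere in
 * this file serialize on hba->dev_cmd.lock and then issue, for example, a
 * NOP OUT device management command roughly as follows:
 *
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 */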
2118
2119 /**
2120  * ufshcd_init_query() - init the query response and request parameters
2121  * @hba: per-adapter instance
2122  * @request: address of the request pointer to be initialized
2123  * @response: address of the response pointer to be initialized
2124  * @opcode: operation to perform
2125  * @idn: flag idn to access
2126  * @index: LU number to access
2127  * @selector: query/flag/descriptor further identification
2128  */
2129 static inline void ufshcd_init_query(struct ufs_hba *hba,
2130                 struct ufs_query_req **request, struct ufs_query_res **response,
2131                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2132 {
2133         *request = &hba->dev_cmd.query.request;
2134         *response = &hba->dev_cmd.query.response;
2135         memset(*request, 0, sizeof(struct ufs_query_req));
2136         memset(*response, 0, sizeof(struct ufs_query_res));
2137         (*request)->upiu_req.opcode = opcode;
2138         (*request)->upiu_req.idn = idn;
2139         (*request)->upiu_req.index = index;
2140         (*request)->upiu_req.selector = selector;
2141 }
2142
2143 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2144         enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2145 {
2146         int ret;
2147         int retries;
2148
2149         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2150                 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2151                 if (ret)
2152                         dev_dbg(hba->dev,
2153                                 "%s: failed with error %d, retries %d\n",
2154                                 __func__, ret, retries);
2155                 else
2156                         break;
2157         }
2158
2159         if (ret)
2160                 dev_err(hba->dev,
2161                         "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
2162                         __func__, opcode, idn, ret, retries);
2163         return ret;
2164 }
2165
2166 /**
2167  * ufshcd_query_flag() - API function for sending flag query requests
2168  * @hba: per-adapter instance
2169  * @opcode: flag query to perform
2170  * @idn: flag idn to access
2171  * @flag_res: the flag value after the query request completes
2172  *
2173  * Returns 0 for success, non-zero in case of failure
2174  */
2175 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2176                         enum flag_idn idn, bool *flag_res)
2177 {
2178         struct ufs_query_req *request = NULL;
2179         struct ufs_query_res *response = NULL;
2180         int err, index = 0, selector = 0;
2181         int timeout = QUERY_REQ_TIMEOUT;
2182
2183         BUG_ON(!hba);
2184
2185         ufshcd_hold(hba, false);
2186         mutex_lock(&hba->dev_cmd.lock);
2187         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2188                         selector);
2189
2190         switch (opcode) {
2191         case UPIU_QUERY_OPCODE_SET_FLAG:
2192         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2193         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2194                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2195                 break;
2196         case UPIU_QUERY_OPCODE_READ_FLAG:
2197                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2198                 if (!flag_res) {
2199                         /* No dummy reads */
2200                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
2201                                         __func__);
2202                         err = -EINVAL;
2203                         goto out_unlock;
2204                 }
2205                 break;
2206         default:
2207                 dev_err(hba->dev,
2208                         "%s: Expected query flag opcode but got = %d\n",
2209                         __func__, opcode);
2210                 err = -EINVAL;
2211                 goto out_unlock;
2212         }
2213
2214         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2215
2216         if (err) {
2217                 dev_err(hba->dev,
2218                         "%s: Sending flag query for idn %d failed, err = %d\n",
2219                         __func__, idn, err);
2220                 goto out_unlock;
2221         }
2222
2223         if (flag_res)
2224                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2225                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2226
2227 out_unlock:
2228         mutex_unlock(&hba->dev_cmd.lock);
2229         ufshcd_release(hba);
2230         return err;
2231 }
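
/*
 * Illustrative usage sketch (not part of the driver): a caller polling a
 * device flag, e.g. fDeviceInit after setting it, could do something like:
 *
 *	bool flag_res = true;
 *	int err;
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 *
 * A zero return with flag_res == false then indicates the device has
 * cleared fDeviceInit.
 */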
2232
2233 /**
2234  * ufshcd_query_attr - API function for sending attribute requests
2235  * @hba: per-adapter instance
2236  * @opcode: attribute opcode
2237  * @idn: attribute idn to access
2238  * @index: index field
2239  * @selector: selector field
2240  * @attr_val: the attribute value after the query request completes
2241  *
2242  * Returns 0 for success, non-zero in case of failure
2243  */
2244 static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2245                         enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2246 {
2247         struct ufs_query_req *request = NULL;
2248         struct ufs_query_res *response = NULL;
2249         int err;
2250
2251         BUG_ON(!hba);
2252
2253         ufshcd_hold(hba, false);
2254         if (!attr_val) {
2255                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2256                                 __func__, opcode);
2257                 err = -EINVAL;
2258                 goto out;
2259         }
2260
2261         mutex_lock(&hba->dev_cmd.lock);
2262         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2263                         selector);
2264
2265         switch (opcode) {
2266         case UPIU_QUERY_OPCODE_WRITE_ATTR:
2267                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2268                 request->upiu_req.value = cpu_to_be32(*attr_val);
2269                 break;
2270         case UPIU_QUERY_OPCODE_READ_ATTR:
2271                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2272                 break;
2273         default:
2274                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2275                                 __func__, opcode);
2276                 err = -EINVAL;
2277                 goto out_unlock;
2278         }
2279
2280         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2281
2282         if (err) {
2283                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2284                                 __func__, opcode, idn, index, err);
2285                 goto out_unlock;
2286         }
2287
2288         *attr_val = be32_to_cpu(response->upiu_res.value);
2289
2290 out_unlock:
2291         mutex_unlock(&hba->dev_cmd.lock);
2292 out:
2293         ufshcd_release(hba);
2294         return err;
2295 }
2296
2297 /**
2298  * ufshcd_query_attr_retry() - API function for sending query
2299  * attribute with retries
2300  * @hba: per-adapter instance
2301  * @opcode: attribute opcode
2302  * @idn: attribute idn to access
2303  * @index: index field
2304  * @selector: selector field
2305  * @attr_val: the attribute value after the query request
2306  * completes
2307  *
2308  * Returns 0 for success, non-zero in case of failure
2309  */
2310 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2311         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2312         u32 *attr_val)
2313 {
2314         int ret = 0;
2315         u32 retries;
2316
2317         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2318                 ret = ufshcd_query_attr(hba, opcode, idn, index,
2319                                                 selector, attr_val);
2320                 if (ret)
2321                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2322                                 __func__, ret, retries);
2323                 else
2324                         break;
2325         }
2326
2327         if (ret)
2328                 dev_err(hba->dev,
2329                         "%s: query attribute, idn %d, failed with error %d after %d retries\n",
2330                         __func__, idn, ret, QUERY_REQ_RETRIES);
2331         return ret;
2332 }
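
/*
 * Illustrative usage sketch (not part of the driver): reading a device
 * attribute, e.g. bBackgroundOpStatus, could look roughly like:
 *
 *	u32 bkops_status;
 *	int err;
 *
 *	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				      QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0,
 *				      &bkops_status);
 */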
2333
2334 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2335                         enum query_opcode opcode, enum desc_idn idn, u8 index,
2336                         u8 selector, u8 *desc_buf, int *buf_len)
2337 {
2338         struct ufs_query_req *request = NULL;
2339         struct ufs_query_res *response = NULL;
2340         int err;
2341
2342         BUG_ON(!hba);
2343
2344         ufshcd_hold(hba, false);
2345         if (!desc_buf) {
2346                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2347                                 __func__, opcode);
2348                 err = -EINVAL;
2349                 goto out;
2350         }
2351
2352         if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2353                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2354                                 __func__, *buf_len);
2355                 err = -EINVAL;
2356                 goto out;
2357         }
2358
2359         mutex_lock(&hba->dev_cmd.lock);
2360         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2361                         selector);
2362         hba->dev_cmd.query.descriptor = desc_buf;
2363         request->upiu_req.length = cpu_to_be16(*buf_len);
2364
2365         switch (opcode) {
2366         case UPIU_QUERY_OPCODE_WRITE_DESC:
2367                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2368                 break;
2369         case UPIU_QUERY_OPCODE_READ_DESC:
2370                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2371                 break;
2372         default:
2373                 dev_err(hba->dev,
2374                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2375                                 __func__, opcode);
2376                 err = -EINVAL;
2377                 goto out_unlock;
2378         }
2379
2380         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2381
2382         if (err) {
2383                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2384                                 __func__, opcode, idn, index, err);
2385                 goto out_unlock;
2386         }
2387
2388         hba->dev_cmd.query.descriptor = NULL;
2389         *buf_len = be16_to_cpu(response->upiu_res.length);
2390
2391 out_unlock:
2392         mutex_unlock(&hba->dev_cmd.lock);
2393 out:
2394         ufshcd_release(hba);
2395         return err;
2396 }
2397
2398 /**
2399  * ufshcd_query_descriptor_retry - API function for sending descriptor
2400  * requests
2401  * @hba: per-adapter instance
2402  * @opcode: descriptor query opcode
2403  * @idn: descriptor idn to access
2404  * @index: index field
2405  * @selector: selector field
2406  * @desc_buf: the buffer that contains the descriptor
2407  * @buf_len: length parameter passed to the device
2408  *
2409  * Returns 0 for success, non-zero in case of failure.
2410  * The buf_len parameter will contain, on return, the length parameter
2411  * received on the response.
2412  */
2413 static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2414                                          enum query_opcode opcode,
2415                                          enum desc_idn idn, u8 index,
2416                                          u8 selector,
2417                                          u8 *desc_buf, int *buf_len)
2418 {
2419         int err;
2420         int retries;
2421
2422         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2423                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
2424                                                 selector, desc_buf, buf_len);
2425                 if (!err || err == -EINVAL)
2426                         break;
2427         }
2428
2429         return err;
2430 }
2431
2432 /**
2433  * ufshcd_read_desc_param - read the specified descriptor parameter
2434  * @hba: Pointer to adapter instance
2435  * @desc_id: descriptor idn value
2436  * @desc_index: descriptor index
2437  * @param_offset: offset of the parameter to read
2438  * @param_read_buf: pointer to buffer where parameter would be read
2439  * @param_size: sizeof(param_read_buf)
2440  *
2441  * Return 0 in case of success, non-zero otherwise
2442  */
2443 static int ufshcd_read_desc_param(struct ufs_hba *hba,
2444                                   enum desc_idn desc_id,
2445                                   int desc_index,
2446                                   u32 param_offset,
2447                                   u8 *param_read_buf,
2448                                   u32 param_size)
2449 {
2450         int ret;
2451         u8 *desc_buf;
2452         u32 buff_len;
2453         bool is_kmalloc = true;
2454
2455         /* safety checks */
2456         if (desc_id >= QUERY_DESC_IDN_MAX)
2457                 return -EINVAL;
2458
2459         buff_len = ufs_query_desc_max_size[desc_id];
2460         if ((param_offset + param_size) > buff_len)
2461                 return -EINVAL;
2462
2463         if (!param_offset && (param_size == buff_len)) {
2464                 /* memory space already available to hold full descriptor */
2465                 desc_buf = param_read_buf;
2466                 is_kmalloc = false;
2467         } else {
2468                 /* allocate memory to hold full descriptor */
2469                 desc_buf = kmalloc(buff_len, GFP_KERNEL);
2470                 if (!desc_buf)
2471                         return -ENOMEM;
2472         }
2473
2474         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2475                                         desc_id, desc_index, 0, desc_buf,
2476                                         &buff_len);
2477
2478         if (ret) {
2479                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
2480                         __func__, desc_id, desc_index, param_offset, ret);
2481
2482                 goto out;
2483         }
2484
2485         /* Sanity check */
2486         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
2487                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
2488                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
2489                 ret = -EINVAL;
2490                 goto out;
2491         }
2492
2493         /*
2494          * While reading variable size descriptors (like string descriptor),
2495          * some UFS devices may report the "LENGTH" (field in "Transaction
2496          * Specific fields" of Query Response UPIU) the same as what was
2497          * requested in Query Request UPIU instead of reporting the actual
2498          * size of the variable size descriptor.
2499          * It's safe to ignore the "LENGTH" field for variable size
2500          * descriptors, as we can always derive the length of the descriptor
2501          * from the descriptor header fields. Hence, impose the length match
2502          * check only for fixed size descriptors (for which we always request
2503          * the correct size as part of the Query Request UPIU).
2504          */
2505         if ((desc_id != QUERY_DESC_IDN_STRING) &&
2506             (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
2507                 dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
2508                         __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
2509                 ret = -EINVAL;
2510                 goto out;
2511         }
2512
2513         if (is_kmalloc)
2514                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
2515 out:
2516         if (is_kmalloc)
2517                 kfree(desc_buf);
2518         return ret;
2519 }
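
/*
 * Illustrative usage sketch (not part of the driver): a single parameter can
 * be fetched without the caller allocating a full descriptor buffer, e.g. the
 * two byte wManufacturerID field of the device descriptor:
 *
 *	u8 manf_id[2];
 *	int err;
 *
 *	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
 *				     DEVICE_DESC_PARAM_MANF_ID, manf_id,
 *				     sizeof(manf_id));
 */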
2520
2521 static inline int ufshcd_read_desc(struct ufs_hba *hba,
2522                                    enum desc_idn desc_id,
2523                                    int desc_index,
2524                                    u8 *buf,
2525                                    u32 size)
2526 {
2527         return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
2528 }
2529
2530 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
2531                                          u8 *buf,
2532                                          u32 size)
2533 {
2534         int err = 0;
2535         int retries;
2536
2537         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2538                 /* Read descriptor */
2539                 err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
2540                 if (!err)
2541                         break;
2542                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
2543         }
2544
2545         return err;
2546 }
2547
2548 static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
2549 {
2550         return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
2551 }
2552
2553 /**
2554  * ufshcd_read_string_desc - read string descriptor
2555  * @hba: pointer to adapter instance
2556  * @desc_index: descriptor index
2557  * @buf: pointer to buffer where descriptor would be read
2558  * @size: size of buf
2559  * @ascii: if true, convert from UTF-16 to ASCII characters
2560  *
2561  * Return 0 in case of success, non-zero otherwise
2562  */
2563 #define ASCII_STD true
2564 static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
2565                                    u8 *buf, u32 size, bool ascii)
2566 {
2567         int err = 0;
2568
2569         err = ufshcd_read_desc(hba,
2570                                 QUERY_DESC_IDN_STRING, desc_index, buf, size);
2571
2572         if (err) {
2573                 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
2574                         __func__, QUERY_REQ_RETRIES, err);
2575                 goto out;
2576         }
2577
2578         if (ascii) {
2579                 int desc_len;
2580                 int ascii_len;
2581                 int i;
2582                 char *buff_ascii;
2583
2584                 desc_len = buf[0];
2585                 /* remove header and divide by 2 to move from UTF16 to UTF8 */
2586                 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
2587                 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
2588                         dev_err(hba->dev, "%s: buffer allocated size is too small\n",
2589                                         __func__);
2590                         err = -ENOMEM;
2591                         goto out;
2592                 }
2593
2594                 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
2595                 if (!buff_ascii) {
2596                         err = -ENOMEM;
2597                         goto out;
2598                 }
2599
2600                 /*
2601                  * the descriptor contains a string in UTF-16 format;
2602                  * we need to convert it to UTF-8 so it can be displayed
2603                  */
2604                 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
2605                                 desc_len - QUERY_DESC_HDR_SIZE,
2606                                 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
2607
2608                 /* replace non-printable or non-ASCII characters with spaces */
2609                 for (i = 0; i < ascii_len; i++)
2610                         ufshcd_remove_non_printable(&buff_ascii[i]);
2611
2612                 memset(buf + QUERY_DESC_HDR_SIZE, 0,
2613                                 size - QUERY_DESC_HDR_SIZE);
2614                 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
2615                 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
2616                 kfree(buff_ascii);
2617         }
2618 out:
2619         return err;
2620 }
2621
2622 /**
2623  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
2624  * @hba: Pointer to adapter instance
2625  * @lun: lun id
2626  * @param_offset: offset of the parameter to read
2627  * @param_read_buf: pointer to buffer where parameter would be read
2628  * @param_size: sizeof(param_read_buf)
2629  *
2630  * Return 0 in case of success, non-zero otherwise
2631  */
2632 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
2633                                               int lun,
2634                                               enum unit_desc_param param_offset,
2635                                               u8 *param_read_buf,
2636                                               u32 param_size)
2637 {
2638         /*
2639          * Unit descriptors are only available for general purpose LUs (LUN id
2640          * from 0 to 7) and RPMB Well known LU.
2641          */
2642         if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
2643                 return -EOPNOTSUPP;
2644
2645         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
2646                                       param_offset, param_read_buf, param_size);
2647 }
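
/*
 * Illustrative usage sketch (not part of the driver): reading the queue depth
 * advertised by a logical unit could look roughly like:
 *
 *	u8 lun_qdepth;
 *	int ret;
 *
 *	ret = ufshcd_read_unit_desc_param(hba, lun, UNIT_DESC_PARAM_LU_Q_DEPTH,
 *					  &lun_qdepth, sizeof(lun_qdepth));
 */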
2648
2649 /**
2650  * ufshcd_memory_alloc - allocate memory for host memory space data structures
2651  * @hba: per adapter instance
2652  *
2653  * 1. Allocate DMA memory for Command Descriptor array
2654  *      Each command descriptor consists of Command UPIU, Response UPIU and PRDT
2655  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
2656  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
2657  *      (UTMRDL)
2658  * 4. Allocate memory for local reference block(lrb).
2659  *
2660  * Returns 0 for success, non-zero in case of failure
2661  */
2662 static int ufshcd_memory_alloc(struct ufs_hba *hba)
2663 {
2664         size_t utmrdl_size, utrdl_size, ucdl_size;
2665
2666         /* Allocate memory for UTP command descriptors */
2667         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
2668         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
2669                                                   ucdl_size,
2670                                                   &hba->ucdl_dma_addr,
2671                                                   GFP_KERNEL);
2672
2673         /*
2674          * UFSHCI requires UTP command descriptor to be 128 byte aligned.
2675          * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
2676          * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
2677          * be aligned to 128 bytes as well
2678          */
2679         if (!hba->ucdl_base_addr ||
2680             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
2681                 dev_err(hba->dev,
2682                         "Command Descriptor Memory allocation failed\n");
2683                 goto out;
2684         }
2685
2686         /*
2687          * Allocate memory for UTP Transfer descriptors
2688          * UFSHCI requires 1024 byte alignment of UTRD
2689          */
2690         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
2691         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
2692                                                    utrdl_size,
2693                                                    &hba->utrdl_dma_addr,
2694                                                    GFP_KERNEL);
2695         if (!hba->utrdl_base_addr ||
2696             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
2697                 dev_err(hba->dev,
2698                         "Transfer Descriptor Memory allocation failed\n");
2699                 goto out;
2700         }
2701
2702         /*
2703          * Allocate memory for UTP Task Management descriptors
2704          * UFSHCI requires 1024 byte alignment of UTMRD
2705          */
2706         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
2707         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
2708                                                     utmrdl_size,
2709                                                     &hba->utmrdl_dma_addr,
2710                                                     GFP_KERNEL);
2711         if (!hba->utmrdl_base_addr ||
2712             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
2713                 dev_err(hba->dev,
2714                 "Task Management Descriptor Memory allocation failed\n");
2715                 goto out;
2716         }
2717
2718         /* Allocate memory for local reference block */
2719         hba->lrb = devm_kzalloc(hba->dev,
2720                                 hba->nutrs * sizeof(struct ufshcd_lrb),
2721                                 GFP_KERNEL);
2722         if (!hba->lrb) {
2723                 dev_err(hba->dev, "LRB Memory allocation failed\n");
2724                 goto out;
2725         }
2726         return 0;
2727 out:
2728         return -ENOMEM;
2729 }
2730
2731 /**
2732  * ufshcd_host_memory_configure - configure local reference block with
2733  *                              memory offsets
2734  * @hba: per adapter instance
2735  *
2736  * Configure Host memory space
2737  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
2738  * address.
2739  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
2740  * and PRDT offset.
2741  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
2742  * into local reference block.
2743  */
2744 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
2745 {
2746         struct utp_transfer_cmd_desc *cmd_descp;
2747         struct utp_transfer_req_desc *utrdlp;
2748         dma_addr_t cmd_desc_dma_addr;
2749         dma_addr_t cmd_desc_element_addr;
2750         u16 response_offset;
2751         u16 prdt_offset;
2752         int cmd_desc_size;
2753         int i;
2754
2755         utrdlp = hba->utrdl_base_addr;
2756         cmd_descp = hba->ucdl_base_addr;
2757
2758         response_offset =
2759                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
2760         prdt_offset =
2761                 offsetof(struct utp_transfer_cmd_desc, prd_table);
2762
2763         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
2764         cmd_desc_dma_addr = hba->ucdl_dma_addr;
2765
2766         for (i = 0; i < hba->nutrs; i++) {
2767                 /* Configure UTRD with command descriptor base address */
2768                 cmd_desc_element_addr =
2769                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
2770                 utrdlp[i].command_desc_base_addr_lo =
2771                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
2772                 utrdlp[i].command_desc_base_addr_hi =
2773                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
2774
2775                 /* Response upiu and prdt offset should be in double words */
2776                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
2777                         utrdlp[i].response_upiu_offset =
2778                                 cpu_to_le16(response_offset);
2779                         utrdlp[i].prd_table_offset =
2780                                 cpu_to_le16(prdt_offset);
2781                         utrdlp[i].response_upiu_length =
2782                                 cpu_to_le16(ALIGNED_UPIU_SIZE);
2783                 } else {
2784                         utrdlp[i].response_upiu_offset =
2785                                 cpu_to_le16((response_offset >> 2));
2786                         utrdlp[i].prd_table_offset =
2787                                 cpu_to_le16((prdt_offset >> 2));
2788                         utrdlp[i].response_upiu_length =
2789                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
2790                 }
2791
2792                 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
2793                 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
2794                                 (i * sizeof(struct utp_transfer_req_desc));
2795                 hba->lrb[i].ucd_req_ptr =
2796                         (struct utp_upiu_req *)(cmd_descp + i);
2797                 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
2798                 hba->lrb[i].ucd_rsp_ptr =
2799                         (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2800                 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
2801                                 response_offset;
2802                 hba->lrb[i].ucd_prdt_ptr =
2803                         (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2804                 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
2805                                 prdt_offset;
2806         }
2807 }
2808
2809 /**
2810  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
2811  * @hba: per adapter instance
2812  *
2813  * The UIC_CMD_DME_LINK_STARTUP command must be issued to the Unipro layer
2814  * in order to initiate the Unipro link startup procedure.
2815  * Once the Unipro links are up, the device connected to the controller
2816  * is detected.
2817  *
2818  * Returns 0 on success, non-zero value on failure
2819  */
2820 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2821 {
2822         struct uic_command uic_cmd = {0};
2823         int ret;
2824
2825         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
2826
2827         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2828         if (ret)
2829                 dev_dbg(hba->dev,
2830                         "dme-link-startup: error code %d\n", ret);
2831         return ret;
2832 }
2833
2834 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
2835 {
2836         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
2837         unsigned long min_sleep_time_us;
2838
2839         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
2840                 return;
2841
2842         /*
2843          * last_dme_cmd_tstamp will be 0 only for 1st call to
2844          * this function
2845          */
2846         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
2847                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
2848         } else {
2849                 unsigned long delta =
2850                         (unsigned long) ktime_to_us(
2851                                 ktime_sub(ktime_get(),
2852                                 hba->last_dme_cmd_tstamp));
2853
2854                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
2855                         min_sleep_time_us =
2856                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
2857                 else
2858                         return; /* no more delay required */
2859         }
2860
2861         /* allow sleep for extra 50us if needed */
2862         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
2863 }
2864
2865 /**
2866  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
2867  * @hba: per adapter instance
2868  * @attr_sel: uic command argument1
2869  * @attr_set: attribute set type as uic command argument2
2870  * @mib_val: setting value as uic command argument3
2871  * @peer: indicate whether peer or local
2872  *
2873  * Returns 0 on success, non-zero value on failure
2874  */
2875 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
2876                         u8 attr_set, u32 mib_val, u8 peer)
2877 {
2878         struct uic_command uic_cmd = {0};
2879         static const char *const action[] = {
2880                 "dme-set",
2881                 "dme-peer-set"
2882         };
2883         const char *set = action[!!peer];
2884         int ret;
2885         int retries = UFS_UIC_COMMAND_RETRIES;
2886
2887         uic_cmd.command = peer ?
2888                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
2889         uic_cmd.argument1 = attr_sel;
2890         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
2891         uic_cmd.argument3 = mib_val;
2892
2893         do {
2894                 /* for peer attributes we retry upon failure */
2895                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2896                 if (ret)
2897                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
2898                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
2899         } while (ret && peer && --retries);
2900
2901         if (ret)
2902                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
2903                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
2904                         UFS_UIC_COMMAND_RETRIES - retries);
2905
2906         return ret;
2907 }
2908 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
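/*
 * Callers normally reach this through the ufshcd_dme_set() and
 * ufshcd_dme_peer_set() convenience wrappers in ufshcd.h, e.g.
 * (illustrative):
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
 *
 * which issues a DME_SET for the local PA_TXTERMINATION attribute.
 */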
2909
2910 /**
2911  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
2912  * @hba: per adapter instance
2913  * @attr_sel: uic command argument1
2914  * @mib_val: the value of the attribute as returned by the UIC command
2915  * @peer: indicate whether peer or local
2916  *
2917  * Returns 0 on success, non-zero value on failure
2918  */
2919 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2920                         u32 *mib_val, u8 peer)
2921 {
2922         struct uic_command uic_cmd = {0};
2923         static const char *const action[] = {
2924                 "dme-get",
2925                 "dme-peer-get"
2926         };
2927         const char *get = action[!!peer];
2928         int ret;
2929         int retries = UFS_UIC_COMMAND_RETRIES;
2930         struct ufs_pa_layer_attr orig_pwr_info;
2931         struct ufs_pa_layer_attr temp_pwr_info;
2932         bool pwr_mode_change = false;
2933
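        /*
         * Hosts with UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE perform peer
         * attribute reads with the link in an AUTO power mode: if the link is
         * currently in FAST or SLOW mode it is temporarily moved to FASTAUTO
         * or SLOWAUTO respectively, and the original power mode is restored
         * once the DME_PEER_GET completes.
         */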
2934         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
2935                 orig_pwr_info = hba->pwr_info;
2936                 temp_pwr_info = orig_pwr_info;
2937
2938                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
2939                     orig_pwr_info.pwr_rx == FAST_MODE) {
2940                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
2941                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
2942                         pwr_mode_change = true;
2943                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
2944                     orig_pwr_info.pwr_rx == SLOW_MODE) {
2945                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
2946                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
2947                         pwr_mode_change = true;
2948                 }
2949                 if (pwr_mode_change) {
2950                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
2951                         if (ret)
2952                                 goto out;
2953                 }
2954         }
2955
2956         uic_cmd.command = peer ?
2957                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
2958         uic_cmd.argument1 = attr_sel;
2959
2960         do {
2961                 /* for peer attributes we retry upon failure */
2962                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2963                 if (ret)
2964                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
2965                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
2966         } while (ret && peer && --retries);
2967
2968         if (ret)
2969                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
2970                         get, UIC_GET_ATTR_ID(attr_sel),
2971                         UFS_UIC_COMMAND_RETRIES - retries);
2972
2973         if (mib_val && !ret)
2974                 *mib_val = uic_cmd.argument3;
2975
2976         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
2977             && pwr_mode_change)
2978                 ufshcd_change_power_mode(hba, &orig_pwr_info);
2979 out:
2980         return ret;
2981 }
2982 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
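/*
 * As with the set path, most callers use the ufshcd_dme_get() and
 * ufshcd_dme_peer_get() wrappers from ufshcd.h, e.g. (illustrative):
 *
 *	u32 gear;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear);
 *
 * which issues a DME_GET for the local PA_MAXRXHSGEAR attribute.
 */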
2983
2984 /**
2985  * ufshcd_uic_pwr_ctrl - executes a UIC command which affects the link
2986  * power state and waits for it to take effect.
2987  *
2988  * @hba: per adapter instance
2989  * @cmd: UIC command to execute
2990  *
2991  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
2992  * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
2993  * and device UniPro link, so their final completion is indicated by
2994  * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
2995  * addition to the normal UIC command completion status (UCCS). This function
2996  * only returns after the relevant status bits indicate completion.
2997  *
2998  * Returns 0 on success, non-zero value on failure
2999  */
3000 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3001 {
3002         struct completion uic_async_done;
3003         unsigned long flags;
3004         u8 status;
3005         int ret;
3006         bool reenable_intr = false;
3007
3008         mutex_lock(&hba->uic_cmd_mutex);
3009         init_completion(&uic_async_done);
3010         ufshcd_add_delay_before_dme_cmd(hba);
3011
3012         spin_lock_irqsave(hba->host->host_lock, flags);
3013         hba->uic_async_done = &uic_async_done;
3014         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3015                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3016                 /*
3017                  * Make sure UIC command completion interrupt is disabled before
3018                  * issuing UIC command.
3019                  */
3020                 wmb();
3021                 reenable_intr = true;
3022         }
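        /*
         * Completion of this command is signalled through uic_async_done,
         * which the interrupt handler completes on the power-mode status bits
         * (UPMS/UHES/UHXS), so the generic UIC command completion interrupt is
         * not needed while this command is in flight; it is restored below.
         */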
3023         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3024         spin_unlock_irqrestore(hba->host->host_lock, flags);
3025         if (ret) {
3026                 dev_err(hba->dev,
3027                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3028                         cmd->command, cmd->argument3, ret);
3029                 goto out;
3030         }
3031
3032         if (!wait_for_completion_timeout(hba->uic_async_done,
3033                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3034                 dev_err(hba->dev,
3035                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3036                         cmd->command, cmd->argument3);
3037                 ret = -ETIMEDOUT;
3038                 goto out;
3039         }
3040
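        /*
         * The UPMCRS field of the Host Controller Status register reports the
         * result of the power mode change request; only PWR_LOCAL indicates
         * success here, any other value is propagated as an error.
         */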
3041         status = ufshcd_get_upmcrs(hba);
3042         if (status != PWR_LOCAL) {
3043                 dev_err(hba->dev,
3044                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3045                         cmd->command, status);
3046                 ret = (status != PWR_OK) ? status : -1;
3047         }
3048 out:
3049         spin_lock_irqsave(hba->host->host_lock, flags);
3050         hba->active_uic_cmd = NULL;
3051         hba->uic_async_done = NULL;
3052         if (reenable_intr)
3053                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3054         spin_unlock_irqrestore(hba->host->host_lock, flags);
3055         mutex_unlock(&hba->uic_cmd_mutex);
3056
3057         return ret;
3058 }
3059
3060 /**
3061  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3062  *                              using DME_SET primitives.
3063  * @hba: per adapter instance
3064  * @mode: power mode value
3065  *
3066  * Returns 0 on success, non-zero value on failure
3067  */
3068 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3069 {
3070         struct uic_command uic_cmd = {0};
3071         int ret;
3072
3073         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3074                 ret = ufshcd_dme_set(hba,
3075                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3076                 if (ret) {
3077                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3078                                                 __func__, ret);
3079                         goto out;
3080                 }
3081         }
3082
3083         uic_cmd.command = UIC_CMD_DME_SET;
3084         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3085         uic_cmd.argument3 = mode;
3086         ufshcd_hold(hba, false);
3087         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3088         ufshcd_release(hba);
3089
3090 out:
3091         return ret;
3092 }
3093
3094 static int ufshcd_link_recovery(struct ufs_hba *hba)
3095 {
3096         int ret;
3097         unsigned long flags;
3098
3099         spin_lock_irqsave(hba->host->host_lock, flags);
3100         hba->ufshcd_state = UFSHCD_STATE_RESET;
3101         ufshcd_set_eh_in_progress(hba);
3102         spin_unlock_irqrestore(hba->host->host_lock, flags);
3103
3104         ret = ufshcd_host_reset_and_restore(hba);
3105
3106         spin_lock_irqsave(hba->host->host_lock, flags);
3107         if (ret)
3108                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3109         ufshcd_clear_eh_in_progress(hba);
3110         spin_unlock_irqrestore(hba->host->host_lock, flags);
3111
3112         if (ret)
3113                 dev_err(hba->dev, "%s: link recovery failed, err %d\n",
3114                         __func__, ret);
3115
3116         return ret;
3117 }
3118
3119 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3120 {
3121         int ret;
3122         struct uic_command uic_cmd = {0};
3123         ktime_t start = ktime_get();
3124
3125         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3126
3127         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3128         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3129         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3130                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3131
3132         if (ret) {
3133                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3134                         __func__, ret);
3135
3136                 /*
3137          * If link recovery fails, return an error so that the caller
3138          * doesn't retry hibern8 enter again.
3139                  */
3140                 if (ufshcd_link_recovery(hba))
3141                         ret = -ENOLINK;
3142         } else
3143                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3144                                                                 POST_CHANGE);
3145
3146         return ret;
3147 }
3148
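/*
 * Retry hibern8 enter up to UIC_HIBERN8_ENTER_RETRIES times. A return value
 * of -ENOLINK means link recovery already failed inside the helper, so
 * further retries would be pointless and the loop exits early.
 */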
3149 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3150 {
3151         int ret = 0, retries;
3152
3153         for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3154                 ret = __ufshcd_uic_hibern8_enter(hba);
3155                 if (!ret || ret == -ENOLINK)
3156                         goto out;
3157         }
3158 out:
3159         return ret;
3160 }
3161
3162 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3163 {
3164         struct uic_command uic_cmd = {0};
3165         int ret;
3166         ktime_t start = ktime_get();
3167
3168         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3169
3170         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3171         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3172         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3173                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3174
3175         if (ret) {
3176                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3177                         __func__, ret);
3178                 ret = ufshcd_link_recovery(hba);
3179         } else {
3180                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3181                                                                 POST_CHANGE);
3182                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3183                 hba->ufs_stats.hibern8_exit_cnt++;
3184         }
3185
3186         return ret;
3187 }
3188
3189 /**
3190  * ufshcd_init_pwr_info - setting the POR (power on reset)
3191  * values in hba power info
3192  * @hba: per-adapter instance
3193  */
3194 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3195 {
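        /*
         * These POR defaults describe the minimal power configuration:
         * PWM Gear 1, a single lane in each direction, SLOW_AUTO mode and
         * no HS rate series selected.
         */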
3196         hba->pwr_info.gear_rx = UFS_PWM_G1;
3197         hba->pwr_info.gear_tx = UFS_PWM_G1;
3198         hba->pwr_info.lane_rx = 1;
3199         hba->pwr_info.lane_tx = 1;
3200         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3201         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3202         hba->pwr_info.hs_rate = 0;
3203 }
3204
3205 /**
3206  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3207  * @hba: per-adapter instance
3208  */
3209 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
3210 {
3211         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3212
3213         if (hba->max_pwr_info.is_valid)
3214                 return 0;
3215
3216         pwr_info->pwr_tx = FAST_MODE;
3217         pwr_info->pwr_rx = FAST_MODE;
3218         pwr_info->hs_rate = PA_HS_MODE_B;
3219
3220         /* Get the connected lane count */
3221         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3222                         &pwr_info->lane_rx);
3223         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3224                         &pwr_info->lane_tx);
3225
3226         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3227                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3228                                 __func__,
3229                                 pwr_info->lane_rx,
3230                                 pwr_info->lane_tx);
3231                 return -EINVAL;
3232         }
3233
3234         /*
3235          * First, get the maximum gear of HS speed. If it reads as zero,
3236          * there is no HS gear capability, so fall back to the maximum
3237          * gear of PWM speed.
3238          */
3239         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3240         if (!pwr_info->gear_rx) {
3241                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3242                                 &pwr_info->gear_rx);
3243                 if (!pwr_info->gear_rx) {
3244                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3245                                 __func__, pwr_info->gear_rx);
3246                         return -EINVAL;
3247                 }
3248                 pwr_info->pwr_rx = SLOW_MODE;
3249         }
3250
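        /*
         * The host's TX capability is bounded by what the peer (the device)
         * can receive, so the TX gear is read from the peer's PA_MAXRXHSGEAR/
         * PA_MAXRXPWMGEAR attributes.
         */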
3251         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3252                         &pwr_info->gear_tx);
3253         if (!pwr_info->gear_tx) {
3254                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3255                                 &pwr_info->gear_tx);
3256                 if (!pwr_info->gear_tx) {
3257                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
3258                                 __func__, pwr_info->gear_tx);
3259                         return -EINVAL;
3260                 }
3261                 pwr_info->pwr_tx = SLOW_MODE;
3262         }
3263
3264         hba->max_pwr_info.is_valid = true;
3265         return 0;
3266 }
3267
3268 static int ufshcd_change_power_mode(struct ufs_hba *hba,
3269                              struct ufs_pa_layer_attr *pwr_mode)
3270 {
3271         int ret;
3272
3273         /* if already configured to the requested pwr_mode */
3274         if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
3275             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
3276             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
3277             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
3278             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
3279             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
3280             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
3281                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
3282                 return 0;
3283         }
3284
3285         /*
3286          * Configure the attributes required for the power mode change:
3287          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
3288          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
3289          * - PA_HSSERIES
3290          */
3291         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
3292         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
3293                         pwr_mode->lane_rx);
3294         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
3295                         pwr_mode->pwr_rx == FAST_MODE)
3296                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
3297         else
3298                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
3299
3300         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
3301         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
3302                         pwr_mode->lane_tx);
3303         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
3304                         pwr_mode->pwr_tx == FAST_MODE)
3305                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
3306         else
3307                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
3308
3309         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
3310             pwr_mode->pwr_tx == FASTAUTO_MODE ||
3311             pwr_mode->pwr_rx == FAST_MODE ||
3312             pwr_mode->pwr_tx == FAST_MODE)
3313                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
3314                                                 pwr_mode->hs_rate);
3315
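        /*
         * PA_PWRMODE packs the RX mode into the upper nibble and the TX mode
         * into the lower nibble of the power mode value, e.g.
         * (FAST_MODE << 4) | FAST_MODE requests fast mode in both directions.
         */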
3316         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
3317                         | pwr_mode->pwr_tx);
3318
3319         if (ret) {
3320                 dev_err(hba->dev,
3321                         "%s: power mode change failed %d\n", __func__, ret);
3322         } else {
3323                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
3324                                                                 pwr_mode);
3325
3326                 memcpy(&hba->pwr_info, pwr_mode,
3327                         sizeof(struct ufs_pa_layer_attr));
3328         }
3329
3330         ufshcd_print_pwr_info(hba);
3331
3332         return ret;
3333 }
3334
3335 /**
3336  * ufshcd_config_pwr_mode - configure a new power mode
3337  * @hba: per-adapter instance
3338  * @desired_pwr_mode: desired power configuration
3339  */
3340 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
3341                 struct ufs_pa_layer_attr *desired_pwr_mode)
3342 {
3343         struct ufs_pa_layer_attr final_params = { 0 };
3344         int ret;
3345
3346         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
3347                                         desired_pwr_mode, &final_params);
3348
3349         if (ret)
3350                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
3351
3352         ret = ufshcd_change_power_mode(hba, &final_params);
3353
3354         return ret;
3355 }
3356
3357 /**
3358  * ufshcd_complete_dev_init() - checks device readiness
3359  * @hba: per-adapter instance
3360  *
3361  * Set fDeviceInit flag and poll until device toggles it.
3362  */
3363 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
3364 {
3365         int i;
3366         int err;
3367         bool flag_res = true;
3368
3369         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3370                 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
3371         if (err) {
3372                 dev_err(hba->dev,
3373                         "%s setting fDeviceInit flag failed with error %d\n",
3374                         __func__, err);
3375                 goto out;
3376         }
3377
3378         /* poll for max. 1000 iterations for fDeviceInit flag to clear */
3379         for (i = 0; i < 1000 && !err && flag_res; i++)
3380                 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
3381                         QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
3382
3383         if (err)
3384                 dev_err(hba->dev,
3385                         "%s reading fDeviceInit flag failed with error %d\n",
3386                         __func__, err);
3387         else if (flag_res)
3388                 dev_err(hba->dev,
3389                         "%s fDeviceInit was not cleared by the device\n",
3390                         __func__);
3391
3392 out:
3393         return err;
3394 }
3395
3396 /**
3397  * ufshcd_make_hba_operational - Make UFS controller operational
3398  * @hba: per adapter instance
3399  *
3400  * To bring UFS host controller to operational state,
3401  * 1. Enable required interrupts
3402  * 2. Configure interrupt aggregation
3403  * 3. Program UTRL and UTMRL base address
3404  * 4. Configure run-stop-registers
3405  *
3406  * Returns 0 on success, non-zero value on failure
3407  */
3408 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
3409 {
3410         int err = 0;
3411         u32 reg;
3412
3413         /* Enable required interrupts */
3414         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
3415
3416         /* Configure interrupt aggregation */
3417         if (ufshcd_is_intr_aggr_allowed(hba))
3418                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
3419         else
3420                 ufshcd_disable_intr_aggr(hba);
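        /*
         * With aggregation enabled, the counter threshold is programmed to
         * hba->nutrs - 1 outstanding requests and the timeout to
         * INT_AGGR_DEF_TO, which is expressed in 40 us units.
         */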
3421
3422         /* Configure UTRL and UTMRL base address registers */
3423         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
3424                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
3425         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
3426                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
3427         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
3428                         REG_UTP_TASK_REQ_LIST_BASE_L);
3429         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
3430                         REG_UTP_TASK_REQ_LIST_BASE_H);
3431