drivers/vhost/scsi.c
1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2013 Datera, Inc.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/vmalloc.h>
39 #include <linux/miscdevice.h>
40 #include <asm/unaligned.h>
41 #include <scsi/scsi_common.h>
42 #include <scsi/scsi_proto.h>
43 #include <target/target_core_base.h>
44 #include <target/target_core_fabric.h>
45 #include <linux/vhost.h>
46 #include <linux/virtio_scsi.h>
47 #include <linux/llist.h>
48 #include <linux/bitmap.h>
49 #include <linux/percpu_ida.h>
50
51 #include "vhost.h"
52
53 #define VHOST_SCSI_VERSION  "v0.1"
54 #define VHOST_SCSI_NAMELEN 256
55 #define VHOST_SCSI_MAX_CDB_SIZE 32
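/*
 * Per-command preallocation sizes: DEFAULT_TAGS is the number of command
 * descriptors set up per I_T nexus, and the PREALLOC_* values size the
 * scatterlist, protection scatterlist and page arrays attached to each
 * descriptor when the nexus is created.
 */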
56 #define VHOST_SCSI_DEFAULT_TAGS 256
57 #define VHOST_SCSI_PREALLOC_SGLS 2048
58 #define VHOST_SCSI_PREALLOC_UPAGES 2048
59 #define VHOST_SCSI_PREALLOC_PROT_SGLS 512
60
61 struct vhost_scsi_inflight {
62         /* Wait for the flush operation to finish */
63         struct completion comp;
64         /* Refcount for the inflight reqs */
65         struct kref kref;
66 };
67
68 struct vhost_scsi_cmd {
69         /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
70         int tvc_vq_desc;
71         /* virtio-scsi initiator task attribute */
72         int tvc_task_attr;
73         /* virtio-scsi response incoming iovecs */
74         int tvc_in_iovs;
75         /* virtio-scsi initiator data direction */
76         enum dma_data_direction tvc_data_direction;
77         /* Expected data transfer length from virtio-scsi header */
78         u32 tvc_exp_data_len;
79         /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
80         u64 tvc_tag;
81         /* The number of scatterlists associated with this cmd */
82         u32 tvc_sgl_count;
83         u32 tvc_prot_sgl_count;
84         /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
85         u32 tvc_lun;
86         /* Pointer to the SGL formatted memory from virtio-scsi */
87         struct scatterlist *tvc_sgl;
88         struct scatterlist *tvc_prot_sgl;
89         struct page **tvc_upages;
90         /* Pointer to response header iovec */
91         struct iovec tvc_resp_iov;
92         /* Pointer to vhost_scsi for our device */
93         struct vhost_scsi *tvc_vhost;
94         /* Pointer to vhost_virtqueue for the cmd */
95         struct vhost_virtqueue *tvc_vq;
96         /* Pointer to vhost nexus memory */
97         struct vhost_scsi_nexus *tvc_nexus;
98         /* The TCM I/O descriptor that is accessed via container_of() */
99         struct se_cmd tvc_se_cmd;
100         /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
101         struct work_struct work;
102         /* Copy of the incoming SCSI command descriptor block (CDB) */
103         unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
104         /* Sense buffer that will be mapped into outgoing status */
105         unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
106         /* Completed commands list, serviced from vhost worker thread */
107         struct llist_node tvc_completion_list;
108         /* Used to track inflight cmd */
109         struct vhost_scsi_inflight *inflight;
110 };
111
112 struct vhost_scsi_nexus {
113         /* Pointer to TCM session for I_T Nexus */
114         struct se_session *tvn_se_sess;
115 };
116
117 struct vhost_scsi_tpg {
118         /* Vhost port target portal group tag for TCM */
119         u16 tport_tpgt;
120         /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
121         int tv_tpg_port_count;
122         /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
123         int tv_tpg_vhost_count;
124         /* Used for enabling T10-PI with legacy devices */
125         int tv_fabric_prot_type;
126         /* list for vhost_scsi_list */
127         struct list_head tv_tpg_list;
128         /* Used to protect access to tpg_nexus */
129         struct mutex tv_tpg_mutex;
130         /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
131         struct vhost_scsi_nexus *tpg_nexus;
132         /* Pointer back to vhost_scsi_tport */
133         struct vhost_scsi_tport *tport;
134         /* Returned by vhost_scsi_make_tpg() */
135         struct se_portal_group se_tpg;
136         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
137         struct vhost_scsi *vhost_scsi;
138 };
139
140 struct vhost_scsi_tport {
141         /* SCSI protocol the tport is providing */
142         u8 tport_proto_id;
143         /* Binary World Wide unique Port Name for Vhost Target port */
144         u64 tport_wwpn;
145         /* ASCII formatted WWPN for Vhost Target port */
146         char tport_name[VHOST_SCSI_NAMELEN];
147         /* Returned by vhost_scsi_make_tport() */
148         struct se_wwn tport_wwn;
149 };
150
151 struct vhost_scsi_evt {
152         /* event to be sent to guest */
153         struct virtio_scsi_event event;
154         /* event list, serviced from vhost worker thread */
155         struct llist_node list;
156 };
157
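/*
 * Fixed virtqueue layout from the virtio-scsi spec: one control queue, one
 * event queue, then the request (I/O) queues starting at index 2.
 */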
158 enum {
159         VHOST_SCSI_VQ_CTL = 0,
160         VHOST_SCSI_VQ_EVT = 1,
161         VHOST_SCSI_VQ_IO = 2,
162 };
163
164 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
165 enum {
166         VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
167                                                (1ULL << VIRTIO_SCSI_F_T10_PI)
168 };
169
170 #define VHOST_SCSI_MAX_TARGET   256
171 #define VHOST_SCSI_MAX_VQ       128
172 #define VHOST_SCSI_MAX_EVENT    128
173
174 struct vhost_scsi_virtqueue {
175         struct vhost_virtqueue vq;
176         /*
177          * Reference counting for inflight reqs, used for the flush operation.
178          * At any given time, one reference tracks newly submitted commands,
179          * while we wait for the other one to reach 0.
180          */
181         struct vhost_scsi_inflight inflights[2];
182         /*
183          * Indicate current inflight in use, protected by vq->mutex.
184          * Writers must also take dev mutex and flush under it.
185          */
186         int inflight_idx;
187 };
188
189 struct vhost_scsi {
190         /* Protected by vhost_scsi->dev.mutex */
191         struct vhost_scsi_tpg **vs_tpg;
192         char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
193
194         struct vhost_dev dev;
195         struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
196
197         struct vhost_work vs_completion_work; /* cmd completion work item */
198         struct llist_head vs_completion_list; /* cmd completion queue */
199
200         struct vhost_work vs_event_work; /* evt injection work item */
201         struct llist_head vs_event_list; /* evt injection queue */
202
203         bool vs_events_missed; /* any missed events, protected by vq->mutex */
204         int vs_events_nr; /* num of pending events, protected by vq->mutex */
205 };
206
207 static struct workqueue_struct *vhost_scsi_workqueue;
208
209 /* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
210 static DEFINE_MUTEX(vhost_scsi_mutex);
211 static LIST_HEAD(vhost_scsi_list);
212
213 static void vhost_scsi_done_inflight(struct kref *kref)
214 {
215         struct vhost_scsi_inflight *inflight;
216
217         inflight = container_of(kref, struct vhost_scsi_inflight, kref);
218         complete(&inflight->comp);
219 }
220
221 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
222                                     struct vhost_scsi_inflight *old_inflight[])
223 {
224         struct vhost_scsi_inflight *new_inflight;
225         struct vhost_virtqueue *vq;
226         int idx, i;
227
228         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
229                 vq = &vs->vqs[i].vq;
230
231                 mutex_lock(&vq->mutex);
232
233                 /* store old inflight */
234                 idx = vs->vqs[i].inflight_idx;
235                 if (old_inflight)
236                         old_inflight[i] = &vs->vqs[i].inflights[idx];
237
238                 /* set up new inflight */
239                 vs->vqs[i].inflight_idx = idx ^ 1;
240                 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
241                 kref_init(&new_inflight->kref);
242                 init_completion(&new_inflight->comp);
243
244                 mutex_unlock(&vq->mutex);
245         }
246 }
247
248 static struct vhost_scsi_inflight *
249 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
250 {
251         struct vhost_scsi_inflight *inflight;
252         struct vhost_scsi_virtqueue *svq;
253
254         svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
255         inflight = &svq->inflights[svq->inflight_idx];
256         kref_get(&inflight->kref);
257
258         return inflight;
259 }
260
261 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
262 {
263         kref_put(&inflight->kref, vhost_scsi_done_inflight);
264 }
265
266 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
267 {
268         return 1;
269 }
270
271 static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
272 {
273         return 0;
274 }
275
276 static char *vhost_scsi_get_fabric_name(void)
277 {
278         return "vhost";
279 }
280
281 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
282 {
283         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
284                                 struct vhost_scsi_tpg, se_tpg);
285         struct vhost_scsi_tport *tport = tpg->tport;
286
287         return &tport->tport_name[0];
288 }
289
290 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
291 {
292         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
293                                 struct vhost_scsi_tpg, se_tpg);
294         return tpg->tport_tpgt;
295 }
296
297 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
298 {
299         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
300                                 struct vhost_scsi_tpg, se_tpg);
301
302         return tpg->tv_fabric_prot_type;
303 }
304
305 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
306 {
307         return 1;
308 }
309
310 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
311 {
312         struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
313                                 struct vhost_scsi_cmd, tvc_se_cmd);
314         struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
315         int i;
316
317         if (tv_cmd->tvc_sgl_count) {
318                 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
319                         put_page(sg_page(&tv_cmd->tvc_sgl[i]));
320         }
321         if (tv_cmd->tvc_prot_sgl_count) {
322                 for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
323                         put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
324         }
325
326         vhost_scsi_put_inflight(tv_cmd->inflight);
327         percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
328 }
329
330 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
331 {
332         return 0;
333 }
334
335 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
336 {
337         /* Go ahead and process the write immediately */
338         target_execute_cmd(se_cmd);
339         return 0;
340 }
341
342 static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
343 {
344         return 0;
345 }
346
347 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
348 {
349         return;
350 }
351
352 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
353 {
354         return 0;
355 }
356
357 static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
358 {
359         struct vhost_scsi *vs = cmd->tvc_vhost;
360
361         llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
362
363         vhost_work_queue(&vs->dev, &vs->vs_completion_work);
364 }
365
366 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
367 {
368         struct vhost_scsi_cmd *cmd = container_of(se_cmd,
369                                 struct vhost_scsi_cmd, tvc_se_cmd);
370         vhost_scsi_complete_cmd(cmd);
371         return 0;
372 }
373
374 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
375 {
376         struct vhost_scsi_cmd *cmd = container_of(se_cmd,
377                                 struct vhost_scsi_cmd, tvc_se_cmd);
378         vhost_scsi_complete_cmd(cmd);
379         return 0;
380 }
381
382 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
383 {
384         return;
385 }
386
387 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
388 {
389         return;
390 }
391
392 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
393 {
394         vs->vs_events_nr--;
395         kfree(evt);
396 }
397
398 static struct vhost_scsi_evt *
399 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
400                        u32 event, u32 reason)
401 {
402         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
403         struct vhost_scsi_evt *evt;
404
405         if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
406                 vs->vs_events_missed = true;
407                 return NULL;
408         }
409
410         evt = kzalloc(sizeof(*evt), GFP_KERNEL);
411         if (!evt) {
412                 vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
413                 vs->vs_events_missed = true;
414                 return NULL;
415         }
416
417         evt->event.event = cpu_to_vhost32(vq, event);
418         evt->event.reason = cpu_to_vhost32(vq, reason);
419         vs->vs_events_nr++;
420
421         return evt;
422 }
423
424 static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
425 {
426         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
427
428         /* TODO locking against target/backend threads? */
429         transport_generic_free_cmd(se_cmd, 0);
430
431 }
432
433 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
434 {
435         return target_put_sess_cmd(se_cmd);
436 }
437
438 static void
439 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
440 {
441         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
442         struct virtio_scsi_event *event = &evt->event;
443         struct virtio_scsi_event __user *eventp;
444         unsigned out, in;
445         int head, ret;
446
447         if (!vq->private_data) {
448                 vs->vs_events_missed = true;
449                 return;
450         }
451
452 again:
453         vhost_disable_notify(&vs->dev, vq);
454         head = vhost_get_vq_desc(vq, vq->iov,
455                         ARRAY_SIZE(vq->iov), &out, &in,
456                         NULL, NULL);
457         if (head < 0) {
458                 vs->vs_events_missed = true;
459                 return;
460         }
461         if (head == vq->num) {
462                 if (vhost_enable_notify(&vs->dev, vq))
463                         goto again;
464                 vs->vs_events_missed = true;
465                 return;
466         }
467
468         if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
469                 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
470                                 vq->iov[out].iov_len);
471                 vs->vs_events_missed = true;
472                 return;
473         }
474
475         if (vs->vs_events_missed) {
476                 event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
477                 vs->vs_events_missed = false;
478         }
479
480         eventp = vq->iov[out].iov_base;
481         ret = __copy_to_user(eventp, event, sizeof(*event));
482         if (!ret)
483                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
484         else
485                 vq_err(vq, "Faulted on vhost_scsi_send_event\n");
486 }
487
488 static void vhost_scsi_evt_work(struct vhost_work *work)
489 {
490         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
491                                         vs_event_work);
492         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
493         struct vhost_scsi_evt *evt, *t;
494         struct llist_node *llnode;
495
496         mutex_lock(&vq->mutex);
497         llnode = llist_del_all(&vs->vs_event_list);
498         llist_for_each_entry_safe(evt, t, llnode, list) {
499                 vhost_scsi_do_evt_work(vs, evt);
500                 vhost_scsi_free_evt(vs, evt);
501         }
502         mutex_unlock(&vq->mutex);
503 }
504
505 /* Fill in status and signal that we are done processing this command
506  *
507  * This is scheduled in the vhost work queue so we are called with the owner
508  * process mm and can access the vring.
509  */
510 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
511 {
512         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
513                                         vs_completion_work);
514         DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
515         struct virtio_scsi_cmd_resp v_rsp;
516         struct vhost_scsi_cmd *cmd, *t;
517         struct llist_node *llnode;
518         struct se_cmd *se_cmd;
519         struct iov_iter iov_iter;
520         int ret, vq;
521
522         bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
523         llnode = llist_del_all(&vs->vs_completion_list);
524         llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
525                 se_cmd = &cmd->tvc_se_cmd;
526
527                 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
528                         cmd, se_cmd->residual_count, se_cmd->scsi_status);
529
530                 memset(&v_rsp, 0, sizeof(v_rsp));
531                 v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
532                 /* TODO is status_qualifier field needed? */
533                 v_rsp.status = se_cmd->scsi_status;
534                 v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
535                                                  se_cmd->scsi_sense_length);
536                 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
537                        se_cmd->scsi_sense_length);
538
539                 iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
540                               cmd->tvc_in_iovs, sizeof(v_rsp));
541                 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
542                 if (likely(ret == sizeof(v_rsp))) {
543                         struct vhost_scsi_virtqueue *q;
544                         vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
545                         q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
546                         vq = q - vs->vqs;
547                         __set_bit(vq, signal);
548                 } else
549                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
550
551                 vhost_scsi_free_cmd(cmd);
552         }
553
554         vq = -1;
555         while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
556                 < VHOST_SCSI_MAX_VQ)
557                 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
558 }
559
560 static struct vhost_scsi_cmd *
561 vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
562                    unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
563                    u32 exp_data_len, int data_direction)
564 {
565         struct vhost_scsi_cmd *cmd;
566         struct vhost_scsi_nexus *tv_nexus;
567         struct se_session *se_sess;
568         struct scatterlist *sg, *prot_sg;
569         struct page **pages;
570         int tag;
571
572         tv_nexus = tpg->tpg_nexus;
573         if (!tv_nexus) {
574                 pr_err("Unable to locate active struct vhost_scsi_nexus\n");
575                 return ERR_PTR(-EIO);
576         }
577         se_sess = tv_nexus->tvn_se_sess;
578
579         tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
580         if (tag < 0) {
581                 pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
582                 return ERR_PTR(-ENOMEM);
583         }
584
585         cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
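        /*
         * Stash the preallocated scatterlist and page array pointers so they
         * survive the memset() of the descriptor below, then restore them.
         */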
586         sg = cmd->tvc_sgl;
587         prot_sg = cmd->tvc_prot_sgl;
588         pages = cmd->tvc_upages;
589         memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
590
591         cmd->tvc_sgl = sg;
592         cmd->tvc_prot_sgl = prot_sg;
593         cmd->tvc_upages = pages;
594         cmd->tvc_se_cmd.map_tag = tag;
595         cmd->tvc_tag = scsi_tag;
596         cmd->tvc_lun = lun;
597         cmd->tvc_task_attr = task_attr;
598         cmd->tvc_exp_data_len = exp_data_len;
599         cmd->tvc_data_direction = data_direction;
600         cmd->tvc_nexus = tv_nexus;
601         cmd->inflight = vhost_scsi_get_inflight(vq);
602
603         memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
604
605         return cmd;
606 }
607
608 /*
609  * Map a user memory range into a scatterlist
610  *
611  * Returns the number of scatterlist entries used or -errno on error.
612  */
613 static int
614 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
615                       struct iov_iter *iter,
616                       struct scatterlist *sgl,
617                       bool write)
618 {
619         struct page **pages = cmd->tvc_upages;
620         struct scatterlist *sg = sgl;
621         ssize_t bytes;
622         size_t offset;
623         unsigned int npages = 0;
624
625         bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
626                                 VHOST_SCSI_PREALLOC_UPAGES, &offset);
627         /* No pages were pinned */
628         if (bytes <= 0)
629                 return bytes < 0 ? bytes : -EFAULT;
630
631         iov_iter_advance(iter, bytes);
632
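        /*
         * Split the pinned pages into scatterlist entries; only the first
         * entry may start at a non-zero offset within its page.
         */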
633         while (bytes) {
634                 unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
635                 sg_set_page(sg++, pages[npages++], n, offset);
636                 bytes -= n;
637                 offset = 0;
638         }
639         return npages;
640 }
641
642 static int
643 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
644 {
645         int sgl_count = 0;
646
647         if (!iter || !iter->iov) {
648                 pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
649                        " present\n", __func__, bytes);
650                 return -EINVAL;
651         }
652
653         sgl_count = iov_iter_npages(iter, 0xffff);
654         if (sgl_count > max_sgls) {
655                 pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
656                        " max_sgls: %d\n", __func__, sgl_count, max_sgls);
657                 return -EINVAL;
658         }
659         return sgl_count;
660 }
661
662 static int
663 vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
664                       struct iov_iter *iter,
665                       struct scatterlist *sg, int sg_count)
666 {
667         struct scatterlist *p = sg;
668         int ret;
669
670         while (iov_iter_count(iter)) {
671                 ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
672                 if (ret < 0) {
673                         while (p < sg) {
674                                 struct page *page = sg_page(p++);
675                                 if (page)
676                                         put_page(page);
677                         }
678                         return ret;
679                 }
680                 sg += ret;
681         }
682         return 0;
683 }
684
685 static int
686 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
687                  size_t prot_bytes, struct iov_iter *prot_iter,
688                  size_t data_bytes, struct iov_iter *data_iter)
689 {
690         int sgl_count, ret;
691         bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
692
693         if (prot_bytes) {
694                 sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
695                                                  VHOST_SCSI_PREALLOC_PROT_SGLS);
696                 if (sgl_count < 0)
697                         return sgl_count;
698
699                 sg_init_table(cmd->tvc_prot_sgl, sgl_count);
700                 cmd->tvc_prot_sgl_count = sgl_count;
701                 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
702                          cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
703
704                 ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
705                                             cmd->tvc_prot_sgl,
706                                             cmd->tvc_prot_sgl_count);
707                 if (ret < 0) {
708                         cmd->tvc_prot_sgl_count = 0;
709                         return ret;
710                 }
711         }
712         sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
713                                          VHOST_SCSI_PREALLOC_SGLS);
714         if (sgl_count < 0)
715                 return sgl_count;
716
717         sg_init_table(cmd->tvc_sgl, sgl_count);
718         cmd->tvc_sgl_count = sgl_count;
719         pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
720                   cmd->tvc_sgl, cmd->tvc_sgl_count);
721
722         ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
723                                     cmd->tvc_sgl, cmd->tvc_sgl_count);
724         if (ret < 0) {
725                 cmd->tvc_sgl_count = 0;
726                 return ret;
727         }
728         return 0;
729 }
730
731 static int vhost_scsi_to_tcm_attr(int attr)
732 {
733         switch (attr) {
734         case VIRTIO_SCSI_S_SIMPLE:
735                 return TCM_SIMPLE_TAG;
736         case VIRTIO_SCSI_S_ORDERED:
737                 return TCM_ORDERED_TAG;
738         case VIRTIO_SCSI_S_HEAD:
739                 return TCM_HEAD_TAG;
740         case VIRTIO_SCSI_S_ACA:
741                 return TCM_ACA_TAG;
742         default:
743                 break;
744         }
745         return TCM_SIMPLE_TAG;
746 }
747
748 static void vhost_scsi_submission_work(struct work_struct *work)
749 {
750         struct vhost_scsi_cmd *cmd =
751                 container_of(work, struct vhost_scsi_cmd, work);
752         struct vhost_scsi_nexus *tv_nexus;
753         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
754         struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
755         int rc;
756
757         /* FIXME: BIDI operation */
758         if (cmd->tvc_sgl_count) {
759                 sg_ptr = cmd->tvc_sgl;
760
761                 if (cmd->tvc_prot_sgl_count)
762                         sg_prot_ptr = cmd->tvc_prot_sgl;
763                 else
764                         se_cmd->prot_pto = true;
765         } else {
766                 sg_ptr = NULL;
767         }
768         tv_nexus = cmd->tvc_nexus;
769
770         se_cmd->tag = 0;
771         rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
772                         cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
773                         cmd->tvc_lun, cmd->tvc_exp_data_len,
774                         vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
775                         cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
776                         sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
777                         cmd->tvc_prot_sgl_count);
778         if (rc < 0) {
779                 transport_send_check_condition_and_sense(se_cmd,
780                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
781                 transport_generic_free_cmd(se_cmd, 0);
782         }
783 }
784
785 static void
786 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
787                            struct vhost_virtqueue *vq,
788                            int head, unsigned out)
789 {
790         struct virtio_scsi_cmd_resp __user *resp;
791         struct virtio_scsi_cmd_resp rsp;
792         int ret;
793
794         memset(&rsp, 0, sizeof(rsp));
795         rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
796         resp = vq->iov[out].iov_base;
797         ret = __copy_to_user(resp, &rsp, sizeof(rsp));
798         if (!ret)
799                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
800         else
801                 pr_err("Faulted on virtio_scsi_cmd_resp\n");
802 }
803
804 static void
805 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
806 {
807         struct vhost_scsi_tpg **vs_tpg, *tpg;
808         struct virtio_scsi_cmd_req v_req;
809         struct virtio_scsi_cmd_req_pi v_req_pi;
810         struct vhost_scsi_cmd *cmd;
811         struct iov_iter out_iter, in_iter, prot_iter, data_iter;
812         u64 tag;
813         u32 exp_data_len, data_direction;
814         unsigned int out = 0, in = 0;
815         int head, ret, prot_bytes;
816         size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
817         size_t out_size, in_size;
818         u16 lun;
819         u8 *target, *lunp, task_attr;
820         bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
821         void *req, *cdb;
822
823         mutex_lock(&vq->mutex);
824         /*
825          * We can handle the vq only after the endpoint is setup by calling the
826          * VHOST_SCSI_SET_ENDPOINT ioctl.
827          */
828         vs_tpg = vq->private_data;
829         if (!vs_tpg)
830                 goto out;
831
832         vhost_disable_notify(&vs->dev, vq);
833
834         for (;;) {
835                 head = vhost_get_vq_desc(vq, vq->iov,
836                                          ARRAY_SIZE(vq->iov), &out, &in,
837                                          NULL, NULL);
838                 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
839                          head, out, in);
840                 /* On error, stop handling until the next kick. */
841                 if (unlikely(head < 0))
842                         break;
843                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
844                 if (head == vq->num) {
845                         if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
846                                 vhost_disable_notify(&vs->dev, vq);
847                                 continue;
848                         }
849                         break;
850                 }
851                 /*
852                  * Check for a sane response buffer so we can report early
853                  * errors back to the guest.
854                  */
855                 if (unlikely(vq->iov[out].iov_len < rsp_size)) {
856                         vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
857                                 " size, got %zu bytes\n", vq->iov[out].iov_len);
858                         break;
859                 }
860                 /*
861                  * Setup pointers and values based upon different virtio-scsi
862                  * request header if T10_PI is enabled in KVM guest.
863                  */
864                 if (t10_pi) {
865                         req = &v_req_pi;
866                         req_size = sizeof(v_req_pi);
867                         lunp = &v_req_pi.lun[0];
868                         target = &v_req_pi.lun[1];
869                 } else {
870                         req = &v_req;
871                         req_size = sizeof(v_req);
872                         lunp = &v_req.lun[0];
873                         target = &v_req.lun[1];
874                 }
875                 /*
876                  * FIXME: Not correct for BIDI operation
877                  */
878                 out_size = iov_length(vq->iov, out);
879                 in_size = iov_length(&vq->iov[out], in);
880
881                 /*
882                  * Copy over the virtio-scsi request header, which for an
883                  * ANY_LAYOUT enabled guest may span multiple iovecs, or a
884                  * single iovec may contain both the header + outgoing
885                  * WRITE payloads.
886                  *
887                  * copy_from_iter() will advance out_iter, so that it will
888                  * point at the start of the outgoing WRITE payload, if
889                  * DMA_TO_DEVICE is set.
890                  */
891                 iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
892
893                 if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
894                         vq_err(vq, "Faulted on copy_from_iter\n");
895                         vhost_scsi_send_bad_target(vs, vq, head, out);
896                         continue;
897                 }
898                 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
899                 if (unlikely(*lunp != 1)) {
900                         vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
901                         vhost_scsi_send_bad_target(vs, vq, head, out);
902                         continue;
903                 }
904
905                 tpg = READ_ONCE(vs_tpg[*target]);
906                 if (unlikely(!tpg)) {
907                         /* Target does not exist, fail the request */
908                         vhost_scsi_send_bad_target(vs, vq, head, out);
909                         continue;
910                 }
911                 /*
912                  * Determine data_direction by calculating the total outgoing
913                  * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
914                  * response headers respectively.
915                  *
916                  * For DMA_TO_DEVICE this is out_iter, which is already pointing
917                  * to the right place.
918                  *
919                  * For DMA_FROM_DEVICE, the iovec will be just past the end
920                  * of the virtio-scsi response header in either the same
921                  * or immediately following iovec.
922                  *
923                  * Any associated T10_PI bytes for the outgoing / incoming
924                  * payloads are included in calculation of exp_data_len here.
925                  */
926                 prot_bytes = 0;
927
928                 if (out_size > req_size) {
929                         data_direction = DMA_TO_DEVICE;
930                         exp_data_len = out_size - req_size;
931                         data_iter = out_iter;
932                 } else if (in_size > rsp_size) {
933                         data_direction = DMA_FROM_DEVICE;
934                         exp_data_len = in_size - rsp_size;
935
936                         iov_iter_init(&in_iter, READ, &vq->iov[out], in,
937                                       rsp_size + exp_data_len);
938                         iov_iter_advance(&in_iter, rsp_size);
939                         data_iter = in_iter;
940                 } else {
941                         data_direction = DMA_NONE;
942                         exp_data_len = 0;
943                 }
944                 /*
945                  * If T10_PI header + payload is present, setup prot_iter values
946                  * and recalculate data_iter for vhost_scsi_mapal() mapping to
947                  * host scatterlists via get_user_pages_fast().
948                  */
949                 if (t10_pi) {
950                         if (v_req_pi.pi_bytesout) {
951                                 if (data_direction != DMA_TO_DEVICE) {
952                                         vq_err(vq, "Received non zero pi_bytesout,"
953                                                 " but wrong data_direction\n");
954                                         vhost_scsi_send_bad_target(vs, vq, head, out);
955                                         continue;
956                                 }
957                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
958                         } else if (v_req_pi.pi_bytesin) {
959                                 if (data_direction != DMA_FROM_DEVICE) {
960                                         vq_err(vq, "Received non zero pi_bytesin,"
961                                                 " but wrong data_direction\n");
962                                         vhost_scsi_send_bad_target(vs, vq, head, out);
963                                         continue;
964                                 }
965                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
966                         }
967                         /*
968                          * Set prot_iter to data_iter, and advance past any
969                          * preceding prot_bytes that may be present.
970                          *
971                          * Also fix up the exp_data_len to reflect only the
972                          * actual data payload length.
973                          */
974                         if (prot_bytes) {
975                                 exp_data_len -= prot_bytes;
976                                 prot_iter = data_iter;
977                                 iov_iter_advance(&data_iter, prot_bytes);
978                         }
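                        /*
                         * Pull the tag, task attribute, CDB pointer and LUN out
                         * of the PI-format request header; lun[2..3] carry the
                         * single-level LUN, masked down to its 14-bit value.
                         * The non-PI branch below does the same for the plain
                         * virtio_scsi_cmd_req layout.
                         */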
979                         tag = vhost64_to_cpu(vq, v_req_pi.tag);
980                         task_attr = v_req_pi.task_attr;
981                         cdb = &v_req_pi.cdb[0];
982                         lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
983                 } else {
984                         tag = vhost64_to_cpu(vq, v_req.tag);
985                         task_attr = v_req.task_attr;
986                         cdb = &v_req.cdb[0];
987                         lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
988                 }
989                 /*
990                  * Check that the received CDB size does not exceed our
991                  * hardcoded max for vhost-scsi, then get a pre-allocated
992                  * cmd descriptor for the new virtio-scsi tag.
993                  *
994                  * TODO what if cdb was too small for varlen cdb header?
995                  */
996                 if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
997                         vq_err(vq, "Received SCSI CDB with command_size: %d that"
998                                 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
999                                 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1000                         vhost_scsi_send_bad_target(vs, vq, head, out);
1001                         continue;
1002                 }
1003                 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1004                                          exp_data_len + prot_bytes,
1005                                          data_direction);
1006                 if (IS_ERR(cmd)) {
1007                         vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1008                                PTR_ERR(cmd));
1009                         vhost_scsi_send_bad_target(vs, vq, head, out);
1010                         continue;
1011                 }
1012                 cmd->tvc_vhost = vs;
1013                 cmd->tvc_vq = vq;
1014                 cmd->tvc_resp_iov = vq->iov[out];
1015                 cmd->tvc_in_iovs = in;
1016
1017                 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1018                          cmd->tvc_cdb[0], cmd->tvc_lun);
1019                 pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1020                          " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1021
1022                 if (data_direction != DMA_NONE) {
1023                         ret = vhost_scsi_mapal(cmd,
1024                                                prot_bytes, &prot_iter,
1025                                                exp_data_len, &data_iter);
1026                         if (unlikely(ret)) {
1027                                 vq_err(vq, "Failed to map iov to sgl\n");
1028                                 vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1029                                 vhost_scsi_send_bad_target(vs, vq, head, out);
1030                                 continue;
1031                         }
1032                 }
1033                 /*
1034                  * Save the descriptor from vhost_get_vq_desc() to be used to
1035                  * complete the virtio-scsi request in TCM callback context via
1036                  * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1037                  */
1038                 cmd->tvc_vq_desc = head;
1039                 /*
1040                  * Dispatch cmd descriptor for cmwq execution in process
1041                  * context provided by vhost_scsi_workqueue.  This also ensures
1042                  * cmd is executed on the same kworker CPU as this vhost
1043                  * thread to gain positive L2 cache locality effects.
1044                  */
1045                 INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1046                 queue_work(vhost_scsi_workqueue, &cmd->work);
1047         }
1048 out:
1049         mutex_unlock(&vq->mutex);
1050 }
1051
1052 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1053 {
1054         pr_debug("%s: The handling func for control queue.\n", __func__);
1055 }
1056
1057 static void
1058 vhost_scsi_send_evt(struct vhost_scsi *vs,
1059                    struct vhost_scsi_tpg *tpg,
1060                    struct se_lun *lun,
1061                    u32 event,
1062                    u32 reason)
1063 {
1064         struct vhost_scsi_evt *evt;
1065
1066         evt = vhost_scsi_allocate_evt(vs, event, reason);
1067         if (!evt)
1068                 return;
1069
1070         if (tpg && lun) {
1071                 /* TODO: share lun setup code with virtio-scsi.ko */
1072                 /*
1073                  * Note: evt->event is zeroed when we allocate it and
1074                  * lun[4-7] need to be zero according to virtio-scsi spec.
1075                  */
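                /*
                 * Single-level LUN format: byte 0 is 1 and byte 1 is the
                 * target; LUNs above 255 use flat-space addressing, which is
                 * what the 0x40 address-method bits in lun[2] indicate.
                 */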
1076                 evt->event.lun[0] = 0x01;
1077                 evt->event.lun[1] = tpg->tport_tpgt;
1078                 if (lun->unpacked_lun >= 256)
1079                         evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
1080                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1081         }
1082
1083         llist_add(&evt->list, &vs->vs_event_list);
1084         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1085 }
1086
1087 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1088 {
1089         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1090                                                 poll.work);
1091         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1092
1093         mutex_lock(&vq->mutex);
1094         if (!vq->private_data)
1095                 goto out;
1096
1097         if (vs->vs_events_missed)
1098                 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1099 out:
1100         mutex_unlock(&vq->mutex);
1101 }
1102
1103 static void vhost_scsi_handle_kick(struct vhost_work *work)
1104 {
1105         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1106                                                 poll.work);
1107         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1108
1109         vhost_scsi_handle_vq(vs, vq);
1110 }
1111
1112 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1113 {
1114         vhost_poll_flush(&vs->vqs[index].vq.poll);
1115 }
1116
1117 /* Callers must hold dev mutex */
1118 static void vhost_scsi_flush(struct vhost_scsi *vs)
1119 {
1120         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1121         int i;
1122
1123         /* Init new inflight and remember the old inflight */
1124         vhost_scsi_init_inflight(vs, old_inflight);
1125
1126         /*
1127          * The inflight->kref was initialized to 1. We decrement it here to
1128          * indicate the start of the flush operation so that it will reach 0
1129          * when all the reqs are finished.
1130          */
1131         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1132                 kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1133
1134         /* Flush both the vhost poll and vhost work */
1135         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1136                 vhost_scsi_flush_vq(vs, i);
1137         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1138         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1139
1140         /* Wait for all reqs issued before the flush to be finished */
1141         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1142                 wait_for_completion(&old_inflight[i]->comp);
1143 }
1144
1145 /*
1146  * Called from vhost_scsi_ioctl() context to walk the list of available
1147  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1148  *
1149  *  The lock nesting rule is:
1150  *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1151  */
1152 static int
1153 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1154                         struct vhost_scsi_target *t)
1155 {
1156         struct se_portal_group *se_tpg;
1157         struct vhost_scsi_tport *tv_tport;
1158         struct vhost_scsi_tpg *tpg;
1159         struct vhost_scsi_tpg **vs_tpg;
1160         struct vhost_virtqueue *vq;
1161         int index, ret, i, len;
1162         bool match = false;
1163
1164         mutex_lock(&vhost_scsi_mutex);
1165         mutex_lock(&vs->dev.mutex);
1166
1167         /* Verify that ring has been setup correctly. */
1168         for (index = 0; index < vs->dev.nvqs; ++index) {
1169                 /* Verify that ring has been setup correctly. */
1170                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1171                         ret = -EFAULT;
1172                         goto out;
1173                 }
1174         }
1175
1176         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1177         vs_tpg = kzalloc(len, GFP_KERNEL);
1178         if (!vs_tpg) {
1179                 ret = -ENOMEM;
1180                 goto out;
1181         }
1182         if (vs->vs_tpg)
1183                 memcpy(vs_tpg, vs->vs_tpg, len);
1184
1185         list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1186                 mutex_lock(&tpg->tv_tpg_mutex);
1187                 if (!tpg->tpg_nexus) {
1188                         mutex_unlock(&tpg->tv_tpg_mutex);
1189                         continue;
1190                 }
1191                 if (tpg->tv_tpg_vhost_count != 0) {
1192                         mutex_unlock(&tpg->tv_tpg_mutex);
1193                         continue;
1194                 }
1195                 tv_tport = tpg->tport;
1196
1197                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1198                         if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1199                                 kfree(vs_tpg);
1200                                 mutex_unlock(&tpg->tv_tpg_mutex);
1201                                 ret = -EEXIST;
1202                                 goto out;
1203                         }
1204                         /*
1205                          * In order to ensure individual vhost-scsi configfs
1206                          * groups cannot be removed while in use by vhost ioctl,
1207                          * go ahead and take an explicit se_tpg->tpg_group.cg_item
1208                          * dependency now.
1209                          */
1210                         se_tpg = &tpg->se_tpg;
1211                         ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1212                         if (ret) {
1213                                 pr_warn("configfs_depend_item() failed: %d\n", ret);
1214                                 kfree(vs_tpg);
1215                                 mutex_unlock(&tpg->tv_tpg_mutex);
1216                                 goto out;
1217                         }
1218                         tpg->tv_tpg_vhost_count++;
1219                         tpg->vhost_scsi = vs;
1220                         vs_tpg[tpg->tport_tpgt] = tpg;
1221                         smp_mb__after_atomic();
1222                         match = true;
1223                 }
1224                 mutex_unlock(&tpg->tv_tpg_mutex);
1225         }
1226
1227         if (match) {
1228                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1229                        sizeof(vs->vs_vhost_wwpn));
1230                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1231                         vq = &vs->vqs[i].vq;
1232                         mutex_lock(&vq->mutex);
1233                         vq->private_data = vs_tpg;
1234                         vhost_vq_init_access(vq);
1235                         mutex_unlock(&vq->mutex);
1236                 }
1237                 ret = 0;
1238         } else {
1239                 ret = -EEXIST;
1240         }
1241
1242         /*
1243          * Act as synchronize_rcu to make sure access to
1244          * old vs->vs_tpg is finished.
1245          */
1246         vhost_scsi_flush(vs);
1247         kfree(vs->vs_tpg);
1248         vs->vs_tpg = vs_tpg;
1249
1250 out:
1251         mutex_unlock(&vs->dev.mutex);
1252         mutex_unlock(&vhost_scsi_mutex);
1253         return ret;
1254 }
1255
1256 static int
1257 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1258                           struct vhost_scsi_target *t)
1259 {
1260         struct se_portal_group *se_tpg;
1261         struct vhost_scsi_tport *tv_tport;
1262         struct vhost_scsi_tpg *tpg;
1263         struct vhost_virtqueue *vq;
1264         bool match = false;
1265         int index, ret, i;
1266         u8 target;
1267
1268         mutex_lock(&vhost_scsi_mutex);
1269         mutex_lock(&vs->dev.mutex);
1270         /* Verify that ring has been setup correctly. */
1271         for (index = 0; index < vs->dev.nvqs; ++index) {
1272                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1273                         ret = -EFAULT;
1274                         goto err_dev;
1275                 }
1276         }
1277
1278         if (!vs->vs_tpg) {
1279                 ret = 0;
1280                 goto err_dev;
1281         }
1282
1283         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1284                 target = i;
1285                 tpg = vs->vs_tpg[target];
1286                 if (!tpg)
1287                         continue;
1288
1289                 mutex_lock(&tpg->tv_tpg_mutex);
1290                 tv_tport = tpg->tport;
1291                 if (!tv_tport) {
1292                         ret = -ENODEV;
1293                         goto err_tpg;
1294                 }
1295
1296                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1297                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1298                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1299                                 tv_tport->tport_name, tpg->tport_tpgt,
1300                                 t->vhost_wwpn, t->vhost_tpgt);
1301                         ret = -EINVAL;
1302                         goto err_tpg;
1303                 }
1304                 tpg->tv_tpg_vhost_count--;
1305                 tpg->vhost_scsi = NULL;
1306                 vs->vs_tpg[target] = NULL;
1307                 match = true;
1308                 mutex_unlock(&tpg->tv_tpg_mutex);
1309                 /*
1310                  * Release se_tpg->tpg_group.cg_item configfs dependency now
1311                  * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1312                  */
1313                 se_tpg = &tpg->se_tpg;
1314                 target_undepend_item(&se_tpg->tpg_group.cg_item);
1315         }
1316         if (match) {
1317                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1318                         vq = &vs->vqs[i].vq;
1319                         mutex_lock(&vq->mutex);
1320                         vq->private_data = NULL;
1321                         mutex_unlock(&vq->mutex);
1322                 }
1323         }
1324         /*
1325          * Act as synchronize_rcu to make sure access to
1326          * old vs->vs_tpg is finished.
1327          */
1328         vhost_scsi_flush(vs);
1329         kfree(vs->vs_tpg);
1330         vs->vs_tpg = NULL;
1331         WARN_ON(vs->vs_events_nr);
1332         mutex_unlock(&vs->dev.mutex);
1333         mutex_unlock(&vhost_scsi_mutex);
1334         return 0;
1335
1336 err_tpg:
1337         mutex_unlock(&tpg->tv_tpg_mutex);
1338 err_dev:
1339         mutex_unlock(&vs->dev.mutex);
1340         mutex_unlock(&vhost_scsi_mutex);
1341         return ret;
1342 }
1343
1344 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1345 {
1346         struct vhost_virtqueue *vq;
1347         int i;
1348
1349         if (features & ~VHOST_SCSI_FEATURES)
1350                 return -EOPNOTSUPP;
1351
1352         mutex_lock(&vs->dev.mutex);
1353         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1354             !vhost_log_access_ok(&vs->dev)) {
1355                 mutex_unlock(&vs->dev.mutex);
1356                 return -EFAULT;
1357         }
1358
1359         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1360                 vq = &vs->vqs[i].vq;
1361                 mutex_lock(&vq->mutex);
1362                 vq->acked_features = features;
1363                 mutex_unlock(&vq->mutex);
1364         }
1365         mutex_unlock(&vs->dev.mutex);
1366         return 0;
1367 }
1368
1369 static int vhost_scsi_open(struct inode *inode, struct file *f)
1370 {
1371         struct vhost_scsi *vs;
1372         struct vhost_virtqueue **vqs;
1373         int r = -ENOMEM, i;
1374
1375         vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
1376         if (!vs) {
1377                 vs = vzalloc(sizeof(*vs));
1378                 if (!vs)
1379                         goto err_vs;
1380         }
1381
1382         vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1383         if (!vqs)
1384                 goto err_vqs;
1385
1386         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1387         vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1388
1389         vs->vs_events_nr = 0;
1390         vs->vs_events_missed = false;
1391
1392         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1393         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1394         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1395         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1396         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1397                 vqs[i] = &vs->vqs[i].vq;
1398                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1399         }
1400         vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1401
1402         vhost_scsi_init_inflight(vs, NULL);
1403
1404         f->private_data = vs;
1405         return 0;
1406
1407 err_vqs:
1408         kvfree(vs);
1409 err_vs:
1410         return r;
1411 }
1412
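/*
 * Char-device release: snapshot the configured WWPN so the endpoint
 * can be cleared, then stop and clean up the vhost device.  A final
 * vhost_scsi_flush() is required because the event kick handler may
 * re-queue work while the device is being torn down.
 */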
1413 static int vhost_scsi_release(struct inode *inode, struct file *f)
1414 {
1415         struct vhost_scsi *vs = f->private_data;
1416         struct vhost_scsi_target t;
1417
1418         mutex_lock(&vs->dev.mutex);
1419         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1420         mutex_unlock(&vs->dev.mutex);
1421         vhost_scsi_clear_endpoint(vs, &t);
1422         vhost_dev_stop(&vs->dev);
1423         vhost_dev_cleanup(&vs->dev, false);
1424         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1425         vhost_scsi_flush(vs);
1426         kfree(vs->dev.vqs);
1427         kvfree(vs);
1428         return 0;
1429 }
1430
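/*
 * Main ioctl multiplexer for /dev/vhost-scsi.  The vhost-scsi specific
 * requests (endpoint setup/teardown, ABI version, the missed-events
 * flag and feature negotiation) are handled here; everything else is
 * forwarded to the generic vhost_dev_ioctl()/vhost_vring_ioctl()
 * helpers.
 *
 * Minimal userspace sketch (illustrative only: the WWPN is made up,
 * error handling is omitted, and the usual VHOST_SET_OWNER / memory
 * table / vring setup is not shown):
 *
 *	int fd = open("/dev/vhost-scsi", O_RDWR);
 *	struct vhost_scsi_target backend = { 0 };
 *
 *	strncpy(backend.vhost_wwpn, "naa.600140554cf3a18e",
 *		sizeof(backend.vhost_wwpn) - 1);
 *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &backend);
 *	...
 *	ioctl(fd, VHOST_SCSI_CLEAR_ENDPOINT, &backend);
 */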
1431 static long
1432 vhost_scsi_ioctl(struct file *f,
1433                  unsigned int ioctl,
1434                  unsigned long arg)
1435 {
1436         struct vhost_scsi *vs = f->private_data;
1437         struct vhost_scsi_target backend;
1438         void __user *argp = (void __user *)arg;
1439         u64 __user *featurep = argp;
1440         u32 __user *eventsp = argp;
1441         u32 events_missed;
1442         u64 features;
1443         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1444         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1445
1446         switch (ioctl) {
1447         case VHOST_SCSI_SET_ENDPOINT:
1448                 if (copy_from_user(&backend, argp, sizeof backend))
1449                         return -EFAULT;
1450                 if (backend.reserved != 0)
1451                         return -EOPNOTSUPP;
1452
1453                 return vhost_scsi_set_endpoint(vs, &backend);
1454         case VHOST_SCSI_CLEAR_ENDPOINT:
1455                 if (copy_from_user(&backend, argp, sizeof backend))
1456                         return -EFAULT;
1457                 if (backend.reserved != 0)
1458                         return -EOPNOTSUPP;
1459
1460                 return vhost_scsi_clear_endpoint(vs, &backend);
1461         case VHOST_SCSI_GET_ABI_VERSION:
1462                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1463                         return -EFAULT;
1464                 return 0;
1465         case VHOST_SCSI_SET_EVENTS_MISSED:
1466                 if (get_user(events_missed, eventsp))
1467                         return -EFAULT;
1468                 mutex_lock(&vq->mutex);
1469                 vs->vs_events_missed = events_missed;
1470                 mutex_unlock(&vq->mutex);
1471                 return 0;
1472         case VHOST_SCSI_GET_EVENTS_MISSED:
1473                 mutex_lock(&vq->mutex);
1474                 events_missed = vs->vs_events_missed;
1475                 mutex_unlock(&vq->mutex);
1476                 if (put_user(events_missed, eventsp))
1477                         return -EFAULT;
1478                 return 0;
1479         case VHOST_GET_FEATURES:
1480                 features = VHOST_SCSI_FEATURES;
1481                 if (copy_to_user(featurep, &features, sizeof features))
1482                         return -EFAULT;
1483                 return 0;
1484         case VHOST_SET_FEATURES:
1485                 if (copy_from_user(&features, featurep, sizeof features))
1486                         return -EFAULT;
1487                 return vhost_scsi_set_features(vs, features);
1488         default:
1489                 mutex_lock(&vs->dev.mutex);
1490                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1491                 /* TODO: flush backend after dev ioctl. */
1492                 if (r == -ENOIOCTLCMD)
1493                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1494                 mutex_unlock(&vs->dev.mutex);
1495                 return r;
1496         }
1497 }
1498
1499 #ifdef CONFIG_COMPAT
1500 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1501                                 unsigned long arg)
1502 {
1503         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1504 }
1505 #endif
1506
1507 static const struct file_operations vhost_scsi_fops = {
1508         .owner          = THIS_MODULE,
1509         .release        = vhost_scsi_release,
1510         .unlocked_ioctl = vhost_scsi_ioctl,
1511 #ifdef CONFIG_COMPAT
1512         .compat_ioctl   = vhost_scsi_compat_ioctl,
1513 #endif
1514         .open           = vhost_scsi_open,
1515         .llseek         = noop_llseek,
1516 };
1517
1518 static struct miscdevice vhost_scsi_misc = {
1519         MISC_DYNAMIC_MINOR,
1520         "vhost-scsi",
1521         &vhost_scsi_fops,
1522 };
1523
1524 static int __init vhost_scsi_register(void)
1525 {
1526         return misc_register(&vhost_scsi_misc);
1527 }
1528
1529 static void vhost_scsi_deregister(void)
1530 {
1531         misc_deregister(&vhost_scsi_misc);
1532 }
1533
1534 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1535 {
1536         switch (tport->tport_proto_id) {
1537         case SCSI_PROTOCOL_SAS:
1538                 return "SAS";
1539         case SCSI_PROTOCOL_FCP:
1540                 return "FCP";
1541         case SCSI_PROTOCOL_ISCSI:
1542                 return "iSCSI";
1543         default:
1544                 break;
1545         }
1546
1547         return "Unknown";
1548 }
1549
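/*
 * Emit a VIRTIO_SCSI_T_TRANSPORT_RESET event on the event virtqueue
 * when a LUN is linked (RESCAN) or unlinked (REMOVED) while the TPG is
 * bound to a live vhost endpoint.  The event is only queued when the
 * guest negotiated VIRTIO_SCSI_F_HOTPLUG; otherwise the change is
 * silent.
 */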
1550 static void
1551 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1552                   struct se_lun *lun, bool plug)
1553 {
1555         struct vhost_scsi *vs = tpg->vhost_scsi;
1556         struct vhost_virtqueue *vq;
1557         u32 reason;
1558
1559         if (!vs)
1560                 return;
1561
1562         mutex_lock(&vs->dev.mutex);
1563
1564         if (plug)
1565                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1566         else
1567                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1568
1569         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1570         mutex_lock(&vq->mutex);
1571         if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1572                 vhost_scsi_send_evt(vs, tpg, lun,
1573                                    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1574         mutex_unlock(&vq->mutex);
1575         mutex_unlock(&vs->dev.mutex);
1576 }
1577
1578 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1579 {
1580         vhost_scsi_do_plug(tpg, lun, true);
1581 }
1582
1583 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1584 {
1585         vhost_scsi_do_plug(tpg, lun, false);
1586 }
1587
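/*
 * configfs callbacks invoked when a LUN is linked into or unlinked
 * from a vhost TPG.  They maintain tv_tpg_port_count, which prevents
 * the I_T nexus from being dropped while ports are present, and raise
 * the matching hotplug/hotunplug event for the guest.
 */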
1588 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1589                                struct se_lun *lun)
1590 {
1591         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1592                                 struct vhost_scsi_tpg, se_tpg);
1593
1594         mutex_lock(&vhost_scsi_mutex);
1595
1596         mutex_lock(&tpg->tv_tpg_mutex);
1597         tpg->tv_tpg_port_count++;
1598         mutex_unlock(&tpg->tv_tpg_mutex);
1599
1600         vhost_scsi_hotplug(tpg, lun);
1601
1602         mutex_unlock(&vhost_scsi_mutex);
1603
1604         return 0;
1605 }
1606
1607 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1608                                   struct se_lun *lun)
1609 {
1610         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1611                                 struct vhost_scsi_tpg, se_tpg);
1612
1613         mutex_lock(&vhost_scsi_mutex);
1614
1615         mutex_lock(&tpg->tv_tpg_mutex);
1616         tpg->tv_tpg_port_count--;
1617         mutex_unlock(&tpg->tv_tpg_mutex);
1618
1619         vhost_scsi_hotunplug(tpg, lun);
1620
1621         mutex_unlock(&vhost_scsi_mutex);
1622 }
1623
1624 static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
1625 {
1626         struct vhost_scsi_cmd *tv_cmd;
1627         unsigned int i;
1628
1629         if (!se_sess->sess_cmd_map)
1630                 return;
1631
1632         for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1633                 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1634
1635                 kfree(tv_cmd->tvc_sgl);
1636                 kfree(tv_cmd->tvc_prot_sgl);
1637                 kfree(tv_cmd->tvc_upages);
1638         }
1639 }
1640
1641 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
1642                 struct config_item *item, const char *page, size_t count)
1643 {
1644         struct se_portal_group *se_tpg = attrib_to_tpg(item);
1645         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1646                                 struct vhost_scsi_tpg, se_tpg);
1647         unsigned long val;
1648         int ret = kstrtoul(page, 0, &val);
1649
1650         if (ret) {
1651                 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1652                 return ret;
1653         }
1654         if (val != 0 && val != 1 && val != 3) {
1655                 pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1656                 return -EINVAL;
1657         }
1658         tpg->tv_fabric_prot_type = val;
1659
1660         return count;
1661 }
1662
1663 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
1664                 struct config_item *item, char *page)
1665 {
1666         struct se_portal_group *se_tpg = attrib_to_tpg(item);
1667         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1668                                 struct vhost_scsi_tpg, se_tpg);
1669
1670         return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1671 }
1672
1673 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
1674
1675 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1676         &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
1677         NULL,
1678 };
1679
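/*
 * Session-creation callback: for every preallocated command slot in
 * sess_cmd_map, allocate the fixed-size data scatterlist, user-page
 * array and protection scatterlist up front so the I/O path never has
 * to allocate per request.  On failure, everything allocated so far is
 * released through vhost_scsi_free_cmd_map_res().
 */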
1680 static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
1681                                struct se_session *se_sess, void *p)
1682 {
1683         struct vhost_scsi_cmd *tv_cmd;
1684         unsigned int i;
1685
1686         for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1687                 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1688
1689                 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1690                                         VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
1691                 if (!tv_cmd->tvc_sgl) {
1692                         pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1693                         goto out;
1694                 }
1695
1696                 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1697                                 VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
1698                 if (!tv_cmd->tvc_upages) {
1699                         pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1700                         goto out;
1701                 }
1702
1703                 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1704                                 VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
1705                 if (!tv_cmd->tvc_prot_sgl) {
1706                         pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1707                         goto out;
1708                 }
1709         }
1710         return 0;
1711 out:
1712         vhost_scsi_free_cmd_map_res(se_sess);
1713         return -ENOMEM;
1714 }
1715
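/*
 * Create the single I_T nexus for a TPG.  Only one nexus may exist at
 * a time (-EEXIST otherwise); the backing se_session is allocated with
 * VHOST_SCSI_DEFAULT_TAGS preallocated vhost_scsi_cmd descriptors and
 * with DIN/DOUT protection passthrough enabled.
 */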
1716 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1717                                 const char *name)
1718 {
1719         struct vhost_scsi_nexus *tv_nexus;
1720
1721         mutex_lock(&tpg->tv_tpg_mutex);
1722         if (tpg->tpg_nexus) {
1723                 mutex_unlock(&tpg->tv_tpg_mutex);
1724                 pr_debug("tpg->tpg_nexus already exists\n");
1725                 return -EEXIST;
1726         }
1727
1728         tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
1729         if (!tv_nexus) {
1730                 mutex_unlock(&tpg->tv_tpg_mutex);
1731                 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1732                 return -ENOMEM;
1733         }
1734         /*
1735          * Since we are running in 'demo mode' this call will generate a
1736          * struct se_node_acl for the vhost_scsi struct se_portal_group with
1737          * the SCSI Initiator port name of the passed configfs group 'name'.
1738          */
1739         tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
1740                                         VHOST_SCSI_DEFAULT_TAGS,
1741                                         sizeof(struct vhost_scsi_cmd),
1742                                         TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
1743                                         (unsigned char *)name, tv_nexus,
1744                                         vhost_scsi_nexus_cb);
1745         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1746                 mutex_unlock(&tpg->tv_tpg_mutex);
1747                 kfree(tv_nexus);
1748                 return -ENOMEM;
1749         }
1750         tpg->tpg_nexus = tv_nexus;
1751
1752         mutex_unlock(&tpg->tv_tpg_mutex);
1753         return 0;
1754 }
1755
1756 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1757 {
1758         struct se_session *se_sess;
1759         struct vhost_scsi_nexus *tv_nexus;
1760
1761         mutex_lock(&tpg->tv_tpg_mutex);
1762         tv_nexus = tpg->tpg_nexus;
1763         if (!tv_nexus) {
1764                 mutex_unlock(&tpg->tv_tpg_mutex);
1765                 return -ENODEV;
1766         }
1767
1768         se_sess = tv_nexus->tvn_se_sess;
1769         if (!se_sess) {
1770                 mutex_unlock(&tpg->tv_tpg_mutex);
1771                 return -ENODEV;
1772         }
1773
1774         if (tpg->tv_tpg_port_count != 0) {
1775                 mutex_unlock(&tpg->tv_tpg_mutex);
1776                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1777                         " active TPG port count: %d\n",
1778                         tpg->tv_tpg_port_count);
1779                 return -EBUSY;
1780         }
1781
1782         if (tpg->tv_tpg_vhost_count != 0) {
1783                 mutex_unlock(&tpg->tv_tpg_mutex);
1784                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1785                         " active TPG vhost count: %d\n",
1786                         tpg->tv_tpg_vhost_count);
1787                 return -EBUSY;
1788         }
1789
1790         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1791                 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
1792                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1793
1794         vhost_scsi_free_cmd_map_res(se_sess);
1795         /*
1796          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1797          */
1798         transport_deregister_session(tv_nexus->tvn_se_sess);
1799         tpg->tpg_nexus = NULL;
1800         mutex_unlock(&tpg->tv_tpg_mutex);
1801
1802         kfree(tv_nexus);
1803         return 0;
1804 }
1805
1806 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
1807 {
1808         struct se_portal_group *se_tpg = to_tpg(item);
1809         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1810                                 struct vhost_scsi_tpg, se_tpg);
1811         struct vhost_scsi_nexus *tv_nexus;
1812         ssize_t ret;
1813
1814         mutex_lock(&tpg->tv_tpg_mutex);
1815         tv_nexus = tpg->tpg_nexus;
1816         if (!tv_nexus) {
1817                 mutex_unlock(&tpg->tv_tpg_mutex);
1818                 return -ENODEV;
1819         }
1820         ret = snprintf(page, PAGE_SIZE, "%s\n",
1821                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1822         mutex_unlock(&tpg->tv_tpg_mutex);
1823
1824         return ret;
1825 }
1826
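/*
 * configfs "nexus" attribute store: writing "NULL" drops the active
 * I_T nexus, while writing an initiator WWN whose prefix (naa., fc.,
 * iqn.) matches the tport's protocol creates one.
 *
 * Illustrative configfs usage (the WWNs below are made-up examples):
 *
 *	echo -n naa.60014051234567890 > \
 *		/sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 */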
1827 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
1828                 const char *page, size_t count)
1829 {
1830         struct se_portal_group *se_tpg = to_tpg(item);
1831         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1832                                 struct vhost_scsi_tpg, se_tpg);
1833         struct vhost_scsi_tport *tport_wwn = tpg->tport;
1834         unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
1835         int ret;
1836         /*
1837          * Shut down the active I_T nexus if 'NULL' is passed.
1838          */
1839         if (!strncmp(page, "NULL", 4)) {
1840                 ret = vhost_scsi_drop_nexus(tpg);
1841                 return (!ret) ? count : ret;
1842         }
1843         /*
1844          * Otherwise make sure the passed virtual Initiator port WWN matches
1845          * the fabric protocol_id set in vhost_scsi_make_tport(), and call
1846          * vhost_scsi_make_nexus().
1847          */
1848         if (strlen(page) >= VHOST_SCSI_NAMELEN) {
1849                 pr_err("Emulated NAA SAS Address: %s, exceeds"
1850                                 " max: %d\n", page, VHOST_SCSI_NAMELEN);
1851                 return -EINVAL;
1852         }
1853         snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
1854
1855         ptr = strstr(i_port, "naa.");
1856         if (ptr) {
1857                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1858                         pr_err("Passed SAS Initiator Port %s does not"
1859                                 " match target port protoid: %s\n", i_port,
1860                                 vhost_scsi_dump_proto_id(tport_wwn));
1861                         return -EINVAL;
1862                 }
1863                 port_ptr = &i_port[0];
1864                 goto check_newline;
1865         }
1866         ptr = strstr(i_port, "fc.");
1867         if (ptr) {
1868                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1869                         pr_err("Passed FCP Initiator Port %s does not"
1870                                 " match target port protoid: %s\n", i_port,
1871                                 vhost_scsi_dump_proto_id(tport_wwn));
1872                         return -EINVAL;
1873                 }
1874                 port_ptr = &i_port[3]; /* Skip over "fc." */
1875                 goto check_newline;
1876         }
1877         ptr = strstr(i_port, "iqn.");
1878         if (ptr) {
1879                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1880                         pr_err("Passed iSCSI Initiator Port %s does not"
1881                                 " match target port protoid: %s\n", i_port,
1882                                 vhost_scsi_dump_proto_id(tport_wwn));
1883                         return -EINVAL;
1884                 }
1885                 port_ptr = &i_port[0];
1886                 goto check_newline;
1887         }
1888         pr_err("Unable to locate prefix for emulated Initiator Port:"
1889                         " %s\n", i_port);
1890         return -EINVAL;
1891         /*
1892          * Clear any trailing newline for the NAA WWN
1893          */
1894 check_newline:
1895         if (i_port[strlen(i_port)-1] == '\n')
1896                 i_port[strlen(i_port)-1] = '\0';
1897
1898         ret = vhost_scsi_make_nexus(tpg, port_ptr);
1899         if (ret < 0)
1900                 return ret;
1901
1902         return count;
1903 }
1904
1905 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
1906
1907 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
1908         &vhost_scsi_tpg_attr_nexus,
1909         NULL,
1910 };
1911
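/*
 * configfs callback for "mkdir tpgt_<n>" under a vhost WWN directory.
 * The suffix is parsed as a decimal TPG tag (bounded by
 * VHOST_SCSI_MAX_TARGET), the TPG is registered with the target core,
 * and it is added to the global vhost_scsi_list so a later
 * VHOST_SCSI_SET_ENDPOINT can match it by WWPN and tag.
 */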
1912 static struct se_portal_group *
1913 vhost_scsi_make_tpg(struct se_wwn *wwn,
1914                    struct config_group *group,
1915                    const char *name)
1916 {
1917         struct vhost_scsi_tport *tport = container_of(wwn,
1918                         struct vhost_scsi_tport, tport_wwn);
1919
1920         struct vhost_scsi_tpg *tpg;
1921         u16 tpgt;
1922         int ret;
1923
1924         if (strstr(name, "tpgt_") != name)
1925                 return ERR_PTR(-EINVAL);
1926         if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
1927                 return ERR_PTR(-EINVAL);
1928
1929         tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
1930         if (!tpg) {
1931                 pr_err("Unable to allocate struct vhost_scsi_tpg\n");
1932                 return ERR_PTR(-ENOMEM);
1933         }
1934         mutex_init(&tpg->tv_tpg_mutex);
1935         INIT_LIST_HEAD(&tpg->tv_tpg_list);
1936         tpg->tport = tport;
1937         tpg->tport_tpgt = tpgt;
1938
1939         ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
1940         if (ret < 0) {
1941                 kfree(tpg);
1942                 return NULL;
1943         }
1944         mutex_lock(&vhost_scsi_mutex);
1945         list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
1946         mutex_unlock(&vhost_scsi_mutex);
1947
1948         return &tpg->se_tpg;
1949 }
1950
1951 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
1952 {
1953         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1954                                 struct vhost_scsi_tpg, se_tpg);
1955
1956         mutex_lock(&vhost_scsi_mutex);
1957         list_del(&tpg->tv_tpg_list);
1958         mutex_unlock(&vhost_scsi_mutex);
1959         /*
1960          * Release the virtual I_T Nexus for this vhost TPG
1961          */
1962         vhost_scsi_drop_nexus(tpg);
1963         /*
1964          * Deregister the se_tpg from TCM.
1965          */
1966         core_tpg_deregister(se_tpg);
1967         kfree(tpg);
1968 }
1969
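/*
 * configfs callback for creating a WWN directory under the "vhost"
 * fabric.  The name prefix selects the emulated transport: "naa." maps
 * to SAS, "fc." to FCP (with the prefix stripped from the stored
 * name) and "iqn." to iSCSI.
 *
 * Illustrative setup sequence (made-up WWN, assuming configfs is
 * mounted at /sys/kernel/config):
 *
 *	mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e
 *	mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 */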
1970 static struct se_wwn *
1971 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
1972                      struct config_group *group,
1973                      const char *name)
1974 {
1975         struct vhost_scsi_tport *tport;
1976         char *ptr;
1977         u64 wwpn = 0;
1978         int off = 0;
1979
1980         /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
1981                 return ERR_PTR(-EINVAL); */
1982
1983         tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
1984         if (!tport) {
1985                 pr_err("Unable to allocate struct vhost_scsi_tport\n");
1986                 return ERR_PTR(-ENOMEM);
1987         }
1988         tport->tport_wwpn = wwpn;
1989         /*
1990          * Determine the emulated Protocol Identifier and Target Port Name
1991          * based on the incoming configfs directory name.
1992          */
1993         ptr = strstr(name, "naa.");
1994         if (ptr) {
1995                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1996                 goto check_len;
1997         }
1998         ptr = strstr(name, "fc.");
1999         if (ptr) {
2000                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2001                 off = 3; /* Skip over "fc." */
2002                 goto check_len;
2003         }
2004         ptr = strstr(name, "iqn.");
2005         if (ptr) {
2006                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2007                 goto check_len;
2008         }
2009
2010         pr_err("Unable to locate prefix for emulated Target Port:"
2011                         " %s\n", name);
2012         kfree(tport);
2013         return ERR_PTR(-EINVAL);
2014
2015 check_len:
2016         if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2017                 pr_err("Emulated %s Address: %s, exceeds"
2018                         " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2019                         VHOST_SCSI_NAMELEN);
2020                 kfree(tport);
2021                 return ERR_PTR(-EINVAL);
2022         }
2023         snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2024
2025         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2026                 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2027
2028         return &tport->tport_wwn;
2029 }
2030
2031 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2032 {
2033         struct vhost_scsi_tport *tport = container_of(wwn,
2034                                 struct vhost_scsi_tport, tport_wwn);
2035
2036         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2037                 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2038                 tport->tport_name);
2039
2040         kfree(tport);
2041 }
2042
2043 static ssize_t
2044 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2045 {
2046         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2047                 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2048                 utsname()->machine);
2049 }
2050
2051 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2052
2053 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2054         &vhost_scsi_wwn_attr_version,
2055         NULL,
2056 };
2057
2058 static const struct target_core_fabric_ops vhost_scsi_ops = {
2059         .module                         = THIS_MODULE,
2060         .name                           = "vhost",
2061         .get_fabric_name                = vhost_scsi_get_fabric_name,
2062         .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
2063         .tpg_get_tag                    = vhost_scsi_get_tpgt,
2064         .tpg_check_demo_mode            = vhost_scsi_check_true,
2065         .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
2066         .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2067         .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2068         .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
2069         .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
2070         .release_cmd                    = vhost_scsi_release_cmd,
2071         .check_stop_free                = vhost_scsi_check_stop_free,
2072         .sess_get_index                 = vhost_scsi_sess_get_index,
2073         .sess_get_initiator_sid         = NULL,
2074         .write_pending                  = vhost_scsi_write_pending,
2075         .write_pending_status           = vhost_scsi_write_pending_status,
2076         .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
2077         .get_cmd_state                  = vhost_scsi_get_cmd_state,
2078         .queue_data_in                  = vhost_scsi_queue_data_in,
2079         .queue_status                   = vhost_scsi_queue_status,
2080         .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
2081         .aborted_task                   = vhost_scsi_aborted_task,
2082         /*
2083          * Setup callers for generic logic in target_core_fabric_configfs.c
2084          */
2085         .fabric_make_wwn                = vhost_scsi_make_tport,
2086         .fabric_drop_wwn                = vhost_scsi_drop_tport,
2087         .fabric_make_tpg                = vhost_scsi_make_tpg,
2088         .fabric_drop_tpg                = vhost_scsi_drop_tpg,
2089         .fabric_post_link               = vhost_scsi_port_link,
2090         .fabric_pre_unlink              = vhost_scsi_port_unlink,
2091
2092         .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
2093         .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
2094         .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
2095 };
2096
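/*
 * Module init: create the dedicated submission workqueue, register the
 * /dev/vhost-scsi misc device, and finally register the "vhost" fabric
 * template with the target core, unwinding in reverse order on error.
 */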
2097 static int __init vhost_scsi_init(void)
2098 {
2099         int ret = -ENOMEM;
2100
2101         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2102                 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2103                 utsname()->machine);
2104
2105         /*
2106          * Use our own dedicated workqueue for submitting I/O into
2107          * target core to avoid contention within system_wq.
2108          */
2109         vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2110         if (!vhost_scsi_workqueue)
2111                 goto out;
2112
2113         ret = vhost_scsi_register();
2114         if (ret < 0)
2115                 goto out_destroy_workqueue;
2116
2117         ret = target_register_template(&vhost_scsi_ops);
2118         if (ret < 0)
2119                 goto out_vhost_scsi_deregister;
2120
2121         return 0;
2122
2123 out_vhost_scsi_deregister:
2124         vhost_scsi_deregister();
2125 out_destroy_workqueue:
2126         destroy_workqueue(vhost_scsi_workqueue);
2127 out:
2128         return ret;
2129 }
2130
2131 static void vhost_scsi_exit(void)
2132 {
2133         target_unregister_template(&vhost_scsi_ops);
2134         vhost_scsi_deregister();
2135         destroy_workqueue(vhost_scsi_workqueue);
2136 }
2137
2138 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2139 MODULE_ALIAS("tcm_vhost");
2140 MODULE_LICENSE("GPL");
2141 module_init(vhost_scsi_init);
2142 module_exit(vhost_scsi_exit);