1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2013 Datera, Inc.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/vmalloc.h>
39 #include <linux/miscdevice.h>
40 #include <asm/unaligned.h>
41 #include <scsi/scsi_common.h>
42 #include <scsi/scsi_proto.h>
43 #include <target/target_core_base.h>
44 #include <target/target_core_fabric.h>
45 #include <linux/vhost.h>
46 #include <linux/virtio_scsi.h>
47 #include <linux/llist.h>
48 #include <linux/bitmap.h>
49
50 #include "vhost.h"
51
52 #define VHOST_SCSI_VERSION  "v0.1"
53 #define VHOST_SCSI_NAMELEN 256
54 #define VHOST_SCSI_MAX_CDB_SIZE 32
55 #define VHOST_SCSI_DEFAULT_TAGS 256
56 #define VHOST_SCSI_PREALLOC_SGLS 2048
57 #define VHOST_SCSI_PREALLOC_UPAGES 2048
58 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
59
60 struct vhost_scsi_inflight {
61         /* Wait for the flush operation to finish */
62         struct completion comp;
63         /* Refcount for the inflight reqs */
64         struct kref kref;
65 };
66
67 struct vhost_scsi_cmd {
68         /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
69         int tvc_vq_desc;
70         /* virtio-scsi initiator task attribute */
71         int tvc_task_attr;
72         /* virtio-scsi response incoming iovecs */
73         int tvc_in_iovs;
74         /* virtio-scsi initiator data direction */
75         enum dma_data_direction tvc_data_direction;
76         /* Expected data transfer length from virtio-scsi header */
77         u32 tvc_exp_data_len;
78         /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
79         u64 tvc_tag;
80         /* The number of scatterlists associated with this cmd */
81         u32 tvc_sgl_count;
82         u32 tvc_prot_sgl_count;
83         /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
84         u32 tvc_lun;
85         /* Pointer to the SGL formatted memory from virtio-scsi */
86         struct scatterlist *tvc_sgl;
87         struct scatterlist *tvc_prot_sgl;
88         struct page **tvc_upages;
89         /* Response header iovec, copied from the virtqueue */
90         struct iovec tvc_resp_iov;
91         /* Pointer to vhost_scsi for our device */
92         struct vhost_scsi *tvc_vhost;
93         /* Pointer to vhost_virtqueue for the cmd */
94         struct vhost_virtqueue *tvc_vq;
95         /* Pointer to vhost nexus memory */
96         struct vhost_scsi_nexus *tvc_nexus;
97         /* The TCM I/O descriptor that is accessed via container_of() */
98         struct se_cmd tvc_se_cmd;
99         /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
100         struct work_struct work;
101         /* Copy of the incoming SCSI command descriptor block (CDB) */
102         unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
103         /* Sense buffer that will be mapped into outgoing status */
104         unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
105         /* Completed commands list, serviced from vhost worker thread */
106         struct llist_node tvc_completion_list;
107         /* Used to track inflight cmd */
108         struct vhost_scsi_inflight *inflight;
109 };
110
111 struct vhost_scsi_nexus {
112         /* Pointer to TCM session for I_T Nexus */
113         struct se_session *tvn_se_sess;
114 };
115
116 struct vhost_scsi_tpg {
117         /* Vhost port target portal group tag for TCM */
118         u16 tport_tpgt;
119         /* Used to track the number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
120         int tv_tpg_port_count;
121         /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
122         int tv_tpg_vhost_count;
123         /* Used for enabling T10-PI with legacy devices */
124         int tv_fabric_prot_type;
125         /* list for vhost_scsi_list */
126         struct list_head tv_tpg_list;
127         /* Used to protect access for tpg_nexus */
128         struct mutex tv_tpg_mutex;
129         /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
130         struct vhost_scsi_nexus *tpg_nexus;
131         /* Pointer back to vhost_scsi_tport */
132         struct vhost_scsi_tport *tport;
133         /* Returned by vhost_scsi_make_tpg() */
134         struct se_portal_group se_tpg;
135         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
136         struct vhost_scsi *vhost_scsi;
137 };
138
139 struct vhost_scsi_tport {
140         /* SCSI protocol the tport is providing */
141         u8 tport_proto_id;
142         /* Binary World Wide unique Port Name for Vhost Target port */
143         u64 tport_wwpn;
144         /* ASCII formatted WWPN for Vhost Target port */
145         char tport_name[VHOST_SCSI_NAMELEN];
146         /* Returned by vhost_scsi_make_tport() */
147         struct se_wwn tport_wwn;
148 };
149
150 struct vhost_scsi_evt {
151         /* event to be sent to guest */
152         struct virtio_scsi_event event;
153         /* event list, serviced from vhost worker thread */
154         struct llist_node list;
155 };
156
157 enum {
158         VHOST_SCSI_VQ_CTL = 0,
159         VHOST_SCSI_VQ_EVT = 1,
160         VHOST_SCSI_VQ_IO = 2,
161 };
162
163 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
164 enum {
165         VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
166                                                (1ULL << VIRTIO_SCSI_F_T10_PI)
167 };
168
169 #define VHOST_SCSI_MAX_TARGET   256
170 #define VHOST_SCSI_MAX_VQ       128
171 #define VHOST_SCSI_MAX_EVENT    128
172
173 struct vhost_scsi_virtqueue {
174         struct vhost_virtqueue vq;
175         /*
176          * Reference counting for inflight reqs, used for the flush operation.
177          * At any given time one counter tracks newly submitted commands, while
178          * we wait for the other one to reach 0.
179          */
180         struct vhost_scsi_inflight inflights[2];
181         /*
182          * Indicate current inflight in use, protected by vq->mutex.
183          * Writers must also take dev mutex and flush under it.
184          */
185         int inflight_idx;
186 };
187
188 struct vhost_scsi {
189         /* Protected by vhost_scsi->dev.mutex */
190         struct vhost_scsi_tpg **vs_tpg;
191         char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
192
193         struct vhost_dev dev;
194         struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
195
196         struct vhost_work vs_completion_work; /* cmd completion work item */
197         struct llist_head vs_completion_list; /* cmd completion queue */
198
199         struct vhost_work vs_event_work; /* evt injection work item */
200         struct llist_head vs_event_list; /* evt injection queue */
201
202         bool vs_events_missed; /* any missed events, protected by vq->mutex */
203         int vs_events_nr; /* num of pending events, protected by vq->mutex */
204 };
205
206 static struct workqueue_struct *vhost_scsi_workqueue;
207
208 /* Global mutex protecting the vhost_scsi TPG list for vhost IOCTL access */
209 static DEFINE_MUTEX(vhost_scsi_mutex);
210 static LIST_HEAD(vhost_scsi_list);
211
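/*
 * kref release callback for an inflight counter: wakes the waiter in
 * vhost_scsi_flush() once the last outstanding request has dropped its
 * reference.
 */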
212 static void vhost_scsi_done_inflight(struct kref *kref)
213 {
214         struct vhost_scsi_inflight *inflight;
215
216         inflight = container_of(kref, struct vhost_scsi_inflight, kref);
217         complete(&inflight->comp);
218 }
219
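/*
 * Switch every virtqueue over to a fresh inflight counter. When
 * old_inflight is non-NULL the previous counters are returned so that
 * vhost_scsi_flush() can wait for them to drain.
 */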
220 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
221                                     struct vhost_scsi_inflight *old_inflight[])
222 {
223         struct vhost_scsi_inflight *new_inflight;
224         struct vhost_virtqueue *vq;
225         int idx, i;
226
227         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
228                 vq = &vs->vqs[i].vq;
229
230                 mutex_lock(&vq->mutex);
231
232                 /* Store the old inflight */
233                 idx = vs->vqs[i].inflight_idx;
234                 if (old_inflight)
235                         old_inflight[i] = &vs->vqs[i].inflights[idx];
236
237                 /* Set up the new inflight */
238                 vs->vqs[i].inflight_idx = idx ^ 1;
239                 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
240                 kref_init(&new_inflight->kref);
241                 init_completion(&new_inflight->comp);
242
243                 mutex_unlock(&vq->mutex);
244         }
245 }
246
247 static struct vhost_scsi_inflight *
248 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
249 {
250         struct vhost_scsi_inflight *inflight;
251         struct vhost_scsi_virtqueue *svq;
252
253         svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
254         inflight = &svq->inflights[svq->inflight_idx];
255         kref_get(&inflight->kref);
256
257         return inflight;
258 }
259
260 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
261 {
262         kref_put(&inflight->kref, vhost_scsi_done_inflight);
263 }
264
265 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
266 {
267         return 1;
268 }
269
270 static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
271 {
272         return 0;
273 }
274
275 static char *vhost_scsi_get_fabric_name(void)
276 {
277         return "vhost";
278 }
279
280 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
281 {
282         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
283                                 struct vhost_scsi_tpg, se_tpg);
284         struct vhost_scsi_tport *tport = tpg->tport;
285
286         return &tport->tport_name[0];
287 }
288
289 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
290 {
291         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
292                                 struct vhost_scsi_tpg, se_tpg);
293         return tpg->tport_tpgt;
294 }
295
296 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
297 {
298         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
299                                 struct vhost_scsi_tpg, se_tpg);
300
301         return tpg->tv_fabric_prot_type;
302 }
303
304 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
305 {
306         return 1;
307 }
308
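/*
 * Fabric release_cmd callback: put the pages pinned for the data and
 * protection scatterlists, drop the inflight reference and return the
 * tag to the session tag pool.
 */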
309 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
310 {
311         struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
312                                 struct vhost_scsi_cmd, tvc_se_cmd);
313         struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
314         int i;
315
316         if (tv_cmd->tvc_sgl_count) {
317                 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
318                         put_page(sg_page(&tv_cmd->tvc_sgl[i]));
319         }
320         if (tv_cmd->tvc_prot_sgl_count) {
321                 for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
322                         put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
323         }
324
325         vhost_scsi_put_inflight(tv_cmd->inflight);
326         target_free_tag(se_sess, se_cmd);
327 }
328
329 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
330 {
331         return 0;
332 }
333
334 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
335 {
336         /* Go ahead and process the write immediately */
337         target_execute_cmd(se_cmd);
338         return 0;
339 }
340
341 static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
342 {
343         return 0;
344 }
345
346 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
347 {
348         return;
349 }
350
351 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
352 {
353         return 0;
354 }
355
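/*
 * Queue a finished command on the completion list and kick the vhost
 * worker, which posts the response to the guest from
 * vhost_scsi_complete_cmd_work().
 */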
356 static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
357 {
358         struct vhost_scsi *vs = cmd->tvc_vhost;
359
360         llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
361
362         vhost_work_queue(&vs->dev, &vs->vs_completion_work);
363 }
364
365 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
366 {
367         struct vhost_scsi_cmd *cmd = container_of(se_cmd,
368                                 struct vhost_scsi_cmd, tvc_se_cmd);
369         vhost_scsi_complete_cmd(cmd);
370         return 0;
371 }
372
373 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
374 {
375         struct vhost_scsi_cmd *cmd = container_of(se_cmd,
376                                 struct vhost_scsi_cmd, tvc_se_cmd);
377         vhost_scsi_complete_cmd(cmd);
378         return 0;
379 }
380
381 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
382 {
383         return;
384 }
385
386 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
387 {
388         return;
389 }
390
391 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
392 {
393         vs->vs_events_nr--;
394         kfree(evt);
395 }
396
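/*
 * Allocate a virtio_scsi_event for injection into the guest's event
 * virtqueue. Returns NULL and sets vs_events_missed if too many events
 * are already pending or the allocation fails.
 */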
397 static struct vhost_scsi_evt *
398 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
399                        u32 event, u32 reason)
400 {
401         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
402         struct vhost_scsi_evt *evt;
403
404         if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
405                 vs->vs_events_missed = true;
406                 return NULL;
407         }
408
409         evt = kzalloc(sizeof(*evt), GFP_KERNEL);
410         if (!evt) {
411                 vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
412                 vs->vs_events_missed = true;
413                 return NULL;
414         }
415
416         evt->event.event = cpu_to_vhost32(vq, event);
417         evt->event.reason = cpu_to_vhost32(vq, reason);
418         vs->vs_events_nr++;
419
420         return evt;
421 }
422
423 static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
424 {
425         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
426
427         /* TODO locking against target/backend threads? */
428         transport_generic_free_cmd(se_cmd, 0);
429
430 }
431
432 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
433 {
434         return target_put_sess_cmd(se_cmd);
435 }
436
437 static void
438 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
439 {
440         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
441         struct virtio_scsi_event *event = &evt->event;
442         struct virtio_scsi_event __user *eventp;
443         unsigned out, in;
444         int head, ret;
445
446         if (!vq->private_data) {
447                 vs->vs_events_missed = true;
448                 return;
449         }
450
451 again:
452         vhost_disable_notify(&vs->dev, vq);
453         head = vhost_get_vq_desc(vq, vq->iov,
454                         ARRAY_SIZE(vq->iov), &out, &in,
455                         NULL, NULL);
456         if (head < 0) {
457                 vs->vs_events_missed = true;
458                 return;
459         }
460         if (head == vq->num) {
461                 if (vhost_enable_notify(&vs->dev, vq))
462                         goto again;
463                 vs->vs_events_missed = true;
464                 return;
465         }
466
467         if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
468                 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
469                                 vq->iov[out].iov_len);
470                 vs->vs_events_missed = true;
471                 return;
472         }
473
474         if (vs->vs_events_missed) {
475                 event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
476                 vs->vs_events_missed = false;
477         }
478
479         eventp = vq->iov[out].iov_base;
480         ret = __copy_to_user(eventp, event, sizeof(*event));
481         if (!ret)
482                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
483         else
484                 vq_err(vq, "Faulted on vhost_scsi_send_event\n");
485 }
486
487 static void vhost_scsi_evt_work(struct vhost_work *work)
488 {
489         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
490                                         vs_event_work);
491         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
492         struct vhost_scsi_evt *evt, *t;
493         struct llist_node *llnode;
494
495         mutex_lock(&vq->mutex);
496         llnode = llist_del_all(&vs->vs_event_list);
497         llist_for_each_entry_safe(evt, t, llnode, list) {
498                 vhost_scsi_do_evt_work(vs, evt);
499                 vhost_scsi_free_evt(vs, evt);
500         }
501         mutex_unlock(&vq->mutex);
502 }
503
504 /* Fill in status and signal that we are done processing this command
505  *
506  * This is scheduled in the vhost work queue so we are called with the owner
507  * process mm and can access the vring.
508  */
509 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
510 {
511         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
512                                         vs_completion_work);
513         DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
514         struct virtio_scsi_cmd_resp v_rsp;
515         struct vhost_scsi_cmd *cmd, *t;
516         struct llist_node *llnode;
517         struct se_cmd *se_cmd;
518         struct iov_iter iov_iter;
519         int ret, vq;
520
521         bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
522         llnode = llist_del_all(&vs->vs_completion_list);
523         llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
524                 se_cmd = &cmd->tvc_se_cmd;
525
526                 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
527                         cmd, se_cmd->residual_count, se_cmd->scsi_status);
528
529                 memset(&v_rsp, 0, sizeof(v_rsp));
530                 v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
531                 /* TODO is status_qualifier field needed? */
532                 v_rsp.status = se_cmd->scsi_status;
533                 v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
534                                                  se_cmd->scsi_sense_length);
535                 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
536                        se_cmd->scsi_sense_length);
537
538                 iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
539                               cmd->tvc_in_iovs, sizeof(v_rsp));
540                 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
541                 if (likely(ret == sizeof(v_rsp))) {
542                         struct vhost_scsi_virtqueue *q;
543                         vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
544                         q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
545                         vq = q - vs->vqs;
546                         __set_bit(vq, signal);
547                 } else
548                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
549
550                 vhost_scsi_free_cmd(cmd);
551         }
552
553         vq = -1;
554         while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
555                 < VHOST_SCSI_MAX_VQ)
556                 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
557 }
558
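/*
 * Obtain a tag from the session tag pool and initialize the pre-allocated
 * vhost_scsi_cmd it indexes. The pre-allocated sgl, prot_sgl and upages
 * arrays are preserved across the memset, and an inflight reference is
 * taken for the current flush generation.
 */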
559 static struct vhost_scsi_cmd *
560 vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
561                    unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
562                    u32 exp_data_len, int data_direction)
563 {
564         struct vhost_scsi_cmd *cmd;
565         struct vhost_scsi_nexus *tv_nexus;
566         struct se_session *se_sess;
567         struct scatterlist *sg, *prot_sg;
568         struct page **pages;
569         int tag, cpu;
570
571         tv_nexus = tpg->tpg_nexus;
572         if (!tv_nexus) {
573                 pr_err("Unable to locate active struct vhost_scsi_nexus\n");
574                 return ERR_PTR(-EIO);
575         }
576         se_sess = tv_nexus->tvn_se_sess;
577
578         tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
579         if (tag < 0) {
580                 pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
581                 return ERR_PTR(-ENOMEM);
582         }
583
584         cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
585         sg = cmd->tvc_sgl;
586         prot_sg = cmd->tvc_prot_sgl;
587         pages = cmd->tvc_upages;
588         memset(cmd, 0, sizeof(*cmd));
589         cmd->tvc_sgl = sg;
590         cmd->tvc_prot_sgl = prot_sg;
591         cmd->tvc_upages = pages;
592         cmd->tvc_se_cmd.map_tag = tag;
593         cmd->tvc_se_cmd.map_cpu = cpu;
594         cmd->tvc_tag = scsi_tag;
595         cmd->tvc_lun = lun;
596         cmd->tvc_task_attr = task_attr;
597         cmd->tvc_exp_data_len = exp_data_len;
598         cmd->tvc_data_direction = data_direction;
599         cmd->tvc_nexus = tv_nexus;
600         cmd->inflight = vhost_scsi_get_inflight(vq);
601
602         memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
603
604         return cmd;
605 }
606
607 /*
608  * Map a user memory range into a scatterlist
609  *
610  * Returns the number of scatterlist entries used or -errno on error.
611  */
612 static int
613 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
614                       struct iov_iter *iter,
615                       struct scatterlist *sgl,
616                       bool write)
617 {
618         struct page **pages = cmd->tvc_upages;
619         struct scatterlist *sg = sgl;
620         ssize_t bytes;
621         size_t offset;
622         unsigned int npages = 0;
623
624         bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
625                                 VHOST_SCSI_PREALLOC_UPAGES, &offset);
626         /* No pages were pinned */
627         if (bytes <= 0)
628                 return bytes < 0 ? bytes : -EFAULT;
629
630         iov_iter_advance(iter, bytes);
631
632         while (bytes) {
633                 unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
634                 sg_set_page(sg++, pages[npages++], n, offset);
635                 bytes -= n;
636                 offset = 0;
637         }
638         return npages;
639 }
640
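/*
 * Return the number of scatterlist entries needed to map @iter, or
 * -EINVAL if the iter is malformed or needs more than @max_sgls entries.
 */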
641 static int
642 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
643 {
644         int sgl_count = 0;
645
646         if (!iter || !iter->iov) {
647                 pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
648                        " present\n", __func__, bytes);
649                 return -EINVAL;
650         }
651
652         sgl_count = iov_iter_npages(iter, 0xffff);
653         if (sgl_count > max_sgls) {
654                 pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
655                        " max_sgls: %d\n", __func__, sgl_count, max_sgls);
656                 return -EINVAL;
657         }
658         return sgl_count;
659 }
660
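/*
 * Map all of @iter into @sg via vhost_scsi_map_to_sgl(). On failure any
 * pages already pinned for this scatterlist are released before the
 * error is returned.
 */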
661 static int
662 vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
663                       struct iov_iter *iter,
664                       struct scatterlist *sg, int sg_count)
665 {
666         struct scatterlist *p = sg;
667         int ret;
668
669         while (iov_iter_count(iter)) {
670                 ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
671                 if (ret < 0) {
672                         while (p < sg) {
673                                 struct page *page = sg_page(p++);
674                                 if (page)
675                                         put_page(page);
676                         }
677                         return ret;
678                 }
679                 sg += ret;
680         }
681         return 0;
682 }
683
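/*
 * Map the optional protection payload and the data payload described by
 * the two iov_iters into the command's pre-allocated scatterlists.
 */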
684 static int
685 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
686                  size_t prot_bytes, struct iov_iter *prot_iter,
687                  size_t data_bytes, struct iov_iter *data_iter)
688 {
689         int sgl_count, ret;
690         bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
691
692         if (prot_bytes) {
693                 sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
694                                                  VHOST_SCSI_PREALLOC_PROT_SGLS);
695                 if (sgl_count < 0)
696                         return sgl_count;
697
698                 sg_init_table(cmd->tvc_prot_sgl, sgl_count);
699                 cmd->tvc_prot_sgl_count = sgl_count;
700                 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
701                          cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
702
703                 ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
704                                             cmd->tvc_prot_sgl,
705                                             cmd->tvc_prot_sgl_count);
706                 if (ret < 0) {
707                         cmd->tvc_prot_sgl_count = 0;
708                         return ret;
709                 }
710         }
711         sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
712                                          VHOST_SCSI_PREALLOC_SGLS);
713         if (sgl_count < 0)
714                 return sgl_count;
715
716         sg_init_table(cmd->tvc_sgl, sgl_count);
717         cmd->tvc_sgl_count = sgl_count;
718         pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
719                   cmd->tvc_sgl, cmd->tvc_sgl_count);
720
721         ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
722                                     cmd->tvc_sgl, cmd->tvc_sgl_count);
723         if (ret < 0) {
724                 cmd->tvc_sgl_count = 0;
725                 return ret;
726         }
727         return 0;
728 }
729
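/*
 * Translate a virtio-scsi task attribute into the corresponding TCM tag
 * type; unknown values fall back to a simple tag.
 */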
730 static int vhost_scsi_to_tcm_attr(int attr)
731 {
732         switch (attr) {
733         case VIRTIO_SCSI_S_SIMPLE:
734                 return TCM_SIMPLE_TAG;
735         case VIRTIO_SCSI_S_ORDERED:
736                 return TCM_ORDERED_TAG;
737         case VIRTIO_SCSI_S_HEAD:
738                 return TCM_HEAD_TAG;
739         case VIRTIO_SCSI_S_ACA:
740                 return TCM_ACA_TAG;
741         default:
742                 break;
743         }
744         return TCM_SIMPLE_TAG;
745 }
746
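/*
 * Process-context work item that hands a decoded command to the target
 * core via target_submit_cmd_map_sgls(). On submission failure a
 * LOGICAL UNIT COMMUNICATION FAILURE check condition is returned to the
 * guest and the command is freed.
 */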
747 static void vhost_scsi_submission_work(struct work_struct *work)
748 {
749         struct vhost_scsi_cmd *cmd =
750                 container_of(work, struct vhost_scsi_cmd, work);
751         struct vhost_scsi_nexus *tv_nexus;
752         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
753         struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
754         int rc;
755
756         /* FIXME: BIDI operation */
757         if (cmd->tvc_sgl_count) {
758                 sg_ptr = cmd->tvc_sgl;
759
760                 if (cmd->tvc_prot_sgl_count)
761                         sg_prot_ptr = cmd->tvc_prot_sgl;
762                 else
763                         se_cmd->prot_pto = true;
764         } else {
765                 sg_ptr = NULL;
766         }
767         tv_nexus = cmd->tvc_nexus;
768
769         se_cmd->tag = 0;
770         rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
771                         cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
772                         cmd->tvc_lun, cmd->tvc_exp_data_len,
773                         vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
774                         cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
775                         sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
776                         cmd->tvc_prot_sgl_count);
777         if (rc < 0) {
778                 transport_send_check_condition_and_sense(se_cmd,
779                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
780                 transport_generic_free_cmd(se_cmd, 0);
781         }
782 }
783
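/*
 * Complete a request that cannot be routed to a backend: write a
 * VIRTIO_SCSI_S_BAD_TARGET response into the guest buffer and mark the
 * descriptor as used.
 */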
784 static void
785 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
786                            struct vhost_virtqueue *vq,
787                            int head, unsigned out)
788 {
789         struct virtio_scsi_cmd_resp __user *resp;
790         struct virtio_scsi_cmd_resp rsp;
791         int ret;
792
793         memset(&rsp, 0, sizeof(rsp));
794         rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
795         resp = vq->iov[out].iov_base;
796         ret = __copy_to_user(resp, &rsp, sizeof(rsp));
797         if (!ret)
798                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
799         else
800                 pr_err("Faulted on virtio_scsi_cmd_resp\n");
801 }
802
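/*
 * Main I/O virtqueue handler: pop request descriptors, decode the
 * (optionally T10-PI) virtio-scsi header, look up the target portal
 * group, map the data payload into scatterlists and queue each command
 * for submission via vhost_scsi_submission_work().
 */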
803 static void
804 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
805 {
806         struct vhost_scsi_tpg **vs_tpg, *tpg;
807         struct virtio_scsi_cmd_req v_req;
808         struct virtio_scsi_cmd_req_pi v_req_pi;
809         struct vhost_scsi_cmd *cmd;
810         struct iov_iter out_iter, in_iter, prot_iter, data_iter;
811         u64 tag;
812         u32 exp_data_len, data_direction;
813         unsigned int out = 0, in = 0;
814         int head, ret, prot_bytes;
815         size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
816         size_t out_size, in_size;
817         u16 lun;
818         u8 *target, *lunp, task_attr;
819         bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
820         void *req, *cdb;
821
822         mutex_lock(&vq->mutex);
823         /*
824          * We can handle the vq only after the endpoint is setup by calling the
825          * VHOST_SCSI_SET_ENDPOINT ioctl.
826          */
827         vs_tpg = vq->private_data;
828         if (!vs_tpg)
829                 goto out;
830
831         vhost_disable_notify(&vs->dev, vq);
832
833         for (;;) {
834                 head = vhost_get_vq_desc(vq, vq->iov,
835                                          ARRAY_SIZE(vq->iov), &out, &in,
836                                          NULL, NULL);
837                 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
838                          head, out, in);
839                 /* On error, stop handling until the next kick. */
840                 if (unlikely(head < 0))
841                         break;
842                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
843                 if (head == vq->num) {
844                         if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
845                                 vhost_disable_notify(&vs->dev, vq);
846                                 continue;
847                         }
848                         break;
849                 }
850                 /*
851                  * Check for a sane response buffer so we can report early
852                  * errors back to the guest.
853                  */
854                 if (unlikely(vq->iov[out].iov_len < rsp_size)) {
855                         vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
856                                 " size, got %zu bytes\n", vq->iov[out].iov_len);
857                         break;
858                 }
859                 /*
860                  * Setup pointers and values based upon different virtio-scsi
861                  * request header if T10_PI is enabled in KVM guest.
862                  */
863                 if (t10_pi) {
864                         req = &v_req_pi;
865                         req_size = sizeof(v_req_pi);
866                         lunp = &v_req_pi.lun[0];
867                         target = &v_req_pi.lun[1];
868                 } else {
869                         req = &v_req;
870                         req_size = sizeof(v_req);
871                         lunp = &v_req.lun[0];
872                         target = &v_req.lun[1];
873                 }
874                 /*
875                  * FIXME: Not correct for BIDI operation
876                  */
877                 out_size = iov_length(vq->iov, out);
878                 in_size = iov_length(&vq->iov[out], in);
879
880                 /*
881                  * Copy over the virtio-scsi request header, which for an
882                  * ANY_LAYOUT enabled guest may span multiple iovecs, or a
883                  * single iovec may contain both the header + outgoing
884                  * WRITE payloads.
885                  *
886                  * copy_from_iter() will advance out_iter, so that it will
887                  * point at the start of the outgoing WRITE payload, if
888                  * DMA_TO_DEVICE is set.
889                  */
890                 iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
891
892                 if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
893                         vq_err(vq, "Faulted on copy_from_iter\n");
894                         vhost_scsi_send_bad_target(vs, vq, head, out);
895                         continue;
896                 }
897                 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
898                 if (unlikely(*lunp != 1)) {
899                         vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
900                         vhost_scsi_send_bad_target(vs, vq, head, out);
901                         continue;
902                 }
903
904                 tpg = READ_ONCE(vs_tpg[*target]);
905                 if (unlikely(!tpg)) {
906                         /* Target does not exist, fail the request */
907                         vhost_scsi_send_bad_target(vs, vq, head, out);
908                         continue;
909                 }
910                 /*
911                  * Determine data_direction by comparing the total outgoing
912                  * iovec size against the virtio-scsi request header size, and
913                  * the total incoming iovec size against the response header size.
914                  *
915                  * For DMA_TO_DEVICE this is out_iter, which is already pointing
916                  * to the right place.
917                  *
918                  * For DMA_FROM_DEVICE, the iovec will be just past the end
919                  * of the virtio-scsi response header in either the same
920                  * or immediately following iovec.
921                  *
922                  * Any associated T10_PI bytes for the outgoing / incoming
923                  * payloads are included in calculation of exp_data_len here.
924                  */
925                 prot_bytes = 0;
926
927                 if (out_size > req_size) {
928                         data_direction = DMA_TO_DEVICE;
929                         exp_data_len = out_size - req_size;
930                         data_iter = out_iter;
931                 } else if (in_size > rsp_size) {
932                         data_direction = DMA_FROM_DEVICE;
933                         exp_data_len = in_size - rsp_size;
934
935                         iov_iter_init(&in_iter, READ, &vq->iov[out], in,
936                                       rsp_size + exp_data_len);
937                         iov_iter_advance(&in_iter, rsp_size);
938                         data_iter = in_iter;
939                 } else {
940                         data_direction = DMA_NONE;
941                         exp_data_len = 0;
942                 }
943                 /*
944                  * If T10_PI header + payload is present, setup prot_iter values
945                  * and recalculate data_iter for vhost_scsi_mapal() mapping to
946                  * host scatterlists via get_user_pages_fast().
947                  */
948                 if (t10_pi) {
949                         if (v_req_pi.pi_bytesout) {
950                                 if (data_direction != DMA_TO_DEVICE) {
951                                         vq_err(vq, "Received non zero pi_bytesout,"
952                                                 " but wrong data_direction\n");
953                                         vhost_scsi_send_bad_target(vs, vq, head, out);
954                                         continue;
955                                 }
956                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
957                         } else if (v_req_pi.pi_bytesin) {
958                                 if (data_direction != DMA_FROM_DEVICE) {
959                                         vq_err(vq, "Received non zero pi_bytesin,"
960                                                 " but wrong data_direction\n");
961                                         vhost_scsi_send_bad_target(vs, vq, head, out);
962                                         continue;
963                                 }
964                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
965                         }
966                         /*
967                          * Set prot_iter to data_iter, and advance past any
968                          * preceding prot_bytes that may be present.
969                          *
970                          * Also fix up the exp_data_len to reflect only the
971                          * actual data payload length.
972                          */
973                         if (prot_bytes) {
974                                 exp_data_len -= prot_bytes;
975                                 prot_iter = data_iter;
976                                 iov_iter_advance(&data_iter, prot_bytes);
977                         }
978                         tag = vhost64_to_cpu(vq, v_req_pi.tag);
979                         task_attr = v_req_pi.task_attr;
980                         cdb = &v_req_pi.cdb[0];
981                         lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
982                 } else {
983                         tag = vhost64_to_cpu(vq, v_req.tag);
984                         task_attr = v_req.task_attr;
985                         cdb = &v_req.cdb[0];
986                         lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
987                 }
988                 /*
989                  * Check that the received CDB size does not exceed our
990                  * hardcoded max for vhost-scsi, then get a pre-allocated
991                  * cmd descriptor for the new virtio-scsi tag.
992                  *
993                  * TODO what if cdb was too small for varlen cdb header?
994                  */
995                 if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
996                         vq_err(vq, "Received SCSI CDB with command_size: %d that"
997                                 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
998                                 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
999                         vhost_scsi_send_bad_target(vs, vq, head, out);
1000                         continue;
1001                 }
1002                 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1003                                          exp_data_len + prot_bytes,
1004                                          data_direction);
1005                 if (IS_ERR(cmd)) {
1006                         vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1007                                PTR_ERR(cmd));
1008                         vhost_scsi_send_bad_target(vs, vq, head, out);
1009                         continue;
1010                 }
1011                 cmd->tvc_vhost = vs;
1012                 cmd->tvc_vq = vq;
1013                 cmd->tvc_resp_iov = vq->iov[out];
1014                 cmd->tvc_in_iovs = in;
1015
1016                 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1017                          cmd->tvc_cdb[0], cmd->tvc_lun);
1018                 pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1019                          " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1020
1021                 if (data_direction != DMA_NONE) {
1022                         ret = vhost_scsi_mapal(cmd,
1023                                                prot_bytes, &prot_iter,
1024                                                exp_data_len, &data_iter);
1025                         if (unlikely(ret)) {
1026                                 vq_err(vq, "Failed to map iov to sgl\n");
1027                                 vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1028                                 vhost_scsi_send_bad_target(vs, vq, head, out);
1029                                 continue;
1030                         }
1031                 }
1032                 /*
1033                  * Save the descriptor from vhost_get_vq_desc() to be used to
1034                  * complete the virtio-scsi request in TCM callback context via
1035                  * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1036                  */
1037                 cmd->tvc_vq_desc = head;
1038                 /*
1039                  * Dispatch cmd descriptor for cmwq execution in process
1040                  * context provided by vhost_scsi_workqueue.  This also ensures
1041                  * cmd is executed on the same kworker CPU as this vhost
1042                  * thread to gain positive L2 cache locality effects.
1043                  */
1044                 INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1045                 queue_work(vhost_scsi_workqueue, &cmd->work);
1046         }
1047 out:
1048         mutex_unlock(&vq->mutex);
1049 }
1050
1051 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1052 {
1053         pr_debug("%s: The handling func for control queue.\n", __func__);
1054 }
1055
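/*
 * Build a virtio-scsi event (e.g. LUN hotplug) for the given TPG/LUN and
 * queue it for injection; the copy into the event virtqueue happens
 * later in vhost_scsi_evt_work().
 */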
1056 static void
1057 vhost_scsi_send_evt(struct vhost_scsi *vs,
1058                    struct vhost_scsi_tpg *tpg,
1059                    struct se_lun *lun,
1060                    u32 event,
1061                    u32 reason)
1062 {
1063         struct vhost_scsi_evt *evt;
1064
1065         evt = vhost_scsi_allocate_evt(vs, event, reason);
1066         if (!evt)
1067                 return;
1068
1069         if (tpg && lun) {
1070                 /* TODO: share lun setup code with virtio-scsi.ko */
1071                 /*
1072                  * Note: evt->event is zeroed when we allocate it and
1073                  * lun[4-7] need to be zero according to virtio-scsi spec.
1074                  */
1075                 evt->event.lun[0] = 0x01;
1076                 evt->event.lun[1] = tpg->tport_tpgt;
1077                 if (lun->unpacked_lun >= 256)
1078                         evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1079                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1080         }
1081
1082         llist_add(&evt->list, &vs->vs_event_list);
1083         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1084 }
1085
1086 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1087 {
1088         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1089                                                 poll.work);
1090         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1091
1092         mutex_lock(&vq->mutex);
1093         if (!vq->private_data)
1094                 goto out;
1095
1096         if (vs->vs_events_missed)
1097                 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1098 out:
1099         mutex_unlock(&vq->mutex);
1100 }
1101
1102 static void vhost_scsi_handle_kick(struct vhost_work *work)
1103 {
1104         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1105                                                 poll.work);
1106         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1107
1108         vhost_scsi_handle_vq(vs, vq);
1109 }
1110
1111 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1112 {
1113         vhost_poll_flush(&vs->vqs[index].vq.poll);
1114 }
1115
1116 /* Callers must hold dev mutex */
1117 static void vhost_scsi_flush(struct vhost_scsi *vs)
1118 {
1119         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1120         int i;
1121
1122         /* Init new inflight and remember the old inflight */
1123         vhost_scsi_init_inflight(vs, old_inflight);
1124
1125         /*
1126          * The inflight->kref was initialized to 1. We decrement it here to
1127          * indicate the start of the flush operation so that it will reach 0
1128          * when all the reqs are finished.
1129          */
1130         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1131                 kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1132
1133         /* Flush both the vhost poll and vhost work */
1134         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1135                 vhost_scsi_flush_vq(vs, i);
1136         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1137         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1138
1139         /* Wait for all reqs issued before the flush to be finished */
1140         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1141                 wait_for_completion(&old_inflight[i]->comp);
1142 }
1143
1144 /*
1145  * Called from vhost_scsi_ioctl() context to walk the list of available
1146  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1147  *
1148  *  The lock nesting rule is:
1149  *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1150  */
1151 static int
1152 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1153                         struct vhost_scsi_target *t)
1154 {
1155         struct se_portal_group *se_tpg;
1156         struct vhost_scsi_tport *tv_tport;
1157         struct vhost_scsi_tpg *tpg;
1158         struct vhost_scsi_tpg **vs_tpg;
1159         struct vhost_virtqueue *vq;
1160         int index, ret, i, len;
1161         bool match = false;
1162
1163         mutex_lock(&vhost_scsi_mutex);
1164         mutex_lock(&vs->dev.mutex);
1165
1166         /* Verify that the ring has been set up correctly. */
1167         for (index = 0; index < vs->dev.nvqs; ++index) {
1169                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1170                         ret = -EFAULT;
1171                         goto out;
1172                 }
1173         }
1174
1175         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1176         vs_tpg = kzalloc(len, GFP_KERNEL);
1177         if (!vs_tpg) {
1178                 ret = -ENOMEM;
1179                 goto out;
1180         }
1181         if (vs->vs_tpg)
1182                 memcpy(vs_tpg, vs->vs_tpg, len);
1183
1184         list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1185                 mutex_lock(&tpg->tv_tpg_mutex);
1186                 if (!tpg->tpg_nexus) {
1187                         mutex_unlock(&tpg->tv_tpg_mutex);
1188                         continue;
1189                 }
1190                 if (tpg->tv_tpg_vhost_count != 0) {
1191                         mutex_unlock(&tpg->tv_tpg_mutex);
1192                         continue;
1193                 }
1194                 tv_tport = tpg->tport;
1195
1196                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1197                         if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1198                                 kfree(vs_tpg);
1199                                 mutex_unlock(&tpg->tv_tpg_mutex);
1200                                 ret = -EEXIST;
1201                                 goto out;
1202                         }
1203                         /*
1204                          * In order to ensure individual vhost-scsi configfs
1205                          * groups cannot be removed while in use by vhost ioctl,
1206                          * go ahead and take an explicit se_tpg->tpg_group.cg_item
1207                          * dependency now.
1208                          */
1209                         se_tpg = &tpg->se_tpg;
1210                         ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1211                         if (ret) {
1212                                 pr_warn("configfs_depend_item() failed: %d\n", ret);
1213                                 kfree(vs_tpg);
1214                                 mutex_unlock(&tpg->tv_tpg_mutex);
1215                                 goto out;
1216                         }
1217                         tpg->tv_tpg_vhost_count++;
1218                         tpg->vhost_scsi = vs;
1219                         vs_tpg[tpg->tport_tpgt] = tpg;
1220                         smp_mb__after_atomic();
1221                         match = true;
1222                 }
1223                 mutex_unlock(&tpg->tv_tpg_mutex);
1224         }
1225
1226         if (match) {
1227                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1228                        sizeof(vs->vs_vhost_wwpn));
1229                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1230                         vq = &vs->vqs[i].vq;
1231                         mutex_lock(&vq->mutex);
1232                         vq->private_data = vs_tpg;
1233                         vhost_vq_init_access(vq);
1234                         mutex_unlock(&vq->mutex);
1235                 }
1236                 ret = 0;
1237         } else {
1238                 ret = -EEXIST;
1239         }
1240
1241         /*
1242          * Act as synchronize_rcu to make sure access to
1243          * old vs->vs_tpg is finished.
1244          */
1245         vhost_scsi_flush(vs);
1246         kfree(vs->vs_tpg);
1247         vs->vs_tpg = vs_tpg;
1248
1249 out:
1250         mutex_unlock(&vs->dev.mutex);
1251         mutex_unlock(&vhost_scsi_mutex);
1252         return ret;
1253 }
1254
1255 static int
1256 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1257                           struct vhost_scsi_target *t)
1258 {
1259         struct se_portal_group *se_tpg;
1260         struct vhost_scsi_tport *tv_tport;
1261         struct vhost_scsi_tpg *tpg;
1262         struct vhost_virtqueue *vq;
1263         bool match = false;
1264         int index, ret, i;
1265         u8 target;
1266
1267         mutex_lock(&vhost_scsi_mutex);
1268         mutex_lock(&vs->dev.mutex);
1269         /* Verify that the ring has been set up correctly. */
1270         for (index = 0; index < vs->dev.nvqs; ++index) {
1271                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1272                         ret = -EFAULT;
1273                         goto err_dev;
1274                 }
1275         }
1276
1277         if (!vs->vs_tpg) {
1278                 ret = 0;
1279                 goto err_dev;
1280         }
1281
1282         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1283                 target = i;
1284                 tpg = vs->vs_tpg[target];
1285                 if (!tpg)
1286                         continue;
1287
1288                 mutex_lock(&tpg->tv_tpg_mutex);
1289                 tv_tport = tpg->tport;
1290                 if (!tv_tport) {
1291                         ret = -ENODEV;
1292                         goto err_tpg;
1293                 }
1294
1295                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1296                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1297                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1298                                 tv_tport->tport_name, tpg->tport_tpgt,
1299                                 t->vhost_wwpn, t->vhost_tpgt);
1300                         ret = -EINVAL;
1301                         goto err_tpg;
1302                 }
1303                 tpg->tv_tpg_vhost_count--;
1304                 tpg->vhost_scsi = NULL;
1305                 vs->vs_tpg[target] = NULL;
1306                 match = true;
1307                 mutex_unlock(&tpg->tv_tpg_mutex);
1308                 /*
1309                  * Release se_tpg->tpg_group.cg_item configfs dependency now
1310                  * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1311                  */
1312                 se_tpg = &tpg->se_tpg;
1313                 target_undepend_item(&se_tpg->tpg_group.cg_item);
1314         }
1315         if (match) {
1316                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1317                         vq = &vs->vqs[i].vq;
1318                         mutex_lock(&vq->mutex);
1319                         vq->private_data = NULL;
1320                         mutex_unlock(&vq->mutex);
1321                 }
1322         }
1323         /*
1324          * Act as synchronize_rcu to make sure access to
1325          * old vs->vs_tpg is finished.
1326          */
1327         vhost_scsi_flush(vs);
1328         kfree(vs->vs_tpg);
1329         vs->vs_tpg = NULL;
1330         WARN_ON(vs->vs_events_nr);
1331         mutex_unlock(&vs->dev.mutex);
1332         mutex_unlock(&vhost_scsi_mutex);
1333         return 0;
1334
1335 err_tpg:
1336         mutex_unlock(&tpg->tv_tpg_mutex);
1337 err_dev:
1338         mutex_unlock(&vs->dev.mutex);
1339         mutex_unlock(&vhost_scsi_mutex);
1340         return ret;
1341 }
1342
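/*
 * VHOST_SET_FEATURES handler: reject unsupported feature bits and
 * propagate the acked features to every virtqueue.
 */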
1343 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1344 {
1345         struct vhost_virtqueue *vq;
1346         int i;
1347
1348         if (features & ~VHOST_SCSI_FEATURES)
1349                 return -EOPNOTSUPP;
1350
1351         mutex_lock(&vs->dev.mutex);
1352         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1353             !vhost_log_access_ok(&vs->dev)) {
1354                 mutex_unlock(&vs->dev.mutex);
1355                 return -EFAULT;
1356         }
1357
1358         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1359                 vq = &vs->vqs[i].vq;
1360                 mutex_lock(&vq->mutex);
1361                 vq->acked_features = features;
1362                 mutex_unlock(&vq->mutex);
1363         }
1364         mutex_unlock(&vs->dev.mutex);
1365         return 0;
1366 }
1367
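/*
 * Open of the vhost-scsi character device: allocate the vhost_scsi
 * instance (falling back to vmalloc), set up the completion and event
 * work items, assign kick handlers and initialize the vhost device and
 * inflight counters.
 */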
1368 static int vhost_scsi_open(struct inode *inode, struct file *f)
1369 {
1370         struct vhost_scsi *vs;
1371         struct vhost_virtqueue **vqs;
1372         int r = -ENOMEM, i;
1373
1374         vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
1375         if (!vs) {
1376                 vs = vzalloc(sizeof(*vs));
1377                 if (!vs)
1378                         goto err_vs;
1379         }
1380
1381         vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
1382         if (!vqs)
1383                 goto err_vqs;
1384
1385         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1386         vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1387
1388         vs->vs_events_nr = 0;
1389         vs->vs_events_missed = false;
1390
1391         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1392         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1393         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1394         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1395         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1396                 vqs[i] = &vs->vqs[i].vq;
1397                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1398         }
1399         vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1400
1401         vhost_scsi_init_inflight(vs, NULL);
1402
1403         f->private_data = vs;
1404         return 0;
1405
1406 err_vqs:
1407         kvfree(vs);
1408 err_vs:
1409         return r;
1410 }
1411
1412 static int vhost_scsi_release(struct inode *inode, struct file *f)
1413 {
1414         struct vhost_scsi *vs = f->private_data;
1415         struct vhost_scsi_target t;
1416
1417         mutex_lock(&vs->dev.mutex);
1418         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1419         mutex_unlock(&vs->dev.mutex);
1420         vhost_scsi_clear_endpoint(vs, &t);
1421         vhost_dev_stop(&vs->dev);
1422         vhost_dev_cleanup(&vs->dev);
1423         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1424         vhost_scsi_flush(vs);
1425         kfree(vs->dev.vqs);
1426         kvfree(vs);
1427         return 0;
1428 }
1429
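/*
 * ioctl() interface of /dev/vhost-scsi.  Beyond the generic vhost ioctls
 * (forwarded in the default case below), the SCSI-specific ones attach or
 * detach a configured target (VHOST_SCSI_SET_ENDPOINT /
 * VHOST_SCSI_CLEAR_ENDPOINT), report the ABI version and get/set the
 * "events missed" flag for the event virtqueue.
 *
 * A rough userspace sketch, with error handling omitted and the WWPN and
 * TPGT values purely illustrative (the QEMU vhost-scsi backend is the
 * real reference, including the vring setup that follows):
 *
 *	int vhost = open("/dev/vhost-scsi", O_RDWR);
 *	struct vhost_scsi_target t = { .vhost_tpgt = 1 };
 *	strncpy(t.vhost_wwpn, "naa.6001405c3f8e2a01", sizeof(t.vhost_wwpn) - 1);
 *	ioctl(vhost, VHOST_SET_OWNER, NULL);
 *	ioctl(vhost, VHOST_SCSI_SET_ENDPOINT, &t);
 */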
1430 static long
1431 vhost_scsi_ioctl(struct file *f,
1432                  unsigned int ioctl,
1433                  unsigned long arg)
1434 {
1435         struct vhost_scsi *vs = f->private_data;
1436         struct vhost_scsi_target backend;
1437         void __user *argp = (void __user *)arg;
1438         u64 __user *featurep = argp;
1439         u32 __user *eventsp = argp;
1440         u32 events_missed;
1441         u64 features;
1442         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1443         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1444
1445         switch (ioctl) {
1446         case VHOST_SCSI_SET_ENDPOINT:
1447                 if (copy_from_user(&backend, argp, sizeof backend))
1448                         return -EFAULT;
1449                 if (backend.reserved != 0)
1450                         return -EOPNOTSUPP;
1451
1452                 return vhost_scsi_set_endpoint(vs, &backend);
1453         case VHOST_SCSI_CLEAR_ENDPOINT:
1454                 if (copy_from_user(&backend, argp, sizeof backend))
1455                         return -EFAULT;
1456                 if (backend.reserved != 0)
1457                         return -EOPNOTSUPP;
1458
1459                 return vhost_scsi_clear_endpoint(vs, &backend);
1460         case VHOST_SCSI_GET_ABI_VERSION:
1461                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1462                         return -EFAULT;
1463                 return 0;
1464         case VHOST_SCSI_SET_EVENTS_MISSED:
1465                 if (get_user(events_missed, eventsp))
1466                         return -EFAULT;
1467                 mutex_lock(&vq->mutex);
1468                 vs->vs_events_missed = events_missed;
1469                 mutex_unlock(&vq->mutex);
1470                 return 0;
1471         case VHOST_SCSI_GET_EVENTS_MISSED:
1472                 mutex_lock(&vq->mutex);
1473                 events_missed = vs->vs_events_missed;
1474                 mutex_unlock(&vq->mutex);
1475                 if (put_user(events_missed, eventsp))
1476                         return -EFAULT;
1477                 return 0;
1478         case VHOST_GET_FEATURES:
1479                 features = VHOST_SCSI_FEATURES;
1480                 if (copy_to_user(featurep, &features, sizeof features))
1481                         return -EFAULT;
1482                 return 0;
1483         case VHOST_SET_FEATURES:
1484                 if (copy_from_user(&features, featurep, sizeof features))
1485                         return -EFAULT;
1486                 return vhost_scsi_set_features(vs, features);
1487         default:
1488                 mutex_lock(&vs->dev.mutex);
1489                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1490                 /* TODO: flush backend after dev ioctl. */
1491                 if (r == -ENOIOCTLCMD)
1492                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1493                 mutex_unlock(&vs->dev.mutex);
1494                 return r;
1495         }
1496 }
1497
1498 #ifdef CONFIG_COMPAT
1499 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1500                                 unsigned long arg)
1501 {
1502         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1503 }
1504 #endif
1505
1506 static const struct file_operations vhost_scsi_fops = {
1507         .owner          = THIS_MODULE,
1508         .release        = vhost_scsi_release,
1509         .unlocked_ioctl = vhost_scsi_ioctl,
1510 #ifdef CONFIG_COMPAT
1511         .compat_ioctl   = vhost_scsi_compat_ioctl,
1512 #endif
1513         .open           = vhost_scsi_open,
1514         .llseek         = noop_llseek,
1515 };
1516
1517 static struct miscdevice vhost_scsi_misc = {
1518         MISC_DYNAMIC_MINOR,
1519         "vhost-scsi",
1520         &vhost_scsi_fops,
1521 };
1522
1523 static int __init vhost_scsi_register(void)
1524 {
1525         return misc_register(&vhost_scsi_misc);
1526 }
1527
1528 static void vhost_scsi_deregister(void)
1529 {
1530         misc_deregister(&vhost_scsi_misc);
1531 }
1532
1533 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1534 {
1535         switch (tport->tport_proto_id) {
1536         case SCSI_PROTOCOL_SAS:
1537                 return "SAS";
1538         case SCSI_PROTOCOL_FCP:
1539                 return "FCP";
1540         case SCSI_PROTOCOL_ISCSI:
1541                 return "iSCSI";
1542         default:
1543                 break;
1544         }
1545
1546         return "Unknown";
1547 }
1548
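/*
 * LUN hotplug/hotunplug notification.  When a LUN is linked to or removed
 * from a TPG that is currently attached to a vhost-scsi device, and the
 * guest negotiated VIRTIO_SCSI_F_HOTPLUG, queue a TRANSPORT_RESET event
 * (reason RESCAN or REMOVED) on the event virtqueue so the guest rescans
 * the target.
 */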
1549 static void
1550 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1551                   struct se_lun *lun, bool plug)
1552 {
1553
1554         struct vhost_scsi *vs = tpg->vhost_scsi;
1555         struct vhost_virtqueue *vq;
1556         u32 reason;
1557
1558         if (!vs)
1559                 return;
1560
1561         mutex_lock(&vs->dev.mutex);
1562
1563         if (plug)
1564                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1565         else
1566                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1567
1568         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1569         mutex_lock(&vq->mutex);
1570         if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1571                 vhost_scsi_send_evt(vs, tpg, lun,
1572                                    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1573         mutex_unlock(&vq->mutex);
1574         mutex_unlock(&vs->dev.mutex);
1575 }
1576
1577 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1578 {
1579         vhost_scsi_do_plug(tpg, lun, true);
1580 }
1581
1582 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1583 {
1584         vhost_scsi_do_plug(tpg, lun, false);
1585 }
1586
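/*
 * configfs LUN link/unlink callbacks from the target core: track the
 * number of mapped ports under tv_tpg_mutex and emit the corresponding
 * hotplug or hotunplug event towards the guest.
 */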
1587 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1588                                struct se_lun *lun)
1589 {
1590         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1591                                 struct vhost_scsi_tpg, se_tpg);
1592
1593         mutex_lock(&vhost_scsi_mutex);
1594
1595         mutex_lock(&tpg->tv_tpg_mutex);
1596         tpg->tv_tpg_port_count++;
1597         mutex_unlock(&tpg->tv_tpg_mutex);
1598
1599         vhost_scsi_hotplug(tpg, lun);
1600
1601         mutex_unlock(&vhost_scsi_mutex);
1602
1603         return 0;
1604 }
1605
1606 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1607                                   struct se_lun *lun)
1608 {
1609         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1610                                 struct vhost_scsi_tpg, se_tpg);
1611
1612         mutex_lock(&vhost_scsi_mutex);
1613
1614         mutex_lock(&tpg->tv_tpg_mutex);
1615         tpg->tv_tpg_port_count--;
1616         mutex_unlock(&tpg->tv_tpg_mutex);
1617
1618         vhost_scsi_hotunplug(tpg, lun);
1619
1620         mutex_unlock(&vhost_scsi_mutex);
1621 }
1622
1623 static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
1624 {
1625         struct vhost_scsi_cmd *tv_cmd;
1626         unsigned int i;
1627
1628         if (!se_sess->sess_cmd_map)
1629                 return;
1630
1631         for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1632                 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1633
1634                 kfree(tv_cmd->tvc_sgl);
1635                 kfree(tv_cmd->tvc_prot_sgl);
1636                 kfree(tv_cmd->tvc_upages);
1637         }
1638 }
1639
1640 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
1641                 struct config_item *item, const char *page, size_t count)
1642 {
1643         struct se_portal_group *se_tpg = attrib_to_tpg(item);
1644         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1645                                 struct vhost_scsi_tpg, se_tpg);
1646         unsigned long val;
1647         int ret = kstrtoul(page, 0, &val);
1648
1649         if (ret) {
1650                 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1651                 return ret;
1652         }
1653         if (val != 0 && val != 1 && val != 3) {
1654                 pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1655                 return -EINVAL;
1656         }
1657         tpg->tv_fabric_prot_type = val;
1658
1659         return count;
1660 }
1661
1662 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
1663                 struct config_item *item, char *page)
1664 {
1665         struct se_portal_group *se_tpg = attrib_to_tpg(item);
1666         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1667                                 struct vhost_scsi_tpg, se_tpg);
1668
1669         return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1670 }
1671
1672 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
1673
1674 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1675         &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
1676         NULL,
1677 };
1678
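/*
 * Session-creation callback passed to target_setup_session(): walk the
 * preallocated tag pool and attach fixed-size data SGL, user page pointer
 * and protection SGL arrays to every command descriptor, so the I/O
 * submission path never has to allocate memory.
 */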
1679 static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
1680                                struct se_session *se_sess, void *p)
1681 {
1682         struct vhost_scsi_cmd *tv_cmd;
1683         unsigned int i;
1684
1685         for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1686                 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1687
1688                 tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1689                                           sizeof(struct scatterlist),
1690                                           GFP_KERNEL);
1691                 if (!tv_cmd->tvc_sgl) {
1692                         pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1693                         goto out;
1694                 }
1695
1696                 tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1697                                              sizeof(struct page *),
1698                                              GFP_KERNEL);
1699                 if (!tv_cmd->tvc_upages) {
1700                         pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1701                         goto out;
1702                 }
1703
1704                 tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1705                                                sizeof(struct scatterlist),
1706                                                GFP_KERNEL);
1707                 if (!tv_cmd->tvc_prot_sgl) {
1708                         pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1709                         goto out;
1710                 }
1711         }
1712         return 0;
1713 out:
1714         vhost_scsi_free_cmd_map_res(se_sess);
1715         return -ENOMEM;
1716 }
1717
1718 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1719                                 const char *name)
1720 {
1721         struct vhost_scsi_nexus *tv_nexus;
1722
1723         mutex_lock(&tpg->tv_tpg_mutex);
1724         if (tpg->tpg_nexus) {
1725                 mutex_unlock(&tpg->tv_tpg_mutex);
1726                 pr_debug("tpg->tpg_nexus already exists\n");
1727                 return -EEXIST;
1728         }
1729
1730         tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
1731         if (!tv_nexus) {
1732                 mutex_unlock(&tpg->tv_tpg_mutex);
1733                 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1734                 return -ENOMEM;
1735         }
1736         /*
1737          * Since we are running in 'demo mode' this call will generate a
1738          * struct se_node_acl for the vhost_scsi struct se_portal_group with
1739          * the SCSI Initiator port name of the passed configfs group 'name'.
1740          */
1741         tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
1742                                         VHOST_SCSI_DEFAULT_TAGS,
1743                                         sizeof(struct vhost_scsi_cmd),
1744                                         TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
1745                                         (unsigned char *)name, tv_nexus,
1746                                         vhost_scsi_nexus_cb);
1747         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1748                 mutex_unlock(&tpg->tv_tpg_mutex);
1749                 kfree(tv_nexus);
1750                 return -ENOMEM;
1751         }
1752         tpg->tpg_nexus = tv_nexus;
1753
1754         mutex_unlock(&tpg->tv_tpg_mutex);
1755         return 0;
1756 }
1757
1758 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1759 {
1760         struct se_session *se_sess;
1761         struct vhost_scsi_nexus *tv_nexus;
1762
1763         mutex_lock(&tpg->tv_tpg_mutex);
1764         tv_nexus = tpg->tpg_nexus;
1765         if (!tv_nexus) {
1766                 mutex_unlock(&tpg->tv_tpg_mutex);
1767                 return -ENODEV;
1768         }
1769
1770         se_sess = tv_nexus->tvn_se_sess;
1771         if (!se_sess) {
1772                 mutex_unlock(&tpg->tv_tpg_mutex);
1773                 return -ENODEV;
1774         }
1775
1776         if (tpg->tv_tpg_port_count != 0) {
1777                 mutex_unlock(&tpg->tv_tpg_mutex);
1778                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1779                         " active TPG port count: %d\n",
1780                         tpg->tv_tpg_port_count);
1781                 return -EBUSY;
1782         }
1783
1784         if (tpg->tv_tpg_vhost_count != 0) {
1785                 mutex_unlock(&tpg->tv_tpg_mutex);
1786                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1787                         " active TPG vhost count: %d\n",
1788                         tpg->tv_tpg_vhost_count);
1789                 return -EBUSY;
1790         }
1791
1792         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1793                 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
1794                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1795
1796         vhost_scsi_free_cmd_map_res(se_sess);
1797         /*
1798          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1799          */
1800         target_remove_session(se_sess);
1801         tpg->tpg_nexus = NULL;
1802         mutex_unlock(&tpg->tv_tpg_mutex);
1803
1804         kfree(tv_nexus);
1805         return 0;
1806 }
1807
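/*
 * The per-TPG "nexus" configfs attribute shows or (re)creates the virtual
 * I_T nexus.  An illustrative shell session (the paths and WWN are
 * examples only, and depend on where configfs is mounted and on the names
 * chosen by the administrator):
 *
 *	cd /sys/kernel/config/target/vhost/naa.6001405c3f8e2a01/tpgt_1
 *	echo naa.6001405c3f8e2a01 > nexus    # create the I_T nexus
 *	cat nexus                            # show the initiator WWN
 *	echo NULL > nexus                    # drop it again
 */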
1808 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
1809 {
1810         struct se_portal_group *se_tpg = to_tpg(item);
1811         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1812                                 struct vhost_scsi_tpg, se_tpg);
1813         struct vhost_scsi_nexus *tv_nexus;
1814         ssize_t ret;
1815
1816         mutex_lock(&tpg->tv_tpg_mutex);
1817         tv_nexus = tpg->tpg_nexus;
1818         if (!tv_nexus) {
1819                 mutex_unlock(&tpg->tv_tpg_mutex);
1820                 return -ENODEV;
1821         }
1822         ret = snprintf(page, PAGE_SIZE, "%s\n",
1823                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1824         mutex_unlock(&tpg->tv_tpg_mutex);
1825
1826         return ret;
1827 }
1828
1829 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
1830                 const char *page, size_t count)
1831 {
1832         struct se_portal_group *se_tpg = to_tpg(item);
1833         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1834                                 struct vhost_scsi_tpg, se_tpg);
1835         struct vhost_scsi_tport *tport_wwn = tpg->tport;
1836         unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
1837         int ret;
1838         /*
1839          * Shut down the active I_T nexus if 'NULL' is passed.
1840          */
1841         if (!strncmp(page, "NULL", 4)) {
1842                 ret = vhost_scsi_drop_nexus(tpg);
1843                 return (!ret) ? count : ret;
1844         }
1845         /*
1846          * Otherwise make sure the passed virtual Initiator port WWN matches
1847          * the fabric protocol_id set in vhost_scsi_make_tport(), and call
1848          * vhost_scsi_make_nexus().
1849          */
1850         if (strlen(page) >= VHOST_SCSI_NAMELEN) {
1851                 pr_err("Emulated NAA SAS Address: %s, exceeds"
1852                                 " max: %d\n", page, VHOST_SCSI_NAMELEN);
1853                 return -EINVAL;
1854         }
1855         snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
1856
1857         ptr = strstr(i_port, "naa.");
1858         if (ptr) {
1859                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1860                         pr_err("Passed SAS Initiator Port %s does not"
1861                                 " match target port protoid: %s\n", i_port,
1862                                 vhost_scsi_dump_proto_id(tport_wwn));
1863                         return -EINVAL;
1864                 }
1865                 port_ptr = &i_port[0];
1866                 goto check_newline;
1867         }
1868         ptr = strstr(i_port, "fc.");
1869         if (ptr) {
1870                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1871                         pr_err("Passed FCP Initiator Port %s does not"
1872                                 " match target port protoid: %s\n", i_port,
1873                                 vhost_scsi_dump_proto_id(tport_wwn));
1874                         return -EINVAL;
1875                 }
1876                 port_ptr = &i_port[3]; /* Skip over "fc." */
1877                 goto check_newline;
1878         }
1879         ptr = strstr(i_port, "iqn.");
1880         if (ptr) {
1881                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1882                         pr_err("Passed iSCSI Initiator Port %s does not"
1883                                 " match target port protoid: %s\n", i_port,
1884                                 vhost_scsi_dump_proto_id(tport_wwn));
1885                         return -EINVAL;
1886                 }
1887                 port_ptr = &i_port[0];
1888                 goto check_newline;
1889         }
1890         pr_err("Unable to locate prefix for emulated Initiator Port:"
1891                         " %s\n", i_port);
1892         return -EINVAL;
1893         /*
1894          * Clear any trailing newline for the NAA WWN
1895          */
1896 check_newline:
1897         if (i_port[strlen(i_port)-1] == '\n')
1898                 i_port[strlen(i_port)-1] = '\0';
1899
1900         ret = vhost_scsi_make_nexus(tpg, port_ptr);
1901         if (ret < 0)
1902                 return ret;
1903
1904         return count;
1905 }
1906
1907 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
1908
1909 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
1910         &vhost_scsi_tpg_attr_nexus,
1911         NULL,
1912 };
1913
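/*
 * Called when userspace creates a "tpgt_<n>" directory under a vhost WWN
 * in configfs.  The numeric suffix becomes the target port group tag and
 * must stay below VHOST_SCSI_MAX_TARGET; the new TPG is registered with
 * the target core and added to the global vhost_scsi_list.
 */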
1914 static struct se_portal_group *
1915 vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
1916 {
1917         struct vhost_scsi_tport *tport = container_of(wwn,
1918                         struct vhost_scsi_tport, tport_wwn);
1919
1920         struct vhost_scsi_tpg *tpg;
1921         u16 tpgt;
1922         int ret;
1923
1924         if (strstr(name, "tpgt_") != name)
1925                 return ERR_PTR(-EINVAL);
1926         if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
1927                 return ERR_PTR(-EINVAL);
1928
1929         tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
1930         if (!tpg) {
1931                 pr_err("Unable to allocate struct vhost_scsi_tpg\n");
1932                 return ERR_PTR(-ENOMEM);
1933         }
1934         mutex_init(&tpg->tv_tpg_mutex);
1935         INIT_LIST_HEAD(&tpg->tv_tpg_list);
1936         tpg->tport = tport;
1937         tpg->tport_tpgt = tpgt;
1938
1939         ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
1940         if (ret < 0) {
1941                 kfree(tpg);
1942                 return NULL;
1943         }
1944         mutex_lock(&vhost_scsi_mutex);
1945         list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
1946         mutex_unlock(&vhost_scsi_mutex);
1947
1948         return &tpg->se_tpg;
1949 }
1950
1951 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
1952 {
1953         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1954                                 struct vhost_scsi_tpg, se_tpg);
1955
1956         mutex_lock(&vhost_scsi_mutex);
1957         list_del(&tpg->tv_tpg_list);
1958         mutex_unlock(&vhost_scsi_mutex);
1959         /*
1960          * Release the virtual I_T Nexus for this vhost TPG
1961          */
1962         vhost_scsi_drop_nexus(tpg);
1963         /*
1964          * Deregister the se_tpg from TCM..
1965          */
1966         core_tpg_deregister(se_tpg);
1967         kfree(tpg);
1968 }
1969
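/*
 * Called when userspace creates the WWN directory itself.  The directory
 * name selects the emulated transport: "naa." maps to SAS, "fc." to FCP
 * (with the prefix stripped from the stored name) and "iqn." to iSCSI;
 * anything else is rejected.
 */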
1970 static struct se_wwn *
1971 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
1972                      struct config_group *group,
1973                      const char *name)
1974 {
1975         struct vhost_scsi_tport *tport;
1976         char *ptr;
1977         u64 wwpn = 0;
1978         int off = 0;
1979
1980         /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
1981                 return ERR_PTR(-EINVAL); */
1982
1983         tport = kzalloc(sizeof(*tport), GFP_KERNEL);
1984         if (!tport) {
1985                 pr_err("Unable to allocate struct vhost_scsi_tport\n");
1986                 return ERR_PTR(-ENOMEM);
1987         }
1988         tport->tport_wwpn = wwpn;
1989         /*
1990          * Determine the emulated Protocol Identifier and Target Port Name
1991          * based on the incoming configfs directory name.
1992          */
1993         ptr = strstr(name, "naa.");
1994         if (ptr) {
1995                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1996                 goto check_len;
1997         }
1998         ptr = strstr(name, "fc.");
1999         if (ptr) {
2000                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2001                 off = 3; /* Skip over "fc." */
2002                 goto check_len;
2003         }
2004         ptr = strstr(name, "iqn.");
2005         if (ptr) {
2006                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2007                 goto check_len;
2008         }
2009
2010         pr_err("Unable to locate prefix for emulated Target Port:"
2011                         " %s\n", name);
2012         kfree(tport);
2013         return ERR_PTR(-EINVAL);
2014
2015 check_len:
2016         if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2017                 pr_err("Emulated %s Address: %s, exceeds"
2018                         " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2019                         VHOST_SCSI_NAMELEN);
2020                 kfree(tport);
2021                 return ERR_PTR(-EINVAL);
2022         }
2023         snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2024
2025         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2026                 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2027
2028         return &tport->tport_wwn;
2029 }
2030
2031 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2032 {
2033         struct vhost_scsi_tport *tport = container_of(wwn,
2034                                 struct vhost_scsi_tport, tport_wwn);
2035
2036         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2037                 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2038                 tport->tport_name);
2039
2040         kfree(tport);
2041 }
2042
2043 static ssize_t
2044 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2045 {
2046         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2047                 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2048                 utsname()->machine);
2049 }
2050
2051 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2052
2053 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2054         &vhost_scsi_wwn_attr_version,
2055         NULL,
2056 };
2057
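/*
 * Fabric template registered with the target core under the name "vhost".
 * It glues the generic target/configfs infrastructure to the callbacks in
 * this file: WWN/TPG creation, LUN link/unlink, command completion and
 * the demo-mode access checks used for vhost initiators.
 */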
2058 static const struct target_core_fabric_ops vhost_scsi_ops = {
2059         .module                         = THIS_MODULE,
2060         .name                           = "vhost",
2061         .get_fabric_name                = vhost_scsi_get_fabric_name,
2062         .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
2063         .tpg_get_tag                    = vhost_scsi_get_tpgt,
2064         .tpg_check_demo_mode            = vhost_scsi_check_true,
2065         .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
2066         .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2067         .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2068         .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
2069         .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
2070         .release_cmd                    = vhost_scsi_release_cmd,
2071         .check_stop_free                = vhost_scsi_check_stop_free,
2072         .sess_get_index                 = vhost_scsi_sess_get_index,
2073         .sess_get_initiator_sid         = NULL,
2074         .write_pending                  = vhost_scsi_write_pending,
2075         .write_pending_status           = vhost_scsi_write_pending_status,
2076         .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
2077         .get_cmd_state                  = vhost_scsi_get_cmd_state,
2078         .queue_data_in                  = vhost_scsi_queue_data_in,
2079         .queue_status                   = vhost_scsi_queue_status,
2080         .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
2081         .aborted_task                   = vhost_scsi_aborted_task,
2082         /*
2083          * Setup callers for generic logic in target_core_fabric_configfs.c
2084          */
2085         .fabric_make_wwn                = vhost_scsi_make_tport,
2086         .fabric_drop_wwn                = vhost_scsi_drop_tport,
2087         .fabric_make_tpg                = vhost_scsi_make_tpg,
2088         .fabric_drop_tpg                = vhost_scsi_drop_tpg,
2089         .fabric_post_link               = vhost_scsi_port_link,
2090         .fabric_pre_unlink              = vhost_scsi_port_unlink,
2091
2092         .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
2093         .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
2094         .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
2095 };
2096
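/*
 * Module init: create the dedicated submission workqueue, register the
 * /dev/vhost-scsi misc device and finally the "vhost" fabric template;
 * vhost_scsi_exit() and the error paths unwind in the opposite order.
 */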
2097 static int __init vhost_scsi_init(void)
2098 {
2099         int ret = -ENOMEM;
2100
2101         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2102                 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2103                 utsname()->machine);
2104
2105         /*
2106          * Use our own dedicated workqueue for submitting I/O into
2107          * target core to avoid contention within system_wq.
2108          */
2109         vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2110         if (!vhost_scsi_workqueue)
2111                 goto out;
2112
2113         ret = vhost_scsi_register();
2114         if (ret < 0)
2115                 goto out_destroy_workqueue;
2116
2117         ret = target_register_template(&vhost_scsi_ops);
2118         if (ret < 0)
2119                 goto out_vhost_scsi_deregister;
2120
2121         return 0;
2122
2123 out_vhost_scsi_deregister:
2124         vhost_scsi_deregister();
2125 out_destroy_workqueue:
2126         destroy_workqueue(vhost_scsi_workqueue);
2127 out:
2128         return ret;
2129 }
2130
2131 static void vhost_scsi_exit(void)
2132 {
2133         target_unregister_template(&vhost_scsi_ops);
2134         vhost_scsi_deregister();
2135         destroy_workqueue(vhost_scsi_workqueue);
2136 }
2137
2138 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2139 MODULE_ALIAS("tcm_vhost");
2140 MODULE_LICENSE("GPL");
2141 module_init(vhost_scsi_init);
2142 module_exit(vhost_scsi_exit);