drivers/nvme/target/io-cmd-file.c

// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC            16
#define NVMET_MIN_MPOOL_OBJ             16

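/*
 * Tear down a file-backed namespace: wait for any outstanding buffered-io
 * work, free the bvec mempool and slab cache, and drop the file reference.
 */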
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
        if (ns->file) {
                if (ns->buffered_io)
                        flush_workqueue(buffered_io_wq);
                mempool_destroy(ns->bvec_pool);
                ns->bvec_pool = NULL;
                kmem_cache_destroy(ns->bvec_cache);
                ns->bvec_cache = NULL;
                fput(ns->file);
                ns->file = NULL;
        }
}

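/*
 * Open the backing file (O_DIRECT unless buffered_io is requested), record
 * its size and block size, and set up a slab cache plus mempool of bio_vec
 * arrays used as a fallback allocator under memory pressure.
 */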
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
        int flags = O_RDWR | O_LARGEFILE;
        struct kstat stat;
        int ret;

        if (!ns->buffered_io)
                flags |= O_DIRECT;

        ns->file = filp_open(ns->device_path, flags, 0);
        if (IS_ERR(ns->file)) {
                pr_err("failed to open file %s: (%ld)\n",
                                ns->device_path, PTR_ERR(ns->file));
                return PTR_ERR(ns->file);
        }

        ret = vfs_getattr(&ns->file->f_path,
                        &stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
        if (ret)
                goto err;

        ns->size = stat.size;
        ns->blksize_shift = file_inode(ns->file)->i_blkbits;

        ns->bvec_cache = kmem_cache_create("nvmet-bvec",
                        NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!ns->bvec_cache) {
                ret = -ENOMEM;
                goto err;
        }

        ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
                        mempool_free_slab, ns->bvec_cache);

        if (!ns->bvec_pool) {
                ret = -ENOMEM;
                goto err;
        }

        return ret;
err:
        ns->size = 0;
        ns->blksize_shift = 0;
        nvmet_file_ns_disable(ns);
        return ret;
}

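/* Map one scatterlist page into a bio_vec entry. */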
static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
{
        bv->bv_page = sg_page_iter_page(iter);
        bv->bv_offset = iter->sg->offset;
        bv->bv_len = PAGE_SIZE - iter->sg->offset;
}

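/*
 * Issue the prepared bio_vec array through the backing file's
 * read_iter/write_iter method; a FUA write is forced out with IOCB_DSYNC.
 */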
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
                unsigned long nr_segs, size_t count, int ki_flags)
{
        struct kiocb *iocb = &req->f.iocb;
        ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
        struct iov_iter iter;
        int rw;

        if (req->cmd->rw.opcode == nvme_cmd_write) {
                if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                        ki_flags |= IOCB_DSYNC;
                call_iter = req->ns->file->f_op->write_iter;
                rw = WRITE;
        } else {
                call_iter = req->ns->file->f_op->read_iter;
                rw = READ;
        }

        iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

        iocb->ki_pos = pos;
        iocb->ki_filp = req->ns->file;
        iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

        return call_iter(iocb, &iter);
}

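/*
 * I/O completion handler, also called directly on the synchronous paths:
 * free the bvec array unless it is the inline one, then translate the byte
 * count or errno into an NVMe status.
 */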
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
        struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
        u16 status = NVME_SC_SUCCESS;

        if (req->f.bvec != req->inline_bvec) {
                if (likely(req->f.mpool_alloc == false))
                        kfree(req->f.bvec);
                else
                        mempool_free(req->f.bvec, req->ns->bvec_pool);
        }

        if (unlikely(ret != req->data_len))
                status = errno_to_nvme_status(req, ret);
        nvmet_req_complete(req, status);
}

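/*
 * Build bio_vecs from the request scatterlist and submit them. When the
 * bvecs come from the mempool and the request needs more than
 * NVMET_MAX_MPOOL_BVEC entries, split the transfer into synchronous
 * chunks instead. Returns false if the caller should retry without
 * IOCB_NOWAIT from a context that may block.
 */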
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
        ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
        struct sg_page_iter sg_pg_iter;
        unsigned long bv_cnt = 0;
        bool is_sync = false;
        size_t len = 0, total_len = 0;
        ssize_t ret = 0;
        loff_t pos;

        if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
                is_sync = true;

        pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
        if (unlikely(pos + req->data_len > req->ns->size)) {
                nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
                return true;
        }

        memset(&req->f.iocb, 0, sizeof(struct kiocb));
        for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
                nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
                len += req->f.bvec[bv_cnt].bv_len;
                total_len += req->f.bvec[bv_cnt].bv_len;
                bv_cnt++;

                WARN_ON_ONCE((nr_bvec - 1) < 0);

                if (unlikely(is_sync) &&
                    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
                        ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
                        if (ret < 0)
                                goto complete;

                        pos += len;
                        bv_cnt = 0;
                        len = 0;
                }
                nr_bvec--;
        }

        if (WARN_ON_ONCE(total_len != req->data_len)) {
                ret = -EIO;
                goto complete;
        }

        if (unlikely(is_sync)) {
                ret = total_len;
                goto complete;
        }

        /*
         * A NULL ki_complete asks for synchronous execution, which we want
         * for the IOCB_NOWAIT case.
         */
        if (!(ki_flags & IOCB_NOWAIT))
                req->f.iocb.ki_complete = nvmet_file_io_done;

        ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

        switch (ret) {
        case -EIOCBQUEUED:
                return true;
        case -EAGAIN:
                if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
                        goto complete;
                return false;
        case -EOPNOTSUPP:
                /*
                 * Some file systems return -EOPNOTSUPP for IOCB_NOWAIT;
                 * hand the request back so it is retried without
                 * IOCB_NOWAIT.
                 */
                if ((ki_flags & IOCB_NOWAIT))
                        return false;
                break;
        }

complete:
        nvmet_file_io_done(&req->f.iocb, ret, 0);
        return true;
}

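/* Buffered I/O is executed from a workqueue so that it may block. */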
static void nvmet_file_buffered_io_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

        nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
        queue_work(buffered_io_wq, &req->f.work);
}

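/*
 * Entry point for read/write: pick the inline bvecs, a kmalloc'ed array,
 * or the mempool as a last resort, then execute inline (trying IOCB_NOWAIT
 * first for buffered I/O) or defer to the buffered-io workqueue.
 */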
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
        ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);

        if (!req->sg_cnt || !nr_bvec) {
                nvmet_req_complete(req, 0);
                return;
        }

        if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
                req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
                                GFP_KERNEL);
        else
                req->f.bvec = req->inline_bvec;

        if (unlikely(!req->f.bvec)) {
                /* fallback under memory pressure */
                req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
                req->f.mpool_alloc = true;
        } else
                req->f.mpool_alloc = false;

        if (req->ns->buffered_io) {
                if (likely(!req->f.mpool_alloc) &&
                                nvmet_file_execute_io(req, IOCB_NOWAIT))
                        return;
                nvmet_file_submit_buffered_io(req);
        } else
                nvmet_file_execute_io(req, 0);
}

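/* A flush maps to vfs_fsync() on the backing file. */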
u16 nvmet_file_flush(struct nvmet_req *req)
{
        return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

        nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_flush_work);
        schedule_work(&req->f.work);
}

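/*
 * Discard each DSM range by hole-punching the backing file; a range
 * beyond the end of the namespace fails the command.
 */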
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
        int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
        struct nvme_dsm_range range;
        loff_t offset, len;
        u16 status = 0;
        int ret;
        int i;

        for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
                status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
                                        sizeof(range));
                if (status)
                        break;

                offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
                len = le32_to_cpu(range.nlb);
                len <<= req->ns->blksize_shift;
                if (offset + len > req->ns->size) {
                        req->error_slba = le64_to_cpu(range.slba);
                        status = errno_to_nvme_status(req, -ENOSPC);
                        break;
                }

                ret = vfs_fallocate(req->ns->file, mode, offset, len);
                if (ret && ret != -EOPNOTSUPP) {
                        req->error_slba = le64_to_cpu(range.slba);
                        status = errno_to_nvme_status(req, ret);
                        break;
                }
        }

        nvmet_req_complete(req, status);
}

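/*
 * Of the DSM attributes only Deallocate (AD) is implemented; the
 * integral-read/write hints complete successfully without action.
 */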
static void nvmet_file_dsm_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

        switch (le32_to_cpu(req->cmd->dsm.attributes)) {
        case NVME_DSMGMT_AD:
                nvmet_file_execute_discard(req);
                return;
        case NVME_DSMGMT_IDR:
        case NVME_DSMGMT_IDW:
        default:
                /* Not supported yet */
                nvmet_req_complete(req, 0);
                return;
        }
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_dsm_work);
        schedule_work(&req->f.work);
}

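/* Write Zeroes maps to a zero-range fallocate on the backing file. */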
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
        struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
        int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
        loff_t offset;
        loff_t len;
        int ret;

        offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
        len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
                        req->ns->blksize_shift);

        if (unlikely(offset + len > req->ns->size)) {
                nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
                return;
        }

        ret = vfs_fallocate(req->ns->file, mode, offset, len);
        nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
        schedule_work(&req->f.work);
}

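/*
 * Dispatch for file-backed namespaces: set the execute handler and the
 * expected data length per opcode, or reject with Invalid Opcode.
 */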
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        switch (cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->execute = nvmet_file_execute_rw;
                req->data_len = nvmet_rw_len(req);
                return 0;
        case nvme_cmd_flush:
                req->execute = nvmet_file_execute_flush;
                req->data_len = 0;
                return 0;
        case nvme_cmd_dsm:
                req->execute = nvmet_file_execute_dsm;
                req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
                        sizeof(struct nvme_dsm_range);
                return 0;
        case nvme_cmd_write_zeroes:
                req->execute = nvmet_file_execute_write_zeroes;
                req->data_len = 0;
                return 0;
        default:
                pr_err("unhandled cmd for file ns %d on qid %d\n",
                                cmd->common.opcode, req->sq->qid);
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
}