/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_sa.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
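/*
 * The table above exposes /proc/sys/net/rdma_ucm/max_backlog (registered in
 * ucma_init() below); ucma_listen() clamps the user-supplied listen backlog
 * to this value.
 */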
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;
	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;
	struct list_head	list;
	struct list_head	mc_list;
	/* mark that device is in process of destroying the internal HW
	 * resources, protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;
	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
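/*
 * Locking overview: the global "mut" guards the two IDRs and ctx->closing;
 * each open file has its own file->mut guarding that file's ctx_list and
 * event_list.  Context lifetime is tracked with ctx->ref/ctx->comp rather
 * than a lock.
 */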
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}
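/*
 * Note the get/put pattern: the final ucma_put_ctx() completes ctx->comp,
 * and the destroy paths (ucma_close_id(), ucma_destroy_id()) drop their own
 * reference and then wait_for_completion() before calling rdma_destroy_id(),
 * so no command handler can still be using the cm_id when it is destroyed.
 */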
static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive till its explicit destroying
	 * by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}
static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}
static void ucma_copy_ud_event(struct ib_device *device,
			       struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}
static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}
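/*
 * For multicast events the rdma_cm core hands back the ucma_multicast
 * pointer that ucma_process_join() registered as the join's private context,
 * so the event is reported against the multicast id/uid rather than the
 * owning context's.
 */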
/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* only if context is pointing to cm_id that it owns it and can be
	 * queued to be closed, otherwise that cm_id is an inflight one that
	 * is part of that context event list pending to be detached and
	 * reattached to its new context as part of ucma_get_event,
	 * handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
				   &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
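/*
 * A non-zero return from this handler tells the rdma_cm core to destroy the
 * id itself: for a connect request we cannot take ownership of the new child
 * id if the event allocation fails or the listen backlog is exhausted.
 */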
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}
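/*
 * Note that the ucma_context for a new connection is created here, not in
 * the connect-request callback: the context (and its idr id) only comes into
 * existence once userspace actually consumes the event, and the listener's
 * backlog credit is returned at the same time.
 */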
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}
static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = rdma_create_id(current->nsproxy->net_ns,
			       ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}
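/*
 * For reference, a userspace caller reaches this handler roughly as follows
 * (a minimal sketch; the variable names and error handling are illustrative
 * only, and "my_user_context" is hypothetical):
 *
 *	struct { struct rdma_ucm_cmd_hdr hdr;
 *		 struct rdma_ucm_create_id cmd; } msg;
 *	struct rdma_ucm_create_id_resp resp;
 *	int fd = open("/dev/infiniband/rdma_cm", O_RDWR | O_CLOEXEC);
 *
 *	msg.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
 *	msg.hdr.in  = sizeof(msg.cmd);
 *	msg.hdr.out = sizeof(resp);
 *	msg.cmd.uid = (uintptr_t) my_user_context;  // echoed back in events
 *	msg.cmd.response = (uintptr_t) &resp;
 *	msg.cmd.ps = RDMA_PS_TCP;
 *	msg.cmd.qp_type = IB_QPT_RC;
 *	write(fd, &msg, sizeof(msg));               // resp.id names the new id
 */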
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}
/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to cleanup the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing.  We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight
	 * closing task */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
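/*
 * The flush_workqueue() above synchronizes with ucma_close_id(): after the
 * flush, either a device-removal close has already run (ctx->closing is set)
 * or none is in flight, so exactly one of the two paths destroys the cm_id.
 */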
static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.src_addr) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}
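/*
 * In the helpers above, num_paths == 0 means no SA path records were
 * resolved, so the GIDs and P_Key are reconstructed from the device address;
 * for one or two resolved paths the cases fall through so ib_route[1] (the
 * alternate path) is copied before ib_route[0] (the primary).
 */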
static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}
static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}
static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
			struct sa_path_rec ib;

			sa_convert_path_opa_to_ib(&ib, rec);
			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
		} else {
			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
		}
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}
static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
			       NULL);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, NULL,
			       (union ib_gid *)&addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}
static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}
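/*
 * The qkey from userspace is honored only for AF_IB addressing; for IP-based
 * addressing it is zeroed here and the rdma_cm core derives the qkey itself.
 */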
static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}
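/*
 * For example, with the default max_backlog of 1024: a requested backlog of
 * 0 (or anything >= 1024) is replaced by 1024, while a request of, say, 128
 * is used as-is.  The count is decremented per pending connect request in
 * ucma_event_handler() and restored in ucma_get_event().
 */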
static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.qp_state > IB_QPS_ERR)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (!ctx->cm_id->device) {
		ret = -EINVAL;
		goto out;
	}

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}
static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	if (!ctx->cm_id->device)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
	ib_sa_unpack_path(path_data->path_rec, &sa_path);

	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
		struct sa_path_rec opa;

		sa_convert_path_ib_to_opa(&opa, &sa_path);
		ret = rdma_set_ib_path(ctx->cm_id, &opa);
	} else {
		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
	}
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}
static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->cm_id->device)
		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);

	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->addr_size != rdma_addr_size(addr))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	if (!join_cmd.addr_size)
		return -EINVAL;

	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}
static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}
static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}
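/*
 * Taking the two file mutexes in address order means concurrent migrations
 * between the same pair of files always acquire them in the same order,
 * which rules out an ABBA deadlock; mutex_lock_nested() merely tells lockdep
 * that taking a second lock of the same class is intentional.
 */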
static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};
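/*
 * Each write() carries exactly one command from this table: ucma_write()
 * below bounds-checks hdr.cmd against ARRAY_SIZE() and rejects the NULL
 * RDMA_USER_CM_CMD_GET_OPTION slot with -ENOSYS.
 */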
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}
static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = EPOLLIN | EPOLLRDNORM;

	return mask;
}
/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}
static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* At that step once ctx was marked as destroying and workqueue
		 * was flushed we are safe from any inflights handlers that
		 * might put other closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}
static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
};
static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;

err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}
static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);