of how to define a PHY.
Required properties:
- - reg : Offset and length of the register set for the device
+ - reg : Offset and length of the register set for the device, and optionally
+ the offset and length of the TBIPA register (TBI PHY address
+ register). If TBIPA register is not specified, the driver will
+ attempt to infer it from the register set specified (your mileage may
+ vary).
- compatible : Should define the compatible device type for the
mdio. Currently supported strings/devices are:
- "fsl,gianfar-tbi"
device_type = "mdio";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x0 0x2d24000 0x0 0x4000>;
+ reg = <0x0 0x2d24000 0x0 0x4000>,
+ <0x0 0x2d10030 0x0 0x4>;
};
ptp_clock@2d10e00 {
void *private;
int err;
- /* If caller uses non-allowed flag, return error. */
- if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
- return -EINVAL;
-
if (sock->state == SS_CONNECTED)
return -EINVAL;
if (addr_len < sizeof(*sa))
return -EINVAL;
+ /* If caller uses non-allowed flag, return error. */
+ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+ return -EINVAL;
+
sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
struct cavium_ptp;
-struct xcast_addr {
- struct list_head list;
- u64 addr;
-};
-
struct xcast_addr_list {
- struct list_head list;
int count;
+ u64 mc[];
};
struct nicvf_work {
work.work);
struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
union nic_mbx mbx = {};
- struct xcast_addr *xaddr, *next;
+ int idx;
if (!vf_work)
return;
/* check if we have any specific MACs to be added to PF DMAC filter */
if (vf_work->mc) {
/* now go through kernel list of MACs and add them one by one */
- list_for_each_entry_safe(xaddr, next,
- &vf_work->mc->list, list) {
+ for (idx = 0; idx < vf_work->mc->count; idx++) {
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
- mbx.xcast.data.mac = xaddr->addr;
+ mbx.xcast.data.mac = vf_work->mc->mc[idx];
nicvf_send_msg_to_pf(nic, &mbx);
-
- /* after receiving ACK from PF release memory */
- list_del(&xaddr->list);
- kfree(xaddr);
- vf_work->mc->count--;
}
kfree(vf_work->mc);
}
mode |= BGX_XCAST_MCAST_FILTER;
/* here we need to copy mc addrs */
if (netdev_mc_count(netdev)) {
- struct xcast_addr *xaddr;
-
- mc_list = kmalloc(sizeof(*mc_list), GFP_ATOMIC);
- INIT_LIST_HEAD(&mc_list->list);
+ mc_list = kmalloc(offsetof(typeof(*mc_list),
+ mc[netdev_mc_count(netdev)]),
+ GFP_ATOMIC);
+ if (unlikely(!mc_list))
+ return;
+ mc_list->count = 0;
netdev_hw_addr_list_for_each(ha, &netdev->mc) {
- xaddr = kmalloc(sizeof(*xaddr),
- GFP_ATOMIC);
- xaddr->addr =
+ mc_list->mc[mc_list->count] =
ether_addr_to_u64(ha->addr);
- list_add_tail(&xaddr->list,
- &mc_list->list);
mc_list->count++;
}
}
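The hunk above replaces per-address list nodes with a single allocation that ends in a flexible array member, sized in one expression with offsetof(). Below is a minimal userspace sketch of the same sizing arithmetic (hypothetical names, plain malloc standing in for kmalloc), not the driver code itself:

/*
 * Sketch only: demonstrates kmalloc(offsetof(typeof(*list), mc[n]), ...)
 * style sizing for a struct that ends in a flexible array member.
 * A variable index inside offsetof() is a GCC/Clang extension, which is
 * what the driver hunk above relies on as well.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>

struct addr_list {
	int count;
	uint64_t mc[];		/* flexible array member, as in xcast_addr_list */
};

static struct addr_list *addr_list_alloc(int n)
{
	/* header plus n * sizeof(uint64_t), computed in one expression */
	struct addr_list *list = malloc(offsetof(struct addr_list, mc[n]));

	if (list)
		list->count = 0;
	return list;
}

int main(void)
{
	struct addr_list *list = addr_list_alloc(4);
	int i;

	if (!list)
		return 1;
	for (i = 0; i < 4; i++)
		list->mc[list->count++] = 0x001122334455ULL + i;
	for (i = 0; i < list->count; i++)
		printf("mc[%d] = %012llx\n", i, (unsigned long long)list->mc[i]);
	free(list);
	return 0;
}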
};
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
+static void set_tbipa(const u32 tbipa_val, struct platform_device *pdev,
+ uint32_t __iomem * (*get_tbipa)(void __iomem *),
+ void __iomem *reg_map, struct resource *reg_res)
+{
+ struct device_node *np = pdev->dev.of_node;
+ uint32_t __iomem *tbipa;
+ bool tbipa_mapped;
+
+ tbipa = of_iomap(np, 1);
+ if (tbipa) {
+ tbipa_mapped = true;
+ } else {
+ tbipa_mapped = false;
+ tbipa = (*get_tbipa)(reg_map);
+
+ /*
+ * Add consistency check to make sure TBI is contained within
+ * the mapped range (not because we would get a segfault,
+ * rather to catch bugs in computing TBI address). Print error
+ * message but continue anyway.
+ */
+ if ((void *)tbipa > reg_map + resource_size(reg_res) - 4)
+ dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
+ ((void *)tbipa - reg_map) + 4);
+ }
+
+ iowrite32be(be32_to_cpu(tbipa_val), tbipa);
+
+ if (tbipa_mapped)
+ iounmap(tbipa);
+}
+
static int fsl_pq_mdio_probe(struct platform_device *pdev)
{
const struct of_device_id *id =
if (tbi) {
const u32 *prop = of_get_property(tbi, "reg", NULL);
- uint32_t __iomem *tbipa;
-
if (!prop) {
dev_err(&pdev->dev,
"missing 'reg' property in node %pOF\n",
err = -EBUSY;
goto error;
}
-
- tbipa = data->get_tbipa(priv->map);
-
- /*
- * Add consistency check to make sure TBI is contained
- * within the mapped range (not because we would get a
- * segfault, rather to catch bugs in computing TBI
- * address). Print error message but continue anyway.
- */
- if ((void *)tbipa > priv->map + resource_size(&res) - 4)
- dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
- ((void *)tbipa - priv->map) + 4);
-
- iowrite32be(be32_to_cpup(prop), tbipa);
+ set_tbipa(*prop, pdev,
+ data->get_tbipa, priv->map, &res);
}
}
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
+static int init_crq_queue(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
dev_info(dev, "replenish pools failure\n");
pool->free_map[pool->next_free] = index;
pool->rx_buff[index].skb = NULL;
- if (!dma_mapping_error(dev, dma_addr))
- dma_unmap_single(dev, dma_addr, pool->buff_size,
- DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
adapter->replenish_add_buff_failure++;
atomic_add(buffers_added, &pool->available);
- if (lpar_rc == H_CLOSED) {
+ if (lpar_rc == H_CLOSED || adapter->failover_pending) {
/* Disable buffer pool replenishment and report carrier off if
- * queue is closed. Firmware guarantees that a signal will
- * be sent to the driver, triggering a reset.
+ * queue is closed or pending failover.
+ * Firmware guarantees that a signal will be sent to the
+ * driver, triggering a reset.
*/
deactivate_rx_pools(adapter);
netif_carrier_off(adapter->netdev);
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc;
+ /* If device failover is pending, just set device state and return.
+ * Device operation will be handled by reset routine.
+ */
+ if (adapter->failover_pending) {
+ adapter->state = VNIC_OPEN;
+ return 0;
+ }
+
mutex_lock(&adapter->reset_lock);
if (adapter->state != VNIC_CLOSED) {
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
if (rc)
return rc;
- ibmvnic_cleanup(netdev);
adapter->state = VNIC_CLOSED;
return 0;
}
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc;
+ /* If device failover is pending, just set device state and return.
+ * Device operation will be handled by reset routine.
+ */
+ if (adapter->failover_pending) {
+ adapter->state = VNIC_CLOSED;
+ return 0;
+ }
+
mutex_lock(&adapter->reset_lock);
rc = __ibmvnic_close(netdev);
+ ibmvnic_cleanup(netdev);
mutex_unlock(&adapter->reset_lock);
return rc;
dev_kfree_skb_any(skb);
tx_buff->skb = NULL;
- if (lpar_rc == H_CLOSED) {
- /* Disable TX and report carrier off if queue is closed.
+ if (lpar_rc == H_CLOSED || adapter->failover_pending) {
+ /* Disable TX and report carrier off if queue is closed
+ * or pending failover.
* Firmware guarantees that a signal will be sent to the
* driver, triggering a reset or some other action.
*/
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
- if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
- rc = ibmvnic_reenable_crq_queue(adapter);
- if (rc)
- return 0;
- ibmvnic_cleanup(netdev);
- } else if (rwi->reset_reason == VNIC_RESET_FAILOVER) {
- ibmvnic_cleanup(netdev);
- } else {
+ ibmvnic_cleanup(netdev);
+
+ if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
+ adapter->reset_reason != VNIC_RESET_FAILOVER) {
rc = __ibmvnic_close(netdev);
if (rc)
return rc;
*/
adapter->state = VNIC_PROBED;
+ if (adapter->wait_for_reset) {
+ rc = init_crq_queue(adapter);
+ } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ rc = ibmvnic_reenable_crq_queue(adapter);
+ release_sub_crqs(adapter, 1);
+ } else {
+ rc = ibmvnic_reset_crq(adapter);
+ if (!rc)
+ rc = vio_enable_interrupts(adapter->vdev);
+ }
+
+ if (rc) {
+ netdev_err(adapter->netdev,
+ "Couldn't initialize crq. rc=%d\n", rc);
+ return rc;
+ }
+
rc = ibmvnic_init(adapter);
if (rc)
return IBMVNIC_INIT_FAILED;
mutex_unlock(&adapter->reset_lock);
}
-static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
- enum ibmvnic_reset_reason reason)
+static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ enum ibmvnic_reset_reason reason)
{
struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
struct list_head *entry;
+ int ret;
if (adapter->state == VNIC_REMOVING ||
- adapter->state == VNIC_REMOVED) {
- netdev_dbg(netdev, "Adapter removing, skipping reset\n");
- return;
+ adapter->state == VNIC_REMOVED ||
+ adapter->failover_pending) {
+ ret = EBUSY;
+ netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
+ goto err;
}
if (adapter->state == VNIC_PROBING) {
netdev_warn(netdev, "Adapter reset during probe\n");
- adapter->init_done_rc = EAGAIN;
- return;
+ ret = adapter->init_done_rc = EAGAIN;
+ goto err;
}
mutex_lock(&adapter->rwi_lock);
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset\n");
mutex_unlock(&adapter->rwi_lock);
- return;
+ ret = EBUSY;
+ goto err;
}
}
if (!rwi) {
mutex_unlock(&adapter->rwi_lock);
ibmvnic_close(netdev);
- return;
+ ret = ENOMEM;
+ goto err;
}
rwi->reset_reason = reason;
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
schedule_work(&adapter->ibmvnic_reset);
+
+ return 0;
+err:
+ if (adapter->wait_for_reset)
+ adapter->wait_for_reset = false;
+ return -ret;
}
static void ibmvnic_tx_timeout(struct net_device *dev)
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
+ int rc, ret;
+
adapter->fallback.mtu = adapter->req_mtu;
adapter->fallback.rx_queues = adapter->req_rx_queues;
adapter->fallback.tx_queues = adapter->req_tx_queues;
adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
init_completion(&adapter->reset_done);
- ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
adapter->wait_for_reset = true;
+ rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ if (rc)
+ return rc;
wait_for_completion(&adapter->reset_done);
+ ret = 0;
if (adapter->reset_done_rc) {
+ ret = -EIO;
adapter->desired.mtu = adapter->fallback.mtu;
adapter->desired.rx_queues = adapter->fallback.rx_queues;
adapter->desired.tx_queues = adapter->fallback.tx_queues;
adapter->desired.tx_entries = adapter->fallback.tx_entries;
init_completion(&adapter->reset_done);
- ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ adapter->wait_for_reset = true;
+ rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ if (rc)
+ return ret;
wait_for_completion(&adapter->reset_done);
}
adapter->wait_for_reset = false;
- return adapter->reset_done_rc;
+ return ret;
}
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
}
memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+ atomic_set(&scrq->used, 0);
scrq->cur = 0;
rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
union sub_crq *next;
int index;
int i, j;
- u8 first;
+ u8 *first;
restart_loop:
while (pending_scrq(adapter, scrq)) {
txbuff->data_dma[j] = 0;
}
/* if sub_crq was sent indirectly */
- first = txbuff->indir_arr[0].generic.first;
- if (first == IBMVNIC_CRQ_CMD) {
+ first = &txbuff->indir_arr[0].generic.first;
+ if (*first == IBMVNIC_CRQ_CMD) {
dma_unmap_single(dev, txbuff->indir_dma,
sizeof(txbuff->indir_arr),
DMA_TO_DEVICE);
+ *first = 0;
}
if (txbuff->last_frag) {
int i;
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
dma_unmap_single(dev, adapter->login_rsp_buf_token,
- adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
+ adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
/* If the number of queues requested can't be allocated by the
* server, the login response will return with code 1. We will need
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
adapter->from_passive_init = true;
+ adapter->failover_pending = false;
complete(&adapter->init_done);
+ ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
dev_info(dev, "Backing device failover detected\n");
- ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ adapter->failover_pending = true;
} else {
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
u64 old_num_rx_queues, old_num_tx_queues;
int rc;
- if (adapter->resetting && !adapter->wait_for_reset) {
- rc = ibmvnic_reset_crq(adapter);
- if (!rc)
- rc = vio_enable_interrupts(adapter->vdev);
- } else {
- rc = init_crq_queue(adapter);
- }
-
- if (rc) {
- dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
- return rc;
- }
-
adapter->from_passive_init = false;
old_num_rx_queues = adapter->req_rx_queues;
return -1;
}
- if (adapter->resetting && !adapter->wait_for_reset) {
+ if (adapter->resetting && !adapter->wait_for_reset &&
+ adapter->reset_reason != VNIC_RESET_MOBILITY) {
if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues) {
release_sub_crqs(adapter, 0);
adapter->mac_change_pending = false;
do {
+ rc = init_crq_queue(adapter);
+ if (rc) {
+ dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
+ rc);
+ goto ibmvnic_init_fail;
+ }
+
rc = ibmvnic_init(adapter);
if (rc && rc != EAGAIN)
goto ibmvnic_init_fail;
bool napi_enabled, from_passive_init;
bool mac_change_pending;
+ bool failover_pending;
struct ibmvnic_tunables desired;
struct ibmvnic_tunables fallback;
mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
- if (!mac_buf)
+ if (!mac_buf) {
+ status = ICE_ERR_NO_MEMORY;
goto err_unroll_fltr_mgmt_struct;
+ }
status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
devm_kfree(ice_hw_to_dev(hw), mac_buf);
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
- return ARRAY_SIZE(ice_regs_dump_list);
+ return sizeof(ice_regs_dump_list);
}
static void
regs->version = 1;
- for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list) / sizeof(u32); ++i)
+ for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
}
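The two fixes above hinge on bytes versus element counts: the regs-length callback must report the dump buffer size in bytes, while the fill loop iterates over array elements. A tiny standalone sketch of the distinction (hypothetical register list, not the ice table):

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* hypothetical register offset table, standing in for ice_regs_dump_list */
static const uint32_t regs_dump_list[] = { 0x0000, 0x0004, 0x0010 };

int main(void)
{
	/* what the length callback should return: buffer size in bytes */
	printf("len in bytes    : %zu\n", sizeof(regs_dump_list));
	/* what the fill loop should use: number of registers to read */
	printf("entries to read : %zu\n", ARRAY_SIZE(regs_dump_list));
	return 0;
}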
{
int i;
- if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+ if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
return -EINVAL;
memset(pe, 0, sizeof(*pe));
},
};
-static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-
- return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
-}
-
-static const struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
- .occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
-};
-
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
struct devlink_resource_size_params *kvd_size_params,
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
kvd_size, MLXSW_SP_RESOURCE_KVD,
DEVLINK_RESOURCE_ID_PARENT_TOP,
- &kvd_size_params,
- NULL);
+ &kvd_size_params);
if (err)
return err;
linear_size,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD,
- &linear_size_params,
- &mlxsw_sp_resource_kvd_linear_ops);
+ &linear_size_params);
if (err)
return err;
double_size,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
MLXSW_SP_RESOURCE_KVD,
- &hash_double_size_params,
- NULL);
+ &hash_double_size_params);
if (err)
return err;
single_size,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD,
- &hash_single_size_params,
- NULL);
+ &hash_single_size_params);
if (err)
return err;
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
-u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
struct mlxsw_sp_acl_rule_info {
return occ;
}
-u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
+static u64 mlxsw_sp_kvdl_occ_get(void *priv)
{
+ const struct mlxsw_sp *mlxsw_sp = priv;
u64 occ = 0;
int i;
return occ;
}
-static u64 mlxsw_sp_kvdl_single_occ_get(struct devlink *devlink)
+static u64 mlxsw_sp_kvdl_single_occ_get(void *priv)
{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
return mlxsw_sp_kvdl_part_occ(part);
}
-static u64 mlxsw_sp_kvdl_chunks_occ_get(struct devlink *devlink)
+static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv)
{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
return mlxsw_sp_kvdl_part_occ(part);
}
-static u64 mlxsw_sp_kvdl_large_chunks_occ_get(struct devlink *devlink)
+static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv)
{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
return mlxsw_sp_kvdl_part_occ(part);
}
-static const struct devlink_resource_ops mlxsw_sp_kvdl_single_ops = {
- .occ_get = mlxsw_sp_kvdl_single_occ_get,
-};
-
-static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_ops = {
- .occ_get = mlxsw_sp_kvdl_chunks_occ_get,
-};
-
-static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_large_ops = {
- .occ_get = mlxsw_sp_kvdl_large_chunks_occ_get,
-};
-
int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
MLXSW_SP_KVDL_SINGLE_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params,
- &mlxsw_sp_kvdl_single_ops);
+ &size_params);
if (err)
return err;
MLXSW_SP_KVDL_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params,
- &mlxsw_sp_kvdl_chunks_ops);
+ &size_params);
if (err)
return err;
MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params,
- &mlxsw_sp_kvdl_chunks_large_ops);
+ &size_params);
return err;
}
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_kvdl *kvdl;
int err;
if (err)
goto err_kvdl_parts_init;
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ mlxsw_sp_kvdl_occ_get,
+ mlxsw_sp);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ mlxsw_sp_kvdl_single_occ_get,
+ mlxsw_sp);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ mlxsw_sp_kvdl_chunks_occ_get,
+ mlxsw_sp);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ mlxsw_sp_kvdl_large_chunks_occ_get,
+ mlxsw_sp);
+
return 0;
err_kvdl_parts_init:
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR);
mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
kfree(mlxsw_sp->kvdl);
}
call_rcu(&nvdev->rcu, free_netvsc_device);
}
-static void netvsc_revoke_buf(struct hv_device *device,
- struct netvsc_device *net_device)
+static void netvsc_revoke_recv_buf(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct net_device *ndev)
{
struct nvsp_message *revoke_packet;
- struct net_device *ndev = hv_get_drvdata(device);
int ret;
/*
}
net_device->recv_section_cnt = 0;
}
+}
+
+static void netvsc_revoke_send_buf(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct net_device *ndev)
+{
+ struct nvsp_message *revoke_packet;
+ int ret;
/* Deal with the send buffer we may have setup.
* If we got a send section size, it means we received a
}
}
-static void netvsc_teardown_gpadl(struct hv_device *device,
- struct netvsc_device *net_device)
+static void netvsc_teardown_recv_gpadl(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct net_device *ndev)
{
- struct net_device *ndev = hv_get_drvdata(device);
int ret;
if (net_device->recv_buf_gpadl_handle) {
}
net_device->recv_buf_gpadl_handle = 0;
}
+}
+
+static void netvsc_teardown_send_gpadl(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct net_device *ndev)
+{
+ int ret;
if (net_device->send_buf_gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
goto exit;
cleanup:
- netvsc_revoke_buf(device, net_device);
- netvsc_teardown_gpadl(device, net_device);
+ netvsc_revoke_recv_buf(device, net_device, ndev);
+ netvsc_revoke_send_buf(device, net_device, ndev);
+ netvsc_teardown_recv_gpadl(device, net_device, ndev);
+ netvsc_teardown_send_gpadl(device, net_device, ndev);
exit:
return ret;
init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
-
trace_nvsp_send(ndev, init_packet);
/* Send the init request */
= rtnl_dereference(net_device_ctx->nvdev);
int i;
- netvsc_revoke_buf(device, net_device);
+ /*
+ * Revoke receive buffer. If host is pre-Win2016 then tear down
+ * receive buffer GPADL. Do the same for send buffer.
+ */
+ netvsc_revoke_recv_buf(device, net_device, ndev);
+ if (vmbus_proto_version < VERSION_WIN10)
+ netvsc_teardown_recv_gpadl(device, net_device, ndev);
+
+ netvsc_revoke_send_buf(device, net_device, ndev);
+ if (vmbus_proto_version < VERSION_WIN10)
+ netvsc_teardown_send_gpadl(device, net_device, ndev);
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
*/
netdev_dbg(ndev, "net device safe to remove\n");
- /* older versions require that buffer be revoked before close */
- if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4)
- netvsc_teardown_gpadl(device, net_device);
-
/* Now, we can close the channel safely */
vmbus_close(device->channel);
- if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4)
- netvsc_teardown_gpadl(device, net_device);
+ /*
+ * If host is Win2016 or higher then we do the GPADL tear down
+ * here after VMBus is closed.
+ */
+ if (vmbus_proto_version >= VERSION_WIN10) {
+ netvsc_teardown_recv_gpadl(device, net_device, ndev);
+ netvsc_teardown_send_gpadl(device, net_device, ndev);
+ }
/* Release all resources */
free_netvsc_device_rcu(net_device);
/* IPv4
*/
-static u64 nsim_ipv4_fib_resource_occ_get(struct devlink *devlink)
+static u64 nsim_ipv4_fib_resource_occ_get(void *priv)
{
- struct net *net = nsim_devlink_net(devlink);
+ struct net *net = priv;
return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
}
-static struct devlink_resource_ops nsim_ipv4_fib_res_ops = {
- .occ_get = nsim_ipv4_fib_resource_occ_get,
-};
-
-static u64 nsim_ipv4_fib_rules_res_occ_get(struct devlink *devlink)
+static u64 nsim_ipv4_fib_rules_res_occ_get(void *priv)
{
- struct net *net = nsim_devlink_net(devlink);
+ struct net *net = priv;
return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
}
-static struct devlink_resource_ops nsim_ipv4_fib_rules_res_ops = {
- .occ_get = nsim_ipv4_fib_rules_res_occ_get,
-};
-
/* IPv6
*/
-static u64 nsim_ipv6_fib_resource_occ_get(struct devlink *devlink)
+static u64 nsim_ipv6_fib_resource_occ_get(void *priv)
{
- struct net *net = nsim_devlink_net(devlink);
+ struct net *net = priv;
return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
}
-static struct devlink_resource_ops nsim_ipv6_fib_res_ops = {
- .occ_get = nsim_ipv6_fib_resource_occ_get,
-};
-
-static u64 nsim_ipv6_fib_rules_res_occ_get(struct devlink *devlink)
+static u64 nsim_ipv6_fib_rules_res_occ_get(void *priv)
{
- struct net *net = nsim_devlink_net(devlink);
+ struct net *net = priv;
return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
}
-static struct devlink_resource_ops nsim_ipv6_fib_rules_res_ops = {
- .occ_get = nsim_ipv6_fib_rules_res_occ_get,
-};
-
static int devlink_resources_register(struct devlink *devlink)
{
struct devlink_resource_size_params params = {
err = devlink_resource_register(devlink, "IPv4", (u64)-1,
NSIM_RESOURCE_IPV4,
DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params, NULL);
+ &params);
if (err) {
pr_err("Failed to register IPv4 top resource\n");
goto out;
n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
err = devlink_resource_register(devlink, "fib", n,
NSIM_RESOURCE_IPV4_FIB,
- NSIM_RESOURCE_IPV4,
- &params, &nsim_ipv4_fib_res_ops);
+ NSIM_RESOURCE_IPV4, &params);
if (err) {
pr_err("Failed to register IPv4 FIB resource\n");
return err;
n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
err = devlink_resource_register(devlink, "fib-rules", n,
NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV4,
- &params, &nsim_ipv4_fib_rules_res_ops);
+ NSIM_RESOURCE_IPV4, &params);
if (err) {
pr_err("Failed to register IPv4 FIB rules resource\n");
return err;
err = devlink_resource_register(devlink, "IPv6", (u64)-1,
NSIM_RESOURCE_IPV6,
DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params, NULL);
+ &params);
if (err) {
pr_err("Failed to register IPv6 top resource\n");
goto out;
n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
err = devlink_resource_register(devlink, "fib", n,
NSIM_RESOURCE_IPV6_FIB,
- NSIM_RESOURCE_IPV6,
- &params, &nsim_ipv6_fib_res_ops);
+ NSIM_RESOURCE_IPV6, &params);
if (err) {
pr_err("Failed to register IPv6 FIB resource\n");
return err;
n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
err = devlink_resource_register(devlink, "fib-rules", n,
NSIM_RESOURCE_IPV6_FIB_RULES,
- NSIM_RESOURCE_IPV6,
- &params, &nsim_ipv6_fib_rules_res_ops);
+ NSIM_RESOURCE_IPV6, &params);
if (err) {
pr_err("Failed to register IPv6 FIB rules resource\n");
return err;
}
+
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB,
+ nsim_ipv4_fib_resource_occ_get,
+ net);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ nsim_ipv4_fib_rules_res_occ_get,
+ net);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB,
+ nsim_ipv6_fib_resource_occ_get,
+ net);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ nsim_ipv6_fib_rules_res_occ_get,
+ net);
out:
return err;
}
kfree(dp83640);
}
+static int dp83640_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* From DP83640 datasheet: "Software driver code must wait 3 us
+ * following a software reset before allowing further serial MII
+ * operations with the DP83640."
+ */
+ udelay(10); /* Taking udelay inaccuracy into account */
+
+ return 0;
+}
+
static int dp83640_config_init(struct phy_device *phydev)
{
struct dp83640_private *dp83640 = phydev->priv;
.flags = PHY_HAS_INTERRUPT,
.probe = dp83640_probe,
.remove = dp83640_remove,
+ .soft_reset = dp83640_soft_reset,
.config_init = dp83640_config_init,
.ack_interrupt = dp83640_ack_interrupt,
.config_intr = dp83640_config_intr,
return marvell_config_init(phydev);
}
+static int m88e1318_config_init(struct phy_device *phydev)
+{
+ if (phy_interrupt_is_valid(phydev)) {
+ int err = phy_modify_paged(
+ phydev, MII_MARVELL_LED_PAGE,
+ MII_88E1318S_PHY_LED_TCR,
+ MII_88E1318S_PHY_LED_TCR_FORCE_INT,
+ MII_88E1318S_PHY_LED_TCR_INTn_ENABLE |
+ MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW);
+ if (err < 0)
+ return err;
+ }
+
+ return m88e1121_config_init(phydev);
+}
+
static int m88e1510_config_init(struct phy_device *phydev)
{
int err;
phydev->advertising &= ~pause;
}
- return m88e1121_config_init(phydev);
+ return m88e1318_config_init(phydev);
}
static int m88e1118_config_aneg(struct phy_device *phydev)
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &m88e1121_config_init,
+ .config_init = &m88e1318_config_init,
.config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving rx. */
+#define VHOST_NET_PKT_WEIGHT(vq) ((vq)->num * 2)
+
/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
struct socket *sock;
struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
bool zcopy, zcopy_used;
+ int sent_pkts = 0;
mutex_lock(&vq->mutex);
sock = vq->private_data;
else
vhost_zerocopy_signal_used(net, vq);
vhost_net_tx_packet(net);
- if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+ if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
+ unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT(vq))) {
vhost_poll_queue(&vq->poll);
break;
}
u16 conn_timeout);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role);
+ u8 role, bdaddr_t *direct_rpa);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
unsigned int headers_count;
};
-/**
- * struct devlink_resource_ops - resource ops
- * @occ_get: get the occupied size
- */
-struct devlink_resource_ops {
- u64 (*occ_get)(struct devlink *devlink);
-};
-
/**
* struct devlink_resource_size_params - resource's size parameters
* @size_min: minimum size which can be set
size_params->unit = unit;
}
+typedef u64 devlink_resource_occ_get_t(void *priv);
+
/**
* struct devlink_resource - devlink resource
* @name: name of the resource
* @size_params: size parameters
* @list: parent list
* @resource_list: list of child resources
- * @resource_ops: resource ops
*/
struct devlink_resource {
const char *name;
struct devlink_resource_size_params size_params;
struct list_head list;
struct list_head resource_list;
- const struct devlink_resource_ops *resource_ops;
+ devlink_resource_occ_get_t *occ_get;
+ void *occ_get_priv;
};
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
u64 resource_size,
u64 resource_id,
u64 parent_resource_id,
- const struct devlink_resource_size_params *size_params,
- const struct devlink_resource_ops *resource_ops);
+ const struct devlink_resource_size_params *size_params);
void devlink_resources_unregister(struct devlink *devlink,
struct devlink_resource *resource);
int devlink_resource_size_get(struct devlink *devlink,
int devlink_dpipe_table_resource_set(struct devlink *devlink,
const char *table_name, u64 resource_id,
u64 resource_units);
+void devlink_resource_occ_get_register(struct devlink *devlink,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv);
+void devlink_resource_occ_get_unregister(struct devlink *devlink,
+ u64 resource_id);
#else
u64 resource_size,
u64 resource_id,
u64 parent_resource_id,
- const struct devlink_resource_size_params *size_params,
- const struct devlink_resource_ops *resource_ops)
+ const struct devlink_resource_size_params *size_params)
{
return 0;
}
return -EOPNOTSUPP;
}
+static inline void
+devlink_resource_occ_get_register(struct devlink *devlink,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv)
+{
+}
+
+static inline void
+devlink_resource_occ_get_unregister(struct devlink *devlink,
+ u64 resource_id)
+{
+}
+
#endif
#endif /* _NET_DEVLINK_H_ */
#define tw_family __tw_common.skc_family
#define tw_state __tw_common.skc_state
#define tw_reuse __tw_common.skc_reuse
+#define tw_reuseport __tw_common.skc_reuseport
#define tw_ipv6only __tw_common.skc_ipv6only
#define tw_bound_dev_if __tw_common.skc_bound_dev_if
#define tw_node __tw_common.skc_nulls_node
static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
{
- return remaining >= sizeof(*rtnh) &&
+ return remaining >= (int)sizeof(*rtnh) &&
rtnh->rtnh_len >= sizeof(*rtnh) &&
rtnh->rtnh_len <= remaining;
}
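The (int) cast above matters because remaining is a signed int while sizeof(*rtnh) is a size_t: if remaining has gone negative, the old comparison promotes it to a huge unsigned value and wrongly succeeds. A minimal sketch of that promotion (stand-in struct, not the real rtnexthop):

#include <stdio.h>

struct nh_demo { unsigned short len; unsigned char flags, hops; };	/* 4-byte stand-in */

int main(void)
{
	int remaining = -4;	/* e.g. after subtracting a too-large nested length */

	/* unsigned comparison: -4 converts to a huge size_t, check passes */
	printf("remaining >= sizeof(...)      -> %d\n",
	       remaining >= sizeof(struct nh_demo));
	/* signed comparison after the cast: check correctly fails */
	printf("remaining >= (int)sizeof(...) -> %d\n",
	       remaining >= (int)sizeof(struct nh_demo));
	return 0;
}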
psock->cork = NULL;
}
- sk->sk_prot = psock->sk_proto;
- psock->sk_proto = NULL;
+ if (psock->sk_proto) {
+ sk->sk_prot = psock->sk_proto;
+ psock->sk_proto = NULL;
+ }
out:
rcu_read_unlock();
}
close_fun = psock->save_close;
write_lock_bh(&sk->sk_callback_lock);
+ if (psock->cork) {
+ free_start_sg(psock->sock, psock->cork);
+ kfree(psock->cork);
+ psock->cork = NULL;
+ }
+
list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
list_del(&md->list);
free_start_sg(psock->sock, md);
}
}
-static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
- enum bpf_attach_type attach_type)
-{
- switch (prog->type) {
- case BPF_PROG_TYPE_CGROUP_SOCK:
- case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
- return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
- default:
- return 0;
- }
-}
-
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD expected_attach_type
#ifdef CONFIG_CGROUP_BPF
+static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
+ enum bpf_attach_type attach_type)
+{
+ switch (prog->type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
+ default:
+ return 0;
+ }
+}
+
#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
static int sockmap_get_from_fd(const union bpf_attr *attr,
}
static void hci_req_add_le_create_conn(struct hci_request *req,
- struct hci_conn *conn)
+ struct hci_conn *conn,
+ bdaddr_t *direct_rpa)
{
struct hci_cp_le_create_conn cp;
struct hci_dev *hdev = conn->hdev;
u8 own_addr_type;
- /* Update random address, but set require_privacy to false so
- * that we never connect with an non-resolvable address.
+ /* If direct address was provided we use it instead of current
+ * address.
*/
- if (hci_update_random_address(req, false, conn_use_rpa(conn),
- &own_addr_type))
- return;
+ if (direct_rpa) {
+ if (bacmp(&req->hdev->random_addr, direct_rpa))
+ hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+ direct_rpa);
+
+ /* direct address is always RPA */
+ own_addr_type = ADDR_LE_DEV_RANDOM;
+ } else {
+ /* Update random address, but set require_privacy to false so
+ * that we never connect with a non-resolvable address.
+ */
+ if (hci_update_random_address(req, false, conn_use_rpa(conn),
+ &own_addr_type))
+ return;
+ }
memset(&cp, 0, sizeof(cp));
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role)
+ u8 role, bdaddr_t *direct_rpa)
{
struct hci_conn_params *params;
struct hci_conn *conn;
hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
}
- hci_req_add_le_create_conn(&req, conn);
+ hci_req_add_le_create_conn(&req, conn, direct_rpa);
create_conn:
err = hci_req_run(&req, create_le_conn_complete);
/* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
bdaddr_t *addr,
- u8 addr_type, u8 adv_type)
+ u8 addr_type, u8 adv_type,
+ bdaddr_t *direct_rpa)
{
struct hci_conn *conn;
struct hci_conn_params *params;
}
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
- HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
+ HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
+ direct_rpa);
if (!IS_ERR(conn)) {
/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
* by higher layer that tried to connect, if no then
bdaddr_type = irk->addr_type;
}
- /* Check if we have been requested to connect to this device */
- conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
+ /* Check if we have been requested to connect to this device.
+ *
+ * direct_addr is set only for directed advertising reports (it is NULL
+ * for advertising reports) and is already verified to be RPA above.
+ */
+ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
+ direct_addr);
if (conn && type == LE_ADV_IND) {
/* Store report for later inclusion by
* mgmt_device_connected
hcon = hci_connect_le(hdev, dst, dst_type,
chan->sec_level,
HCI_LE_CONN_TIMEOUT,
- HCI_ROLE_SLAVE);
+ HCI_ROLE_SLAVE, NULL);
else
hcon = hci_connect_le_scan(hdev, dst, dst_type,
chan->sec_level,
{
if (*name == '\0')
return false;
- if (strlen(name) >= IFNAMSIZ)
+ if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
return false;
if (!strcmp(name, ".") || !strcmp(name, ".."))
return false;
return -EINVAL;
list_for_each_entry(ha, &list->list, list) {
- if (!memcmp(ha->addr, addr, addr_len) &&
- ha->type == addr_type) {
+ if (ha->type == addr_type &&
+ !memcmp(ha->addr, addr, addr_len)) {
if (global) {
/* check if addr is already used as global */
if (ha->global_use)
return 0;
}
+static int devlink_resource_occ_put(struct devlink_resource *resource,
+ struct sk_buff *skb)
+{
+ if (!resource->occ_get)
+ return 0;
+ return nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
+ resource->occ_get(resource->occ_get_priv),
+ DEVLINK_ATTR_PAD);
+}
+
static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
struct devlink_resource *resource)
{
if (resource->size != resource->size_new)
nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
resource->size_new, DEVLINK_ATTR_PAD);
- if (resource->resource_ops && resource->resource_ops->occ_get)
- if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
- resource->resource_ops->occ_get(devlink),
- DEVLINK_ATTR_PAD))
- goto nla_put_failure;
+ if (devlink_resource_occ_put(resource, skb))
+ goto nla_put_failure;
if (devlink_resource_size_params_put(resource, skb))
goto nla_put_failure;
if (list_empty(&resource->resource_list))
* @resource_id: resource's id
* @parent_resource_id: resource's parent id
* @size_params: size parameters
- * @resource_ops: resource ops
*/
int devlink_resource_register(struct devlink *devlink,
const char *resource_name,
u64 resource_size,
u64 resource_id,
u64 parent_resource_id,
- const struct devlink_resource_size_params *size_params,
- const struct devlink_resource_ops *resource_ops)
+ const struct devlink_resource_size_params *size_params)
{
struct devlink_resource *resource;
struct list_head *resource_list;
resource->size = resource_size;
resource->size_new = resource_size;
resource->id = resource_id;
- resource->resource_ops = resource_ops;
resource->size_valid = true;
memcpy(&resource->size_params, size_params,
sizeof(resource->size_params));
}
EXPORT_SYMBOL_GPL(devlink_dpipe_table_resource_set);
+/**
+ * devlink_resource_occ_get_register - register occupancy getter
+ *
+ * @devlink: devlink
+ * @resource_id: resource id
+ * @occ_get: occupancy getter callback
+ * @occ_get_priv: occupancy getter callback priv
+ */
+void devlink_resource_occ_get_register(struct devlink *devlink,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv)
+{
+ struct devlink_resource *resource;
+
+ mutex_lock(&devlink->lock);
+ resource = devlink_resource_find(devlink, NULL, resource_id);
+ if (WARN_ON(!resource))
+ goto out;
+ WARN_ON(resource->occ_get);
+
+ resource->occ_get = occ_get;
+ resource->occ_get_priv = occ_get_priv;
+out:
+ mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_resource_occ_get_register);
+
+/**
+ * devlink_resource_occ_get_unregister - unregister occupancy getter
+ *
+ * @devlink: devlink
+ * @resource_id: resource id
+ */
+void devlink_resource_occ_get_unregister(struct devlink *devlink,
+ u64 resource_id)
+{
+ struct devlink_resource *resource;
+
+ mutex_lock(&devlink->lock);
+ resource = devlink_resource_find(devlink, NULL, resource_id);
+ if (WARN_ON(!resource))
+ goto out;
+ WARN_ON(!resource->occ_get);
+
+ resource->occ_get = NULL;
+ resource->occ_get_priv = NULL;
+out:
+ mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
+
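For reference, the occupancy-getter API added above is used by drivers in two steps, as the mlxsw and netdevsim hunks earlier in this series show: implement a devlink_resource_occ_get_t callback taking the driver's private pointer, then register and later unregister it against the resource id. A condensed sketch (hypothetical driver "foo" and resource id; error handling elided):

/*
 * Sketch only: how a driver wires an occupancy getter to a previously
 * registered devlink resource using the API added in this series.
 */
#include <net/devlink.h>

#define FOO_RESOURCE_COUNTERS	42	/* hypothetical resource id */

struct foo_priv {
	u64 counters_in_use;		/* whatever the driver tracks */
};

/* matches devlink_resource_occ_get_t: u64 (*)(void *priv) */
static u64 foo_counters_occ_get(void *priv)
{
	struct foo_priv *foo = priv;

	return foo->counters_in_use;
}

static void foo_resources_start(struct devlink *devlink, struct foo_priv *foo)
{
	/* resource must already exist via devlink_resource_register() */
	devlink_resource_occ_get_register(devlink, FOO_RESOURCE_COUNTERS,
					  foo_counters_occ_get, foo);
}

static void foo_resources_stop(struct devlink *devlink)
{
	devlink_resource_occ_get_unregister(devlink, FOO_RESOURCE_COUNTERS);
}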
static int __init devlink_module_init(void)
{
return genl_register_family(&devlink_nl_family);
n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
n->cloned = 1;
n->nohdr = 0;
+ n->peeked = 0;
n->destructor = NULL;
C(tail);
C(end);
ireq = inet_rsk(req);
sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+ ireq->ir_mark = inet_request_mark(sk, skb);
ireq->ireq_family = AF_INET;
ireq->ir_iif = sk->sk_bound_dev_if;
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
ireq->ireq_family = AF_INET6;
+ ireq->ir_mark = inet_request_mark(sk, skb);
if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct dsa_switch *ds;
+ struct dsa_port *slave_port;
if (device < 0 || device >= DSA_MAX_SWITCHES)
return NULL;
if (port < 0 || port >= ds->num_ports)
return NULL;
- return ds->ports[port].slave;
+ slave_port = &ds->ports[port];
+
+ if (unlikely(slave_port->type != DSA_PORT_TYPE_USER))
+ return NULL;
+
+ return slave_port->slave;
}
/* port.c */
/*unsigned long now; */
struct net *net = dev_net(dev);
- rt = ip_route_output(net, sip, tip, 0, 0);
+ rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
if (IS_ERR(rt))
return 1;
if (rt->dst.dev != dev) {
tw->tw_dport = inet->inet_dport;
tw->tw_family = sk->sk_family;
tw->tw_reuse = sk->sk_reuse;
+ tw->tw_reuseport = sk->sk_reuseport;
tw->tw_hash = sk->sk_hash;
tw->tw_ipv6only = 0;
tw->tw_transparent = inet->transparent;
p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
if (p) {
p->daddr = *daddr;
+ p->dtime = (__u32)jiffies;
refcount_set(&p->refcnt, 2);
atomic_set(&p->rid, 0);
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
struct net_device *dev;
char name[IFNAMSIZ];
- if (parms->name[0])
+ err = -E2BIG;
+ if (parms->name[0]) {
+ if (!dev_valid_name(parms->name))
+ goto failed;
strlcpy(name, parms->name, IFNAMSIZ);
- else {
- if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
- err = -E2BIG;
+ } else {
+ if (strlen(ops->kind) > (IFNAMSIZ - 3))
goto failed;
- }
strlcpy(name, ops->kind, IFNAMSIZ);
strncat(name, "%d", 2);
}
const struct sk_buff *skb)
{
__u8 tos = RT_FL_TOS(fl4);
- struct fib_result res;
+ struct fib_result res = {
+ .type = RTN_UNSPEC,
+ .fi = NULL,
+ .table = NULL,
+ .tclassid = 0,
+ };
struct rtable *rth;
- res.tclassid = 0;
- res.fi = NULL;
- res.table = NULL;
-
fl4->flowi4_iif = LOOPBACK_IFINDEX;
fl4->flowi4_tos = tos & IPTOS_RT_MASK;
fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
if (t || !create)
return t;
- if (parms->name[0])
+ if (parms->name[0]) {
+ if (!dev_valid_name(parms->name))
+ return NULL;
strlcpy(name, parms->name, IFNAMSIZ);
- else
+ } else {
strcpy(name, "ip6gre%d");
-
+ }
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ip6gre_tunnel_setup);
if (!dev)
static inline int ip6_forward_finish(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
+ struct dst_entry *dst = skb_dst(skb);
+
+ __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+ __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
+
return dst_output(net, sk, skb);
}
hdr->hop_limit--;
- __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
- __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
net, NULL, skb, skb->dev, dst->dev,
ip6_forward_finish);
struct net_device *dev;
struct ip6_tnl *t;
char name[IFNAMSIZ];
- int err = -ENOMEM;
+ int err = -E2BIG;
- if (p->name[0])
+ if (p->name[0]) {
+ if (!dev_valid_name(p->name))
+ goto failed;
strlcpy(name, p->name, IFNAMSIZ);
- else
+ } else {
sprintf(name, "ip6tnl%%d");
-
+ }
+ err = -ENOMEM;
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ip6_tnl_dev_setup);
if (!dev)
char name[IFNAMSIZ];
int err;
- if (p->name[0])
+ if (p->name[0]) {
+ if (!dev_valid_name(p->name))
+ goto failed;
strlcpy(name, p->name, IFNAMSIZ);
- else
+ } else {
sprintf(name, "ip6_vti%%d");
+ }
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
if (!dev)
if (!create)
goto failed;
- if (parms->name[0])
+ if (parms->name[0]) {
+ if (!dev_valid_name(parms->name))
+ goto failed;
strlcpy(name, parms->name, IFNAMSIZ);
- else
+ } else {
strcpy(name, "sit%d");
-
+ }
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ipip6_tunnel_setup);
if (!dev)
if (msg->msg_namelen) {
err = -EINVAL;
+ if (msg->msg_namelen < sizeof(struct sockaddr_nl))
+ goto out;
if (addr->nl_family != AF_NETLINK)
goto out;
dst_portid = addr->nl_pid;
static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
- if (cfg->is_ebpf)
- bpf_prog_put(cfg->filter);
- else
- bpf_prog_destroy(cfg->filter);
+ struct bpf_prog *filter = cfg->filter;
+
+ if (filter) {
+ if (cfg->is_ebpf)
+ bpf_prog_put(filter);
+ else
+ bpf_prog_destroy(filter);
+ }
kfree(cfg->bpf_ops);
kfree(cfg->bpf_name);
RCU_INIT_POINTER(*kp, key->next);
tcf_unbind_filter(tp, &key->res);
+ idr_remove(&ht->handle_idr, key->handle);
tcf_exts_get_net(&key->exts);
call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
return 0;
sctp_v6_map_v4(addr);
}
- if (addr->sa.sa_family == AF_INET)
+ if (addr->sa.sa_family == AF_INET) {
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
return sizeof(struct sockaddr_in);
+ }
return sizeof(struct sockaddr_in6);
}
if (!opt->pf->af_supported(addr->sa.sa_family, opt))
return NULL;
- /* V4 mapped address are really of AF_INET family */
- if (addr->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
- !opt->pf->af_supported(AF_INET, opt))
- return NULL;
+ if (addr->sa.sa_family == AF_INET6) {
+ if (len < SIN6_LEN_RFC2133)
+ return NULL;
+ /* V4 mapped address are really of AF_INET family */
+ if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+ !opt->pf->af_supported(AF_INET, opt))
+ return NULL;
+ }
/* If we get this far, af is valid. */
af = sctp_get_af_specific(addr->sa.sa_family);
if (!nlh)
return -EMSGSIZE;
- err = tipc_sk_fill_sock_diag(skb, tsk, req->tidiag_states,
+ err = tipc_sk_fill_sock_diag(skb, cb, tsk, req->tidiag_states,
__tipc_diag_gen_cookie);
if (err)
return err;
}
EXPORT_SYMBOL(tipc_nl_sk_walk);
-int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct tipc_sock *tsk,
- u32 sk_filter_state,
+int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
+ struct tipc_sock *tsk, u32 sk_filter_state,
u64 (*tipc_diag_gen_cookie)(struct sock *sk))
{
struct sock *sk = &tsk->sk;
nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
nla_put_u32(skb, TIPC_NLA_SOCK_UID,
- from_kuid_munged(sk_user_ns(NETLINK_CB(skb).sk),
+ from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
sock_i_uid(sk))) ||
nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
tipc_diag_gen_cookie(sk),
void tipc_sk_rht_destroy(struct net *net);
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb);
-int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct tipc_sock *tsk,
- u32 sk_filter_state,
+int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
+ struct tipc_sock *tsk, u32 sk_filter_state,
u64 (*tipc_diag_gen_cookie)(struct sock *sk));
int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
int (*skb_handler)(struct sk_buff *skb,