directions between the application and the hardware driver.
Format and semantics of CAPI messages are specified in the CAPI 2.0 standard.
-This standard is freely available from http://www.capi.org.
+This standard is freely available from https://www.capi.org.
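For illustration, here is a minimal application-side sketch of such a message
exchange as seen through the user-space libcapi20 library. It assumes the
usual declarations from <capi20.h>; the connection limits and the omitted
message handling are placeholders, not requirements of the standard or of the
kernel interface described below.

  /* Illustrative sketch only: register a CAPI 2.0 application and release
   * it again. Assumes libcapi20 (<capi20.h>); 0 is CapiNoError. */
  #include <stdio.h>
  #include <capi20.h>

  int main(void)
  {
          unsigned applid;

          if (capi20_isinstalled() != 0) {        /* 0 == CapiNoError */
                  fprintf(stderr, "no CAPI 2.0 support available\n");
                  return 1;
          }

          /* example limits: 1 logical connection, 8 B3 data blocks,
           * 2048 bytes per data block */
          if (capi20_register(1, 8, 2048, &applid) != 0)
                  return 1;

          /* ... exchange CAPI messages via capi20_put_message() and
           * capi20_get_message() as defined by the standard ... */

          capi20_release(applid);
          return 0;
  }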
2. Driver and Device Registration
de.alt.comm.isdn4linux
There is also a well maintained FAQ in English available at
- http://www.mhessler.de/i4lfaq/
+ https://www.mhessler.de/i4lfaq/
It can be viewed online, or downloaded in sgml/text/html format.
The FAQ can also be viewed online at
- http://www.isdn4linux.de/faq/
+ https://www.isdn4linux.de/faq/i4lfaq.html
or downloaded from
ftp://ftp.isdn4linux.de/pub/isdn4linux/FAQ/
In case you just want to see the FAQ online, or download the newest version,
you can have a look at my website:
-http://www.mhessler.de/i4lfaq/ (view + download)
+https://www.mhessler.de/i4lfaq/ (view + download)
or:
-http://www.isdn4linux.de/faq/ (view)
+https://www.isdn4linux.de/faq/i4lfaq.html (view)
As the extension indicates, the FAQ is in SGML format, and you can convert it
into text/html/... format by using the sgml2txt/sgml2html/... tools.
T-Com Sinus 721 data
Chicago 390 USB (KPN)
- See also http://www.erbze.info/sinus_gigaset.htm and
- http://gigaset307x.sourceforge.net/
+ See also http://www.erbze.info/sinus_gigaset.htm
+ (archived at https://web.archive.org/web/20100717020421/http://www.erbze.info:80/sinus_gigaset.htm ) and
+ http://gigaset307x.sourceforge.net/
We have also had reports from users of Gigaset M105 who could use the drivers
with SX 100 and CX 100 ISDN bases (only in unimodem mode, see section 2.5.)
to use CAPI 2.0 or ISDN4Linux for ISDN connections (voice or data).
There are some user space tools available at
- http://sourceforge.net/projects/gigaset307x/
+ https://sourceforge.net/projects/gigaset307x/
which provide access to additional device specific functions like SMS,
phonebook or call journal.
You can use a configuration tool of your distribution to configure this
"modem" or configure pppd/wvdial manually. There are some example ppp
configuration files and chat scripts in the gigaset-VERSION/ppp directory
- in the driver packages from http://sourceforge.net/projects/gigaset307x/.
+ in the driver packages from https://sourceforge.net/projects/gigaset307x/.
Please note that the USB drivers are not able to change the state of the
control lines. This means you must use "Stupid Mode" if you are using
wvdial or you should use the nocrtscts option of pppd.
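As an illustration, a wvdial section for such a device in unimodem mode could
look roughly like this (the dialer name, the device node /dev/ttyGU0 and the
account data are placeholders; adjust them for your own setup):

  [Dialer gigaset]
  Modem = /dev/ttyGU0
  Baud = 115200
  Init1 = ATZ
  Stupid Mode = 1
  Phone = <your provider's dial-in number>
  Username = <your login>
  Password = <your password>

When using pppd directly instead of wvdial, put the nocrtscts option into the
corresponding peers file for the same reason.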
---------------------------
If you can't solve problems with the driver on your own, feel free to
use one of the forums, bug trackers, or mailing lists on
- http://sourceforge.net/projects/gigaset307x
+ https://sourceforge.net/projects/gigaset307x
or write an electronic mail to the maintainers.
Try to provide as much information as possible, such as
4. Links, other software
---------------------
- Sourceforge project developing this driver and associated tools
- http://sourceforge.net/projects/gigaset307x
+ https://sourceforge.net/projects/gigaset307x
- Yahoo! Group on the Siemens Gigaset family of devices
- http://de.groups.yahoo.com/group/Siemens-Gigaset
+ https://de.groups.yahoo.com/group/Siemens-Gigaset
- Siemens Gigaset/T-Sinus compatibility table
http://www.erbze.info/sinus_gigaset.htm
+ (archived at https://web.archive.org/web/20100717020421/http://www.erbze.info:80/sinus_gigaset.htm )
5. Credits
" Size of Tx Buffer : %u\n"
" Number of Rx Buffer: %u\n"
" Size of Rx Buffer : %u\n"
- " Packets Receiverd : %u\n"
+ " Packets Received : %u\n"
" Packets Transmitted: %u\n"
" Cells Received : %u\n"
" Cells Transmitted : %u\n"
goto err_close;
}
- /* If the mode uses primary, then the following is handled by
- * bond_change_active_slave().
- */
- if (!bond_uses_primary(bond)) {
- /* set promiscuity level to new slave */
- if (bond_dev->flags & IFF_PROMISC) {
- res = dev_set_promiscuity(slave_dev, 1);
- if (res)
- goto err_close;
- }
-
- /* set allmulti level to new slave */
- if (bond_dev->flags & IFF_ALLMULTI) {
- res = dev_set_allmulti(slave_dev, 1);
- if (res)
- goto err_close;
- }
-
- netif_addr_lock_bh(bond_dev);
-
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
-
- netif_addr_unlock_bh(bond_dev);
- }
-
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_add(slave_dev, lacpdu_multicast);
- }
-
res = vlan_vids_add_by_dev(slave_dev, bond_dev);
if (res) {
netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
goto err_upper_unlink;
}
+ /* If the mode uses primary, then the following is handled by
+ * bond_change_active_slave().
+ */
+ if (!bond_uses_primary(bond)) {
+ /* set promiscuity level to new slave */
+ if (bond_dev->flags & IFF_PROMISC) {
+ res = dev_set_promiscuity(slave_dev, 1);
+ if (res)
+ goto err_sysfs_del;
+ }
+
+ /* set allmulti level to new slave */
+ if (bond_dev->flags & IFF_ALLMULTI) {
+ res = dev_set_allmulti(slave_dev, 1);
+ if (res) {
+ if (bond_dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(slave_dev, -1);
+ goto err_sysfs_del;
+ }
+ }
+
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ /* add lacpdu mc addr to mc list */
+ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+
+ dev_mc_add(slave_dev, lacpdu_multicast);
+ }
+ }
+
bond->slave_cnt++;
bond_compute_features(bond);
bond_set_carrier(bond);
return 0;
/* Undo stages on error */
+err_sysfs_del:
+ bond_sysfs_slave_del(new_slave);
+
err_upper_unlink:
bond_upper_dev_unlink(bond, new_slave);
netdev_rx_handler_unregister(slave_dev);
err_detach:
- if (!bond_uses_primary(bond))
- bond_hw_addr_flush(bond_dev, slave_dev);
-
vlan_vids_del_by_dev(slave_dev, bond_dev);
if (rcu_access_pointer(bond->primary_slave) == new_slave)
RCU_INIT_POINTER(bond->primary_slave, NULL);
{ .compatible = "mediatek,mt7530" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, mt7530_of_match);
static struct mdio_driver mt7530_mdio_driver = {
.probe = mt7530_probe,
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:mediatek-mt7530");
}
mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
+ q_map = 0;
/* Enable all initialized RXQs. */
for (queue = 0; queue < rxq_number; queue++) {
struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
+ u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
if (priv->cee_config.pfc_state) {
int tc;
+ rx_ppp = prof->rx_ppp;
+ tx_ppp = prof->tx_ppp;
- priv->prof->rx_pause = 0;
- priv->prof->tx_pause = 0;
for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
u8 tc_mask = 1 << tc;
switch (priv->cee_config.dcb_pfc[tc]) {
case pfc_disabled:
- priv->prof->tx_ppp &= ~tc_mask;
- priv->prof->rx_ppp &= ~tc_mask;
+ tx_ppp &= ~tc_mask;
+ rx_ppp &= ~tc_mask;
break;
case pfc_enabled_full:
- priv->prof->tx_ppp |= tc_mask;
- priv->prof->rx_ppp |= tc_mask;
+ tx_ppp |= tc_mask;
+ rx_ppp |= tc_mask;
break;
case pfc_enabled_tx:
- priv->prof->tx_ppp |= tc_mask;
- priv->prof->rx_ppp &= ~tc_mask;
+ tx_ppp |= tc_mask;
+ rx_ppp &= ~tc_mask;
break;
case pfc_enabled_rx:
- priv->prof->tx_ppp &= ~tc_mask;
- priv->prof->rx_ppp |= tc_mask;
+ tx_ppp &= ~tc_mask;
+ rx_ppp |= tc_mask;
break;
default:
break;
}
}
- en_dbg(DRV, priv, "Set pfc on\n");
+ rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
+ tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
} else {
- priv->prof->rx_pause = 1;
- priv->prof->tx_pause = 1;
- en_dbg(DRV, priv, "Set pfc off\n");
+ rx_ppp = 0;
+ tx_ppp = 0;
+ rx_pause = prof->rx_pause;
+ tx_pause = prof->tx_pause;
}
if (mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp)) {
+ tx_pause, tx_ppp, rx_pause, rx_ppp)) {
en_err(priv, "Failed setting pause params\n");
return 1;
}
+ prof->tx_ppp = tx_ppp;
+ prof->rx_ppp = rx_ppp;
+ prof->tx_pause = tx_pause;
+ prof->rx_pause = rx_pause;
+
return 0;
}
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
+ u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
int err;
en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
pfc->mbc,
pfc->delay);
- prof->rx_pause = !pfc->pfc_en;
- prof->tx_pause = !pfc->pfc_en;
- prof->rx_ppp = pfc->pfc_en;
- prof->tx_ppp = pfc->pfc_en;
+ rx_pause = prof->rx_pause && !pfc->pfc_en;
+ tx_pause = prof->tx_pause && !pfc->pfc_en;
+ rx_ppp = pfc->pfc_en;
+ tx_ppp = pfc->pfc_en;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- prof->tx_pause,
- prof->tx_ppp,
- prof->rx_pause,
- prof->rx_ppp);
- if (err)
+ tx_pause, tx_ppp, rx_pause, rx_ppp);
+ if (err) {
en_err(priv, "Failed setting pause params\n");
- else
- mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
- prof->rx_ppp, prof->rx_pause,
- prof->tx_ppp, prof->tx_pause);
+ return err;
+ }
+
+ mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+ rx_ppp, rx_pause, tx_ppp, tx_pause);
+
+ prof->tx_ppp = tx_ppp;
+ prof->rx_ppp = rx_ppp;
+ prof->rx_pause = rx_pause;
+ prof->tx_pause = tx_pause;
return err;
}
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
int err;
if (pause->autoneg)
return -EINVAL;
- priv->prof->tx_pause = pause->tx_pause != 0;
- priv->prof->rx_pause = pause->rx_pause != 0;
+ tx_pause = !!(pause->tx_pause);
+ rx_pause = !!(pause->rx_pause);
+ rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
+ tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
+
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp);
- if (err)
- en_err(priv, "Failed setting pause params\n");
- else
- mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
- priv->prof->rx_ppp,
- priv->prof->rx_pause,
- priv->prof->tx_ppp,
- priv->prof->tx_pause);
+ tx_pause, tx_ppp, rx_pause, rx_ppp);
+ if (err) {
+ en_err(priv, "Failed setting pause params, err = %d\n", err);
+ return err;
+ }
+
+ mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+ rx_ppp, rx_pause, tx_ppp, tx_pause);
+
+ priv->prof->tx_pause = tx_pause;
+ priv->prof->rx_pause = rx_pause;
+ priv->prof->tx_ppp = tx_ppp;
+ priv->prof->rx_ppp = rx_ppp;
return err;
}
params->udp_rss = 0;
}
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
- params->prof[i].rx_pause = 1;
+ params->prof[i].rx_pause = !(pfcrx || pfctx);
params->prof[i].rx_ppp = pfcrx;
- params->prof[i].tx_pause = 1;
+ params->prof[i].tx_pause = !(pfcrx || pfctx);
params->prof[i].tx_ppp = pfctx;
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
&tracker->res_tree[RES_FS_RULE]);
list_del(&fs_rule->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ kfree(fs_rule->mirr_mbox);
kfree(fs_rule);
state = 0;
break;
config MLX5_ESWITCH
bool "Mellanox Technologies MLX5 SRIOV E-Switch support"
- depends on MLX5_CORE_EN
+ depends on MLX5_CORE_EN && NET_SWITCHDEV
default y
---help---
Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC.
return mlx5e_ethtool_get_coalesce(priv, coal);
}
+#define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD
+#define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT
+
static void
mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
if (!MLX5_CAP_GEN(mdev, cq_moderation))
return -EOPNOTSUPP;
+ if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
+ netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
+ __func__, MLX5E_MAX_COAL_TIME);
+ return -ERANGE;
+ }
+
+ if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
+ coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
+ netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
+ __func__, MLX5E_MAX_COAL_FRAMES);
+ return -ERANGE;
+ }
+
mutex_lock(&priv->state_lock);
new_channels.params = priv->channels.params;
mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
+ if (mlx5e_vxlan_allowed(priv->mdev))
+ udp_tunnel_get_rx_info(netdev);
+
return err;
}
}
}
-#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
+#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
static const struct switchdev_ops mlx5e_switchdev_ops = {
.switchdev_port_attr_get = mlx5e_attr_get,
};
mlx5e_set_netdev_dev_addr(netdev);
-#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
+#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
if (MLX5_VPORT_MANAGER(mdev))
netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
#endif
- /* Device already registered: sync netdev system state */
- if (mlx5e_vxlan_allowed(mdev)) {
- rtnl_lock();
- udp_tunnel_get_rx_info(netdev);
- rtnl_unlock();
- }
queue_work(priv->wq, &priv->set_rx_mode_work);
#include "en_tc.h"
#include "fs_core.h"
+#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
+ max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
+ max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)
+
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep,
- u16 *sqns_array, int sqns_num)
+ u32 *sqns_array, int sqns_num)
{
struct mlx5_flow_handle *flow_rule;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_channel *c;
int n, tc, num_sqs = 0;
int err = -ENOMEM;
- u16 *sqs;
+ u32 *sqs;
- sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL);
+ sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
if (!sqs)
goto out;
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
- unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms,
+ unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
DELAY_PROBE_TIME);
#else
unsigned long ipv6_interval = ~0UL;
case NETEVENT_NEIGH_UPDATE:
n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
- if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
+ if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
if (n->tbl != &arp_tbl)
#endif
* done per device delay prob time parameter.
*/
#if IS_ENABLED(CONFIG_IPV6)
- if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
+ if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
if (!p->dev || p->tbl != &arp_tbl)
#endif
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
mutex_lock(&priv->state_lock);
if (err)
goto unlock;
- if (!mlx5_eswitch_set_vport_state(esw, rep->vport,
- MLX5_ESW_VPORT_ADMIN_STATE_UP))
+ if (!mlx5_modify_vport_admin_state(priv->mdev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
netif_carrier_on(dev);
unlock:
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int ret;
mutex_lock(&priv->state_lock);
- (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+ mlx5_modify_vport_admin_state(priv->mdev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
ret = mlx5e_close_locked(dev);
mutex_unlock(&priv->state_lock);
return ret;
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
- params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+ params->log_rq_size = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
-#ifdef CONFIG_NET_SWITCHDEV
netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
-#endif
netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
netdev->hw_features |= NETIF_F_HW_TC;
tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
else if (m_neigh->family == AF_INET6)
- tbl = ipv6_stub->nd_tbl;
+ tbl = &nd_tbl;
#endif
else
return;
if (err != -EAGAIN)
flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
+ !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
+ kvfree(parse_attr);
+
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
- if (err)
- goto err_del_rule;
+ if (err) {
+ mlx5e_tc_del_flow(priv, flow);
+ kfree(flow);
+ }
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
- !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
- kvfree(parse_attr);
return err;
-err_del_rule:
- mlx5e_tc_del_flow(priv, flow);
-
err_free:
kvfree(parse_attr);
kfree(flow);
decap_fib_entry);
}
+static int
+mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
+ struct mlxsw_sp_vr *ul_vr, bool enable)
+{
+ struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
+ struct mlxsw_sp_rif *rif = &lb_rif->common;
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u32 saddr4;
+
+ switch (lb_cf.ul_protocol) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
+ rif->rif_index, rif->vr_id, rif->dev->mtu);
+ mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
+ MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
+ ul_vr->id, saddr4, lb_cf.okey);
+ break;
+
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return -EAFNOSUPPORT;
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ struct mlxsw_sp_rif_ipip_lb *lb_rif;
+ struct mlxsw_sp_vr *ul_vr;
+ int err = 0;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (ipip_entry) {
+ lb_rif = ipip_entry->ol_lb;
+ ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
+ err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
+ if (err)
+ goto out;
+ lb_rif->common.mtu = ol_dev->mtu;
+ }
+
+out:
+ return err;
+}
+
static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *ol_dev)
{
extack = info->extack;
return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
ol_dev, extack);
+ case NETDEV_CHANGEMTU:
+ return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
}
return 0;
}
rif_lb->lb_config = params_lb->lb_config;
}
-static int
-mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
- struct mlxsw_sp_vr *ul_vr, bool enable)
-{
- struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
- struct mlxsw_sp_rif *rif = &lb_rif->common;
- struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
- char ritr_pl[MLXSW_REG_RITR_LEN];
- u32 saddr4;
-
- switch (lb_cf.ul_protocol) {
- case MLXSW_SP_L3_PROTO_IPV4:
- saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
- mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
- rif->rif_index, rif->vr_id, rif->dev->mtu);
- mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
- MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
- ul_vr->id, saddr4, lb_cf.okey);
- break;
-
- case MLXSW_SP_L3_PROTO_IPV6:
- return -EAFNOSUPPORT;
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
static int
mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
- if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
+ if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
+ pr_warn("instruction limit reached (%u NFP instructions)\n",
+ nfp_prog->prog_len);
nfp_prog->error = -ENOSPC;
return;
}
err = cb(nfp_prog, meta);
if (err)
return err;
+ if (nfp_prog->error)
+ return nfp_prog->error;
nfp_prog->n_translated++;
}
barrier();
writel(txq->tx_db.raw, txq->doorbell_addr);
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the queue lock is released and another start_xmit is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
+ /* Fence required to flush the write combined buffer, since another
+ * CPU may write to the same doorbell address and data may be lost
+ * due to relaxed order nature of write combined bar.
*/
- mmiowb();
+ wmb();
}
static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
csum_flag = qede_check_csum(parse_flag);
if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
- if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
+ if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
rxq->rx_ip_frags++;
- } else {
- DP_NOTICE(edev,
- "CQE has error, flags = %x, dropping incoming packet\n",
- parse_flag);
+ else
rxq->rx_hw_errors++;
- qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
- return 0;
- }
}
/* Basic validation passed; Need to prepare an SKB. This would also
if (!tp->counters)
return -ENOMEM;
+ pci_set_drvdata(pdev, dev);
+
rc = register_netdev(dev);
if (rc < 0)
return rc;
- pci_set_drvdata(pdev, dev);
-
netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
(u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
if (flags & IFF_PROMISC) {
filter = NDIS_PACKET_TYPE_PROMISCUOUS;
} else {
- if (flags & IFF_ALLMULTI)
+ if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
if (flags & IFF_BROADCAST)
filter |= NDIS_PACKET_TYPE_BROADCAST;
goto err_dev_open;
}
- netif_addr_lock_bh(dev);
- dev_uc_sync_multiple(port_dev, dev);
- dev_mc_sync_multiple(port_dev, dev);
- netif_addr_unlock_bh(dev);
-
err = vlan_vids_add_by_dev(port_dev, dev);
if (err) {
netdev_err(dev, "Failed to add vlan ids to device %s\n",
goto err_option_port_add;
}
+ netif_addr_lock_bh(dev);
+ dev_uc_sync_multiple(port_dev, dev);
+ dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
+
port->index = -1;
list_add_tail_rcu(&port->list, &team->port_list);
team_port_enable(team, port);
vlan_vids_del_by_dev(port_dev, dev);
err_vids_add:
- dev_uc_unsync(port_dev, dev);
- dev_mc_unsync(port_dev, dev);
dev_close(port_dev);
err_dev_open:
u32 buf;
int ret = 0;
unsigned long timeout;
+ u8 sig;
ret = lan78xx_read_reg(dev, HW_CFG, &buf);
buf |= HW_CFG_LRST_;
/* LAN7801 only has RGMII mode */
if (dev->chipid == ID_REV_CHIP_ID_7801_)
buf &= ~MAC_CR_GMII_EN_;
+
+ if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+ ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
+ if (!ret && sig != EEPROM_INDICATOR) {
+ /* Implies there is no external eeprom. Set mac speed */
+ netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
+ buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
+ }
+ }
ret = lan78xx_write_reg(dev, MAC_CR, buf);
ret = lan78xx_read_reg(dev, MAC_TX, &buf);
if (ret < 0) {
netdev_warn(dev->net,
"lan78xx_setup_irq_domain() failed : %d", ret);
- kfree(pdata);
- return ret;
+ goto out1;
}
dev->net->hard_header_len += TX_OVERHEAD;
/* Init all registers */
ret = lan78xx_reset(dev);
+ if (ret) {
+ netdev_warn(dev->net, "Registers INIT FAILED....");
+ goto out2;
+ }
ret = lan78xx_mdio_init(dev);
+ if (ret) {
+ netdev_warn(dev->net, "MDIO INIT FAILED.....");
+ goto out2;
+ }
dev->net->flags |= IFF_MULTICAST;
pdata->wol = WAKE_MAGIC;
return ret;
+
+out2:
+ lan78xx_remove_irq_domain(dev);
+
+out1:
+ netdev_warn(dev->net, "Bind routine FAILED");
+ cancel_work_sync(&pdata->set_multicast);
+ cancel_work_sync(&pdata->set_vlan);
+ kfree(pdata);
+ return ret;
}
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
lan78xx_remove_mdio(dev);
if (pdata) {
+ cancel_work_sync(&pdata->set_multicast);
+ cancel_work_sync(&pdata->set_vlan);
netif_dbg(dev, ifdown, dev->net, "free pdata");
kfree(pdata);
pdata = NULL;
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
+ {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
+ {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
+ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
{QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
{QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
if (!IS_ERR(neigh)) {
sock_confirm_neigh(skb, neigh);
ret = neigh_output(neigh, skb);
+ rcu_read_unlock_bh();
+ return ret;
}
rcu_read_unlock_bh();
err:
- if (unlikely(ret < 0))
- vrf_tx_error(skb->dev, skb);
+ vrf_tx_error(skb->dev, skb);
return ret;
}
return;
/* ignore non-ISO3166 country codes */
- for (i = 0; i < sizeof(req->alpha2); i++)
+ for (i = 0; i < 2; i++)
if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
brcmf_err("not an ISO3166 code (0x%02x 0x%02x)\n",
req->alpha2[0], req->alpha2[1]);
module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR);
MODULE_PARM_DESC(roamoff, "Do not use internal roaming engine");
+static int brcmf_iapp_enable;
+module_param_named(iapp, brcmf_iapp_enable, int, 0);
+MODULE_PARM_DESC(iapp, "Enable partial support for the obsoleted Inter-Access Point Protocol");
+
#ifdef DEBUG
/* always succeed brcmf_bus_started() */
static int brcmf_ignore_probe_fail;
settings->feature_disable = brcmf_feature_disable;
settings->fcmode = brcmf_fcmode;
settings->roamoff = !!brcmf_roamoff;
+ settings->iapp = !!brcmf_iapp_enable;
#ifdef DEBUG
settings->ignore_probe_fail = !!brcmf_ignore_probe_fail;
#endif
unsigned int feature_disable;
int fcmode;
bool roamoff;
+ bool iapp;
bool ignore_probe_fail;
struct brcmfmac_pd_cc *country_codes;
union {
schedule_work(&ifp->multicast_work);
}
+/**
+ * brcmf_skb_is_iapp - checks if skb is an IAPP packet
+ *
+ * @skb: skb to check
+ */
+static bool brcmf_skb_is_iapp(struct sk_buff *skb)
+{
+ static const u8 iapp_l2_update_packet[6] __aligned(2) = {
+ 0x00, 0x01, 0xaf, 0x81, 0x01, 0x00,
+ };
+ unsigned char *eth_data;
+#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ const u16 *a, *b;
+#endif
+
+ if (skb->len - skb->mac_len != 6 ||
+ !is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+ return false;
+
+ eth_data = skb_mac_header(skb) + ETH_HLEN;
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return !(((*(const u32 *)eth_data) ^ (*(const u32 *)iapp_l2_update_packet)) |
+ ((*(const u16 *)(eth_data + 4)) ^ (*(const u16 *)(iapp_l2_update_packet + 4))));
+#else
+ a = (const u16 *)eth_data;
+ b = (const u16 *)iapp_l2_update_packet;
+
+ return !((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]));
+#endif
+}
+
static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
goto done;
}
+	/* Some recent Broadcom firmwares disassociate the STA when they receive
+ * an 802.11f ADD frame. This behavior can lead to a local DoS security
+ * issue. Attacker may trigger disassociation of any STA by sending a
+ * proper Ethernet frame to the wireless interface.
+ *
+ * Moreover this feature may break AP interfaces in some specific
+ * setups. This applies e.g. to the bridge with hairpin mode enabled and
+ * IFLA_BRPORT_MCAST_TO_UCAST set. IAPP packet generated by a firmware
+ * will get passed back to the wireless interface and cause immediate
+ * disassociation of a just-connected STA.
+ */
+ if (!drvr->settings->iapp && brcmf_skb_is_iapp(skb)) {
+ dev_kfree_skb(skb);
+ ret = -EINVAL;
+ goto done;
+ }
+
/* Make sure there's enough writeable headroom */
if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
+	/* Most Broadcom firmwares send an 802.11f ADD frame every time a new
+	 * STA connects to the AP interface. This is an obsolete standard most
+ * users don't use, so don't pass these frames up unless requested.
+ */
+ if (!ifp->drvr->settings->iapp && brcmf_skb_is_iapp(skb)) {
+ brcmu_pkt_buf_free_skb(skb);
+ return;
+ }
+
if (skb->pkt_type == PACKET_MULTICAST)
ifp->ndev->stats.multicast++;
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-agn-hw.h"
+#include "fw/file.h"
/* Highest firmware API version supported */
#define IWL9000_UCODE_API_MAX 36
.integrated = true,
.soc_latency = 5000,
};
+
+const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
+ .name = "Intel(R) Dual Band Wireless AC 9460",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+ .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
+ .name = "Intel(R) Dual Band Wireless AC 9461",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+ .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
+ .name = "Intel(R) Dual Band Wireless AC 9462",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+ .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
+ .name = "Intel(R) Dual Band Wireless AC 9560",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+ .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
FW_PHY_CFG_RX_CHAIN_POS = 20,
FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
+ FW_PHY_CFG_SHARED_CLK = BIT(31),
};
#define IWL_UCODE_MAX_CS 1
u8 ucode_api_max;
u8 ucode_api_min;
u32 min_umac_error_event_table;
+ u32 extra_phy_cfg_flags;
};
/*
extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
/* Set parameters */
phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
+
+ /* set flags extra PHY configuration flags from the device's cfg */
+ phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags);
+
phy_cfg_cmd.calib_control.event_trigger =
mvm->fw->default_calib[ucode_type].event_trigger;
phy_cfg_cmd.calib_control.flow_trigger =
* Send the bcast station. At this stage the TBTT and DTIM time
* events are added and applied to the scheduler
*/
- iwl_mvm_send_add_bcast_sta(mvm, vif);
+ ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
if (ret)
goto out_unbind;
- iwl_mvm_add_mcast_sta(mvm, vif);
+ ret = iwl_mvm_add_mcast_sta(mvm, vif);
if (ret) {
iwl_mvm_send_rm_bcast_sta(mvm, vif);
goto out_unbind;
ret = 0;
goto out;
case NL80211_IFTYPE_STATION:
+ mvmvif->csa_bcn_pending = false;
break;
case NL80211_IFTYPE_MONITOR:
/* always disable PS when a monitor interface is active */
}
if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
- u32 duration = 2 * vif->bss_conf.beacon_int;
+ u32 duration = 3 * vif->bss_conf.beacon_int;
/* iwl_mvm_protect_session() reads directly from the
* device (the system time), so make sure it is
/* Protect the session to make sure we hear the first
* beacon on the new channel.
*/
+ mvmvif->csa_bcn_pending = true;
iwl_mvm_protect_session(mvm, vif, duration, duration,
vif->bss_conf.beacon_int / 2,
true);
if (vif->type == NL80211_IFTYPE_STATION) {
struct iwl_mvm_sta *mvmsta;
+ mvmvif->csa_bcn_pending = false;
mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
mvmvif->ap_sta_id);
bool csa_failed;
u16 csa_target_freq;
+ /* Indicates that we are waiting for a beacon on a new channel */
+ bool csa_bcn_pending;
+
/* TCP Checksum Offload */
netdev_features_t features;
};
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
return;
ctxt->ref--;
+
+ /*
+	 * Move unused phys to a default channel. When the phy is moved, the
+	 * fw will clean up the immediate quiet bit if it was previously set,
+	 * otherwise we might not be able to reuse this phy.
+ */
+ if (ctxt->ref == 0) {
+ struct ieee80211_channel *chan;
+ struct cfg80211_chan_def chandef;
+
+ chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+ iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
+ }
}
static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
u32 qmask, enum nl80211_iftype iftype,
enum iwl_sta_type type)
{
- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ sta->sta_id == IWL_MVM_INVALID_STA) {
sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
return -ENOSPC;
/*
* Note the possible cases:
- * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
- * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
- * one and mark it as reserved
- * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
- * non-DQA mode, since the TXQ hasn't yet been allocated
- * Don't support case 3 for new TX path as it is not expected to happen
- * and aggregation will be offloaded soon anyway
+ * 1. An enabled TXQ - TXQ needs to become agg'ed
+ * 2. The TXQ hasn't yet been enabled, so find a free one and mark
+ * it as reserved
*/
txq_id = mvmsta->tid_data[tid].txq_id;
- if (iwl_mvm_has_new_tx_api(mvm)) {
- if (txq_id == IWL_MVM_INVALID_QUEUE) {
- ret = -ENXIO;
- goto release_locks;
- }
- } else if (unlikely(mvm->queue_info[txq_id].status ==
- IWL_MVM_QUEUE_SHARED)) {
- ret = -ENXIO;
- IWL_DEBUG_TX_QUEUES(mvm,
- "Can't start tid %d agg on shared queue!\n",
- tid);
- goto release_locks;
- } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
+ if (txq_id == IWL_MVM_INVALID_QUEUE) {
txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto release_locks;
}
- /*
- * TXQ shouldn't be in inactive mode for non-DQA, so getting
- * an inactive queue from iwl_mvm_find_free_queue() is
- * certainly a bug
- */
- WARN_ON(mvm->queue_info[txq_id].status ==
- IWL_MVM_QUEUE_INACTIVE);
/* TXQ hasn't yet been enabled, so mark it only as reserved */
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
+ } else if (unlikely(mvm->queue_info[txq_id].status ==
+ IWL_MVM_QUEUE_SHARED)) {
+ ret = -ENXIO;
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Can't start tid %d agg on shared queue!\n",
+ tid);
+ goto release_locks;
}
spin_unlock(&mvm->queue_info_lock);
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta,
- u16 txq_id)
+ struct iwl_mvm_tid_data *tid_data)
{
+ u16 txq_id = tid_data->txq_id;
+
if (iwl_mvm_has_new_tx_api(mvm))
return;
* allocated through iwl_mvm_enable_txq, so we can just mark it back as
* free.
*/
- if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
+ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+ tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
+ }
spin_unlock_bh(&mvm->queue_info_lock);
}
mvmsta->agg_tids &= ~BIT(tid);
- iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
+ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
switch (tid_data->state) {
case IWL_AGG_ON:
mvmsta->agg_tids &= ~BIT(tid);
spin_unlock_bh(&mvmsta->lock);
- iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
+ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
if (old_state >= IWL_AGG_ON) {
iwl_mvm_drain_sta(mvm, mvmsta, true);
}
sta_id = mvm_sta->sta_id;
- if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
- ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id,
- false);
- goto end;
- }
-
/*
* It is possible that the 'sta' parameter is NULL, and thus
- * there is a need to retrieve the sta from the local station
+ * there is a need to retrieve the sta from the local station
* table.
*/
if (!sta) {
if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
return -EINVAL;
+ } else {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ sta_id = mvmvif->mcast_sta.sta_id;
+ }
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
+ ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
+ goto end;
}
/* If the key_offset is not pre-assigned, we need to find a
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
struct ieee80211_vif *vif,
const char *errmsg)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
if (vif->type != NL80211_IFTYPE_STATION)
return false;
- if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
+
+ if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
+ vif->bss_conf.dtim_period)
return false;
if (errmsg)
IWL_ERR(mvm, "%s\n", errmsg);
* and know the dtim period.
*/
iwl_mvm_te_check_disconnect(mvm, te_data->vif,
- "No association and the time event is over already...");
+ "No beacon heard and the time event is over already...");
break;
default:
break;
struct iwl_mvm_int_sta *int_sta = sta;
struct iwl_mvm_sta *mvm_sta = sta;
- if (iwl_mvm_has_new_tx_api(mvm)) {
- if (internal)
- return iwl_mvm_flush_sta_tids(mvm, int_sta->sta_id,
- BIT(IWL_MGMT_TID), flags);
+ BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) !=
+ offsetof(struct iwl_mvm_sta, sta_id));
+ if (iwl_mvm_has_new_tx_api(mvm))
return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
- 0xFF, flags);
- }
+ 0xff | BIT(IWL_MGMT_TID), flags);
if (internal)
return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
{IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
if (!len && vq->busyloop_timeout) {
/* Both tx vq and rx socket were polled here */
- mutex_lock(&vq->mutex);
+ mutex_lock_nested(&vq->mutex, 1);
vhost_disable_notify(&net->dev, vq);
preempt_disable();
struct iov_iter fixup;
__virtio16 num_buffers;
- mutex_lock(&vq->mutex);
+ mutex_lock_nested(&vq->mutex, 0);
sock = vq->private_data;
if (!sock)
goto out;
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
if (mask & EPOLLERR) {
- if (poll->wqh)
- remove_wait_queue(poll->wqh, &poll->wait);
+ vhost_poll_stop(poll);
ret = -EINVAL;
}
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
- if (vq->iotlb) {
- /* When device IOTLB was used, the access validation
- * will be validated during prefetching.
- */
- return 1;
- }
- return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
- vq_log_access_ok(vq, vq->log_base);
+ int ret = vq_log_access_ok(vq, vq->log_base);
+
+ if (ret || vq->iotlb)
+ return ret;
+
+ return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
skb_push(skb, VLAN_HLEN);
/* Move the mac header sans proto to the beginning of the new header. */
- memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
+ if (likely(mac_len > ETH_TLEN))
+ memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
skb->mac_header -= VLAN_HLEN;
veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
/* first, the ethernet type */
- veth->h_vlan_proto = vlan_proto;
+ if (likely(mac_len >= ETH_TLEN)) {
+ /* h_vlan_encapsulated_proto should already be populated, and
+ * skb->data has space for h_vlan_proto
+ */
+ veth->h_vlan_proto = vlan_proto;
+ } else {
+ /* h_vlan_encapsulated_proto should not be populated, and
+ * skb->data has no space for h_vlan_proto
+ */
+ veth->h_vlan_encapsulated_proto = skb->protocol;
+ }
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
}
#define IS_SIGNIFICANT_DIFF(val, ref) \
- (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
+ (((100UL * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
static inline int net_dim_stats_compare(struct net_dim_stats *curr,
struct net_dim_stats *prev)
/* Access to a connection */
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
int nft_register_obj(struct nft_object_type *obj_type);
void nft_unregister_obj(struct nft_object_type *obj_type);
+#define NFT_FLOWTABLE_DEVICE_MAX 8
+
/**
* struct nft_flowtable - nf_tables flow table
*
* @genmask: generation mask
* @use: number of references to this flow table
* @handle: unique object handle
+ * @dev_name: array of device names
* @data: rhashtable and garbage collector
* @ops: array of hooks
*/
u32 genmask:2,
use:30;
u64 handle;
+ char *dev_name[NFT_FLOWTABLE_DEVICE_MAX];
/* runtime data below here */
struct nf_hook_ops *ops ____cacheline_aligned;
struct nf_flowtable data;
enum qdisc_state_t {
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
+ __QDISC_STATE_RUNNING,
};
struct qdisc_size_table {
{
struct batadv_neigh_node *neigh_curr = NULL;
struct batadv_neigh_node *neigh_old = NULL;
- struct batadv_orig_node *orig_dst_node;
+ struct batadv_orig_node *orig_dst_node = NULL;
struct batadv_gw_node *gw_node = NULL;
struct batadv_gw_node *curr_gw = NULL;
struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo;
vid = batadv_get_vid(skb, 0);
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ goto out;
+
orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
ethhdr->h_dest, vid);
if (!orig_dst_node)
batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
struct ethhdr *ethhdr)
{
- return batadv_transtable_search(bat_priv, ethhdr->h_source,
- ethhdr->h_dest, BATADV_NO_FLAGS);
+ return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
+ BATADV_NO_FLAGS);
}
/**
if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
return 0;
- eth = (struct ethhdr *)skb_mac_header(skb);
+ eth = (struct ethhdr *)skb->data;
type = eth->h_proto;
}
}
mac_len = skb->data - skb_mac_header(skb);
- memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
- mac_len - VLAN_HLEN - ETH_TLEN);
+ if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
+ memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
+ mac_len - VLAN_HLEN - ETH_TLEN);
+ }
skb->mac_header += VLAN_HLEN;
return skb;
}
struct ip_tunnel *nt;
struct net_device *dev;
int t_hlen;
+ int mtu;
+ int err;
BUG_ON(!itn->fb_tunnel_dev);
dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
if (IS_ERR(dev))
return ERR_CAST(dev);
- dev->mtu = ip_tunnel_bind_dev(dev);
+ mtu = ip_tunnel_bind_dev(dev);
+ err = dev_set_mtu(dev, mtu);
+ if (err)
+ goto err_dev_set_mtu;
nt = netdev_priv(dev);
t_hlen = nt->hlen + sizeof(struct iphdr);
dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
ip_tunnel_add(itn, nt);
return nt;
+
+err_dev_set_mtu:
+ unregister_netdevice(dev);
+ return ERR_PTR(err);
}
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
nt->fwmark = fwmark;
err = register_netdevice(dev);
if (err)
- goto out;
+ goto err_register_netdevice;
if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);
mtu = ip_tunnel_bind_dev(dev);
- if (!tb[IFLA_MTU])
- dev->mtu = mtu;
+ if (tb[IFLA_MTU]) {
+ unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen;
+
+ mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
+ (unsigned int)(max - sizeof(struct iphdr)));
+ }
+
+ err = dev_set_mtu(dev, mtu);
+ if (err)
+ goto err_dev_set_mtu;
ip_tunnel_add(itn, nt);
-out:
+ return 0;
+
+err_dev_set_mtu:
+ unregister_netdevice(dev);
+err_register_netdevice:
return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
memcpy(dev->dev_addr, &iph->saddr, 4);
memcpy(dev->broadcast, &iph->daddr, 4);
- dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
- dev->mtu = ETH_DATA_LEN;
dev->flags = IFF_NOARP;
dev->addr_len = 4;
dev->features |= NETIF_F_LLTX;
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
+ if (ip_is_fragment(ip_hdr(skb))) { /* IP_NODEFRAG setsockopt set */
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *tmpl;
+
+ tmpl = nf_ct_get(skb, &ctinfo);
+ if (tmpl && nf_ct_is_template(tmpl)) {
+ /* when skipping ct, clear templates to avoid fooling
+ * later targets/matches
+ */
+ skb->_nfct = 0;
+ nf_ct_put(tmpl);
+ }
return NF_ACCEPT;
+ }
return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
}
int doff = 0;
if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) {
- struct udphdr _hdr, *hp;
+ struct tcphdr _hdr;
+ struct udphdr *hp;
hp = skb_header_pointer(skb, ip_hdrlen(skb),
- sizeof(_hdr), &_hdr);
+ iph->protocol == IPPROTO_UDP ?
+ sizeof(*hp) : sizeof(_hdr), &_hdr);
if (hp == NULL)
return NULL;
req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
treq->snt_synack = 0;
treq->tfo_listener = false;
+ if (IS_ENABLED(CONFIG_SMC))
+ ireq->smc_ok = 0;
ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
if (want_cookie && !tmp_opt.saw_tstamp)
tcp_clear_options(&tmp_opt);
+ if (IS_ENABLED(CONFIG_SMC) && want_cookie)
+ tmp_opt.smc_ok = 0;
+
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb, sk);
inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent;
const struct sockcm_cookie *sockc)
{
struct sk_buff *skb, *skb_prev = NULL;
- unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
+ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
int exthdrlen = 0;
int dst_exthdrlen = 0;
int hh_len;
sizeof(struct frag_hdr) : 0) +
rt->rt6i_nfheader_len;
+ /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
+ * the first fragment
+ */
+ if (headersize + transhdrlen > mtu)
+ goto emsgsize;
+
if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
(sk->sk_protocol == IPPROTO_UDP ||
sk->sk_protocol == IPPROTO_RAW)) {
if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
- ipv6_local_error(sk, EMSGSIZE, fl6,
- mtu - headersize +
- sizeof(struct ipv6hdr));
+ pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
+ ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
return -EMSGSIZE;
}
return 0;
}
-static void vti6_link_config(struct ip6_tnl *t)
+static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu)
{
struct net_device *dev = t->dev;
struct __ip6_tnl_parm *p = &t->parms;
struct net_device *tdev = NULL;
+ int mtu;
memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
else
dev->flags &= ~IFF_POINTOPOINT;
+ if (keep_mtu && dev->mtu) {
+ dev->mtu = clamp(dev->mtu, dev->min_mtu, dev->max_mtu);
+ return;
+ }
+
if (p->flags & IP6_TNL_F_CAP_XMIT) {
int strict = (ipv6_addr_type(&p->raddr) &
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
tdev = __dev_get_by_index(t->net, p->link);
if (tdev)
- dev->mtu = max_t(int, tdev->mtu - dev->hard_header_len,
- IPV6_MIN_MTU);
+ mtu = tdev->mtu - sizeof(struct ipv6hdr);
+ else
+ mtu = ETH_DATA_LEN - LL_MAX_HEADER - sizeof(struct ipv6hdr);
+
+ dev->mtu = max_t(int, mtu, IPV6_MIN_MTU);
}
/**
* vti6_tnl_change - update the tunnel parameters
* @t: tunnel to be changed
* @p: tunnel configuration parameters
+ * @keep_mtu: MTU was set from userspace, don't re-compute it
*
* Description:
* vti6_tnl_change() updates the tunnel parameters
**/
static int
-vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
+vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
+ bool keep_mtu)
{
t->parms.laddr = p->laddr;
t->parms.raddr = p->raddr;
t->parms.proto = p->proto;
t->parms.fwmark = p->fwmark;
dst_cache_reset(&t->dst_cache);
- vti6_link_config(t);
+ vti6_link_config(t, keep_mtu);
return 0;
}
-static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p,
+ bool keep_mtu)
{
struct net *net = dev_net(t->dev);
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
vti6_tnl_unlink(ip6n, t);
synchronize_net();
- err = vti6_tnl_change(t, p);
+ err = vti6_tnl_change(t, p, keep_mtu);
vti6_tnl_link(ip6n, t);
netdev_state_change(t->dev);
return err;
} else
t = netdev_priv(dev);
- err = vti6_update(t, &p1);
+ err = vti6_update(t, &p1, false);
}
if (t) {
err = 0;
dev->priv_destructor = vti6_dev_free;
dev->type = ARPHRD_TUNNEL6;
- dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
- dev->mtu = ETH_DATA_LEN;
dev->min_mtu = IPV6_MIN_MTU;
- dev->max_mtu = IP_MAX_MTU;
+ dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr);
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
netif_keep_dst(dev);
if (err)
return err;
- vti6_link_config(t);
+ vti6_link_config(t, true);
return 0;
}
} else
t = netdev_priv(dev);
- return vti6_update(t, &p);
+ return vti6_update(t, &p, tb && tb[IFLA_MTU]);
}
static size_t vti6_get_size(const struct net_device *dev)
}
if (tproto == IPPROTO_UDP || tproto == IPPROTO_TCP) {
- struct udphdr _hdr, *hp;
+ struct tcphdr _hdr;
+ struct udphdr *hp;
- hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
+ hp = skb_header_pointer(skb, thoff, tproto == IPPROTO_UDP ?
+ sizeof(*hp) : sizeof(_hdr), &_hdr);
if (hp == NULL)
return NULL;
struct rt6_info *rt, *rt_cache;
struct fib6_node *fn;
+ if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
+ flags &= ~RT6_LOOKUP_F_IFACE;
+
rcu_read_lock();
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
struct neighbour *neigh;
__u8 neigh_flags = 0;
- neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
- if (neigh) {
+ neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
+ if (neigh)
neigh_flags = neigh->flags;
- neigh_release(neigh);
- }
+
if (!(neigh_flags & NTF_ROUTER)) {
RT6_TRACE("purging route %p via non-router but gateway\n",
rt);
if (!rcu_access_pointer(rt->rt6i_exception_bucket))
return;
- spin_lock_bh(&rt6_exception_lock);
+ rcu_read_lock_bh();
+ spin_lock(&rt6_exception_lock);
bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
lockdep_is_held(&rt6_exception_lock));
bucket++;
}
}
- spin_unlock_bh(&rt6_exception_lock);
+ spin_unlock(&rt6_exception_lock);
+ rcu_read_unlock_bh();
}
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
#include <linux/net.h>
#include <linux/module.h>
#include <net/ip.h>
+#include <net/ip_tunnels.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
tinfo = seg6_encap_lwtunnel(dst->lwtstate);
- if (likely(!skb->encapsulation)) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
-
switch (tinfo->mode) {
case SEG6_IPTUN_MODE_INLINE:
if (skb->protocol != htons(ETH_P_IPV6))
err = seg6_do_srh_inline(skb, tinfo->srh);
if (err)
return err;
-
- skb_reset_inner_headers(skb);
break;
case SEG6_IPTUN_MODE_ENCAP:
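+ /* set up tunnel offload state (SKB_GSO_IPXIP6) before building the outer header */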
+ err = iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6);
+ if (err)
+ return err;
+
if (skb->protocol == htons(ETH_P_IPV6))
proto = IPPROTO_IPV6;
else if (skb->protocol == htons(ETH_P_IP))
if (err)
return err;
+ skb_set_inner_transport_header(skb, skb_transport_offset(skb));
+ skb_set_inner_protocol(skb, skb->protocol);
skb->protocol = htons(ETH_P_IPV6);
break;
case SEG6_IPTUN_MODE_L2ENCAP:
ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
- skb_set_inner_protocol(skb, skb->protocol);
-
return 0;
}
treq->snt_isn = cookie;
treq->ts_off = 0;
treq->txhash = net_tx_rndhash();
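+ /* syncookies carry no SMC capability information, so clear it explicitly */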
+ if (IS_ENABLED(CONFIG_SMC))
+ ireq->smc_ok = 0;
/*
* We need to lookup the dst_entry to get the correct window size.
llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
- llc_conn_send_pdu(sk, skb);
+ rc = llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
return rc;
llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
- llc_conn_send_pdu(sk, skb);
+ rc = llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
return rc;
int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb)
{
struct llc_sock *llc = llc_sk(sk);
+ int ret;
if (llc->ack_must_be_send) {
- llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
+ ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
llc->ack_must_be_send = 0 ;
llc->ack_pf = 0;
- } else
- llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
- return 0;
+ } else {
+ ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
+ }
+
+ return ret;
}
/**
#endif
static int llc_find_offset(int state, int ev_type);
-static void llc_conn_send_pdus(struct sock *sk);
+static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
static int llc_exec_conn_trans_actions(struct sock *sk,
struct llc_conn_state_trans *trans,
return rc;
}
-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
+int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
{
/* queue PDU to send to MAC layer */
skb_queue_tail(&sk->sk_write_queue, skb);
- llc_conn_send_pdus(sk);
+ return llc_conn_send_pdus(sk, skb);
}
/**
if (howmany_resend > 0)
llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
/* any PDUs to re-send are queued up; start sending to MAC */
- llc_conn_send_pdus(sk);
+ llc_conn_send_pdus(sk, NULL);
out:;
}
if (howmany_resend > 0)
llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
/* any PDUs to re-send are queued up; start sending to MAC */
- llc_conn_send_pdus(sk);
+ llc_conn_send_pdus(sk, NULL);
out:;
}
/**
* llc_conn_send_pdus - Sends queued PDUs
* @sk: active connection
+ * @hold_skb: the skb held by the caller, or NULL if the caller does not care
*
- * Sends queued pdus to MAC layer for transmission.
+ * Sends queued PDUs to the MAC layer for transmission. When @hold_skb is
+ * NULL, always returns 0. Otherwise, returns 0 if @hold_skb was sent
+ * successfully, or a non-zero dev_queue_xmit() error code on failure.
*/
-static void llc_conn_send_pdus(struct sock *sk)
+static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
{
struct sk_buff *skb;
+ int ret = 0;
while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
if (!skb2)
break;
- skb = skb2;
+ dev_queue_xmit(skb2);
+ } else {
+ bool is_target = skb == hold_skb;
+ int rc;
+
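+ /* the caller still uses @hold_skb afterwards, so hold a reference across dev_queue_xmit() */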
+ if (is_target)
+ skb_get(skb);
+ rc = dev_queue_xmit(skb);
+ if (is_target)
+ ret = rc;
}
- dev_queue_xmit(skb);
}
+
+ return ret;
}
/**
kfree(trans);
}
+/* removal requests are queued in the commit_list, but not acted upon
+ * until after all new rules are in place.
+ *
+ * Therefore, nf_register_net_hook(net, &nat_hook) runs before pending
+ * nf_unregister_net_hook().
+ *
+ * nf_register_net_hook thus fails if a nat hook is already in place
+ * even if the conflicting hook is about to be removed.
+ *
+ * If a collision is detected, search the commit_list for a DELCHAIN
+ * matching the new nat hooknum; if we find one, the collision is
+ * temporary:
+ *
+ * Either the transaction is aborted (the new/colliding hook is removed),
+ * or the transaction is committed (the old hook is removed).
+ */
+static bool nf_tables_allow_nat_conflict(const struct net *net,
+ const struct nf_hook_ops *ops)
+{
+ const struct nft_trans *trans;
+ bool ret = false;
+
+ if (!ops->nat_hook)
+ return false;
+
+ list_for_each_entry(trans, &net->nft.commit_list, list) {
+ const struct nf_hook_ops *pending_ops;
+ const struct nft_chain *pending;
+
+ if (trans->msg_type != NFT_MSG_NEWCHAIN &&
+ trans->msg_type != NFT_MSG_DELCHAIN)
+ continue;
+
+ pending = trans->ctx.chain;
+ if (!nft_is_base_chain(pending))
+ continue;
+
+ pending_ops = &nft_base_chain(pending)->ops;
+ if (pending_ops->nat_hook &&
+ pending_ops->pf == ops->pf &&
+ pending_ops->hooknum == ops->hooknum) {
+ /* other hook registration already pending? */
+ if (trans->msg_type == NFT_MSG_NEWCHAIN)
+ return false;
+
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
static int nf_tables_register_hook(struct net *net,
const struct nft_table *table,
struct nft_chain *chain)
{
+ struct nf_hook_ops *ops;
+ int ret;
+
if (table->flags & NFT_TABLE_F_DORMANT ||
!nft_is_base_chain(chain))
return 0;
- return nf_register_net_hook(net, &nft_base_chain(chain)->ops);
+ ops = &nft_base_chain(chain)->ops;
+ ret = nf_register_net_hook(net, ops);
+ if (ret == -EBUSY && nf_tables_allow_nat_conflict(net, ops)) {
+ ops->nat_hook = false;
+ ret = nf_register_net_hook(net, ops);
+ ops->nat_hook = true;
+ }
+
+ return ret;
}
static void nf_tables_unregister_hook(struct net *net,
free_percpu(basechain->stats);
if (basechain->stats)
static_branch_dec(&nft_counters_enabled);
- if (basechain->ops.dev != NULL)
- dev_put(basechain->ops.dev);
kfree(chain->name);
kfree(basechain);
} else {
}
nla_strlcpy(ifname, ha[NFTA_HOOK_DEV], IFNAMSIZ);
- dev = dev_get_by_name(net, ifname);
+ dev = __dev_get_by_name(net, ifname);
if (!dev) {
module_put(type->owner);
return -ENOENT;
static void nft_chain_release_hook(struct nft_chain_hook *hook)
{
module_put(hook->type->owner);
- if (hook->dev != NULL)
- dev_put(hook->dev);
}
static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
[NFTA_RULE_POSITION] = { .type = NLA_U64 },
[NFTA_RULE_USERDATA] = { .type = NLA_BINARY,
.len = NFT_USERDATA_MAXLEN },
+ [NFTA_RULE_ID] = { .type = NLA_U32 },
};
static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
static bool nft_set_ops_candidate(const struct nft_set_ops *ops, u32 flags)
{
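+ /* dynamic (NFT_SET_EVAL) sets require an ->update() implementation */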
+ if ((flags & NFT_SET_EVAL) && !ops->update)
+ return false;
+
return (flags & ops->features) == (flags & NFT_SET_FEATURES);
}
if (est.space == best.space &&
est.lookup < best.lookup)
break;
- } else if (est.size < best.size) {
+ } else if (est.size < best.size || !bops) {
break;
}
continue;
[NFTA_SET_ELEM_TIMEOUT] = { .type = NLA_U64 },
[NFTA_SET_ELEM_USERDATA] = { .type = NLA_BINARY,
.len = NFT_USERDATA_MAXLEN },
+ [NFTA_SET_ELEM_EXPR] = { .type = NLA_NESTED },
+ [NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING },
};
static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
return ERR_PTR(-ENOENT);
}
-#define NFT_FLOWTABLE_DEVICE_MAX 8
-
static int nf_tables_parse_devices(const struct nft_ctx *ctx,
const struct nlattr *attr,
struct net_device *dev_array[], int *len)
}
nla_strlcpy(ifname, tmp, IFNAMSIZ);
- dev = dev_get_by_name(ctx->net, ifname);
+ dev = __dev_get_by_name(ctx->net, ifname);
if (!dev) {
err = -ENOENT;
goto err1;
err = nf_tables_parse_devices(ctx, tb[NFTA_FLOWTABLE_HOOK_DEVS],
dev_array, &n);
if (err < 0)
- goto err1;
+ return err;
ops = kzalloc(sizeof(struct nf_hook_ops) * n, GFP_KERNEL);
- if (!ops) {
- err = -ENOMEM;
- goto err1;
- }
+ if (!ops)
+ return -ENOMEM;
flowtable->hooknum = hooknum;
flowtable->priority = priority;
flowtable->ops[i].priv = &flowtable->data.rhashtable;
flowtable->ops[i].hook = flowtable->data.type->hook;
flowtable->ops[i].dev = dev_array[i];
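+ /* cache the name so the flowtable can still be dumped after the device is gone */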
+ flowtable->dev_name[i] = kstrdup(dev_array[i]->name,
+ GFP_KERNEL);
}
- err = 0;
-err1:
- for (i = 0; i < n; i++)
- dev_put(dev_array[i]);
-
return err;
}
err5:
i = flowtable->ops_len;
err4:
- for (k = i - 1; k >= 0; k--)
+ for (k = i - 1; k >= 0; k--) {
+ kfree(flowtable->dev_name[k]);
nf_unregister_net_hook(net, &flowtable->ops[k]);
+ }
kfree(flowtable->ops);
err3:
goto nla_put_failure;
for (i = 0; i < flowtable->ops_len; i++) {
- if (flowtable->ops[i].dev &&
+ if (flowtable->dev_name[i][0] &&
nla_put_string(skb, NFTA_DEVICE_NAME,
- flowtable->ops[i].dev->name))
+ flowtable->dev_name[i]))
goto nla_put_failure;
}
nla_nest_end(skb, nest_devs);
continue;
nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]);
+ flowtable->dev_name[i][0] = '\0';
flowtable->ops[i].dev = NULL;
break;
}
nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc,
u32 flags)
{
- if (desc->size && !(flags & NFT_SET_TIMEOUT)) {
+ if (desc->size && !(flags & (NFT_SET_EVAL | NFT_SET_TIMEOUT))) {
switch (desc->klen) {
case 4:
return &nft_hash_fast_ops;
if (addr->sa_family != AF_NETLINK)
return -EINVAL;
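+ /* reject truncated addresses before reading nl_groups/nl_pid below */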
+ if (alen < sizeof(struct sockaddr_nl))
+ return -EINVAL;
+
if ((nladdr->nl_groups || nladdr->nl_pid) &&
!netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
return -EPERM;
continue;
nest = nla_nest_start(skb, n_i);
- if (!nest)
+ if (!nest) {
+ index--;
goto nla_put_failure;
+ }
err = tcf_action_dump_1(skb, p, 0, 0);
if (err < 0) {
index--;
*/
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
+ bool more, validate, nolock = q->flags & TCQ_F_NOLOCK;
spinlock_t *root_lock = NULL;
struct netdev_queue *txq;
struct net_device *dev;
struct sk_buff *skb;
- bool validate;
/* Dequeue packet */
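+ /* lockless qdiscs: only one CPU may dequeue at a time, so take the RUNNING bit first */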
+ if (nolock && test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
+ return false;
+
skb = dequeue_skb(q, &validate, packets);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ if (nolock)
+ clear_bit(__QDISC_STATE_RUNNING, &q->state);
return false;
+ }
- if (!(q->flags & TCQ_F_NOLOCK))
+ if (!nolock)
root_lock = qdisc_lock(q);
dev = qdisc_dev(q);
txq = skb_get_tx_queue(dev, skb);
- return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
+ more = sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
+ if (nolock)
+ clear_bit(__QDISC_STATE_RUNNING, &q->state);
+ return more;
}
void __qdisc_run(struct Qdisc *q)
/* receive the complete CLC message */
memset(&msg, 0, sizeof(struct msghdr));
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, buflen);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen);
krflags = MSG_WAITALL;
smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
len = sock_recvmsg(smc->clcsock, &msg, krflags);
struct sock *sk = strp->sk;
/* Report an error on the lower socket */
- sk->sk_err = err;
+ sk->sk_err = -err;
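+ /* callers pass a negative errno; sk_err holds the positive value */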
sk->sk_error_report(sk);
}
}
/* Message assembly timed out */
STRP_STATS_INCR(strp->stats.msg_timeouts);
strp->cb.lock(strp);
- strp->cb.abort_parser(strp, ETIMEDOUT);
+ strp->cb.abort_parser(strp, -ETIMEDOUT);
strp->cb.unlock(strp);
}
};
struct xfrm_trans_cb {
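+ /* keep inet(6)_skb_parm at the start of skb->cb so IPCB()/IP6CB() users are not clobbered */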
+ union {
+ struct inet_skb_parm h4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_skb_parm h6;
+#endif
+ } header;
int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
};
return;
afinfo = xfrm_state_get_afinfo(proto);
- if (afinfo)
+ if (afinfo) {
afinfo->local_error(skb, mtu);
- rcu_read_unlock();
+ rcu_read_unlock();
+ }
}
EXPORT_SYMBOL_GPL(xfrm_local_error);
jsonw_string_field(json_wtr, "name", info->name);
jsonw_name(json_wtr, "flags");
- jsonw_printf(json_wtr, "%#x", info->map_flags);
+ jsonw_printf(json_wtr, "%d", info->map_flags);
print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);