Mirror of https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux
(synced 2025-09-28 19:57:53 +10:00)
Merge tag 'net-6.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:

 Previous releases - regressions:

  - netlink: avoid infinite retry looping in netlink_unicast()

 Previous releases - always broken:

  - packet: fix a race in packet_set_ring() and packet_notifier()

  - ipv6: reject malicious packets in ipv6_gso_segment()

  - sched: mqprio: fix stack out-of-bounds write in tc entry parsing

  - net: drop UFO packets (injected via virtio) in udp_rcv_segment()

  - eth: mlx5: correctly set gso_segs when LRO is used, avoid false
    positive checksum validation errors

  - netpoll: prevent hanging NAPI when netcons gets enabled

  - phy: mscc: fix parsing of unicast frames for PTP timestamping

  - a number of device tree / OF reference leak fixes

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

* tag 'net-6.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (44 commits)
  pptp: fix pptp_xmit() error path
  net: ti: icssg-prueth: Fix skb handling for XDP_PASS
  net: Update threaded state in napi config in netif_set_threaded
  selftests: netdevsim: Xfail nexthop test on slow machines
  eth: fbnic: Lock the tx_dropped update
  eth: fbnic: Fix tx_dropped reporting
  eth: fbnic: remove the debugging trick of super high page bias
  net: ftgmac100: fix potential NULL pointer access in ftgmac100_phy_disconnect
  dt-bindings: net: Replace bouncing Alexandru Tachici emails
  dpll: zl3073x: ZL3073X_I2C and ZL3073X_SPI should depend on NET
  net/sched: mqprio: fix stack out-of-bounds write in tc entry parsing
  Revert "net: mdio_bus: Use devm for getting reset GPIO"
  selftests: net: packetdrill: xfail all problems on slow machines
  net/packet: fix a race in packet_set_ring() and packet_notifier()
  benet: fix BUG when creating VFs
  net: airoha: npu: Add missing MODULE_FIRMWARE macros
  net: devmem: fix DMA direction on unmapping
  ipa: fix compile-testing with qcom-mdt=m
  eth: fbnic: unlink NAPIs from queues on error to open
  net: Add locking to protect skb->dev access in ip_output
  ...
commit 3781648824
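Several of the driver fixes below ("a number of device tree / OF reference leak fixes") apply the same rule: of_find_compatible_node() and of_parse_phandle() return a device node that must be released with of_node_put() on every exit path, and of_find_device_by_node() returns a platform device with an elevated struct device refcount that must be dropped with put_device(). A minimal sketch of the corrected pattern, with hypothetical names ("vendor,example-ptp" and the drvdata use are illustrative only):

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Hypothetical lookup showing the reference discipline these fixes apply. */
static int example_lookup(void)
{
	struct platform_device *pdev;
	struct device_node *np;
	void *priv;

	np = of_find_compatible_node(NULL, NULL, "vendor,example-ptp");
	if (!np)
		return -ENODEV;

	if (!of_device_is_available(np)) {
		of_node_put(np);		/* release even on error paths */
		return -ENODEV;
	}

	pdev = of_find_device_by_node(np);	/* takes a struct device ref */
	of_node_put(np);			/* node ref no longer needed */
	if (!pdev)
		return -EPROBE_DEFER;

	priv = platform_get_drvdata(pdev);
	/* ... use priv while the device reference is held ... */

	put_device(&pdev->dev);			/* the previously-leaked ref */
	return priv ? 0 : -EPROBE_DEFER;
}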
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Analog Devices ADIN1200/ADIN1300 PHY
 
 maintainers:
-  - Alexandru Tachici <alexandru.tachici@analog.com>
+  - Marcelo Schmitt <marcelo.schmitt@analog.com>
 
 description: |
   Bindings for Analog Devices Industrial Ethernet PHYs
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ADI ADIN1110 MAC-PHY
 
 maintainers:
-  - Alexandru Tachici <alexandru.tachici@analog.com>
+  - Marcelo Schmitt <marcelo.schmitt@analog.com>
 
 description: |
   The ADIN1110 is a low power single port 10BASE-T1L MAC-
@@ -2342,9 +2342,6 @@ operations:
 
     do: &module-eeprom-get-op
       request:
        attributes:
          - header
-      reply:
-        attributes:
-          - header
          - offset
@@ -2352,6 +2349,9 @@ operations:
          - page
          - bank
          - i2c-address
+      reply:
+        attributes:
+          - header
          - data
      dump: *module-eeprom-get-op
    -
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
 config ZL3073X
-	tristate "Microchip Azurite DPLL/PTP/SyncE devices"
+	tristate "Microchip Azurite DPLL/PTP/SyncE devices" if COMPILE_TEST
 	depends on NET
 	select DPLL
 	select NET_DEVLINK
@@ -16,9 +16,9 @@ config ZL3073X
 
 config ZL3073X_I2C
 	tristate "I2C bus implementation for Microchip Azurite devices"
-	depends on I2C && ZL3073X
+	depends on I2C && NET
 	select REGMAP_I2C
-	default m
+	select ZL3073X
 	help
 	  This is I2C bus implementation for Microchip Azurite DPLL/PTP/SyncE
 	  devices.
@@ -28,9 +28,9 @@ config ZL3073X_I2C
 
 config ZL3073X_SPI
 	tristate "SPI bus implementation for Microchip Azurite devices"
-	depends on SPI && ZL3073X
+	depends on NET && SPI
 	select REGMAP_SPI
-	default m
+	select ZL3073X
 	help
 	  This is SPI bus implementation for Microchip Azurite DPLL/PTP/SyncE
 	  devices.
@@ -579,6 +579,8 @@ static struct platform_driver airoha_npu_driver = {
 };
 module_platform_driver(airoha_npu_driver);
 
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
 MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
@@ -508,9 +508,11 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
 		   FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
 }
 
-struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
-						  u32 hash)
+static struct airoha_foe_entry *
+airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
 {
+	lockdep_assert_held(&ppe_lock);
+
 	if (hash < PPE_SRAM_NUM_ENTRIES) {
 		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
 		struct airoha_eth *eth = ppe->eth;
@@ -537,6 +539,18 @@ struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
 	return ppe->foe + hash * sizeof(struct airoha_foe_entry);
 }
 
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+						  u32 hash)
+{
+	struct airoha_foe_entry *hwe;
+
+	spin_lock_bh(&ppe_lock);
+	hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
+	spin_unlock_bh(&ppe_lock);
+
+	return hwe;
+}
+
 static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
 					 struct airoha_foe_entry *hwe)
 {
@@ -651,7 +665,7 @@ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
 	struct airoha_flow_table_entry *f;
 	int type;
 
-	hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
+	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
 	if (!hwe_p)
 		return -EINVAL;
 
@@ -703,7 +717,7 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
 
 	spin_lock_bh(&ppe_lock);
 
-	hwe = airoha_ppe_foe_get_entry(ppe, hash);
+	hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
 	if (!hwe)
 		goto unlock;
 
@@ -818,7 +832,7 @@ airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
 	u32 ib1, state;
 	int idle;
 
-	hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
+	hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
 	if (!hwe)
 		continue;
 
@@ -855,7 +869,7 @@ static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
 	if (e->hash == 0xffff)
 		goto unlock;
 
-	hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
+	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
 	if (!hwe_p)
 		goto unlock;
 
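The airoha change is a classic lock-split refactor: the lookup body becomes a _locked helper annotated with lockdep_assert_held(), callers that already hold ppe_lock use it directly, and the old public name becomes a thin wrapper that takes the lock. A generic sketch of the shape, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/types.h>

struct foe_entry { u32 data; };
struct foe_table { struct foe_entry *slots; };

static DEFINE_SPINLOCK(table_lock);

/* Core lookup: every caller must already hold table_lock. */
static struct foe_entry *table_get_entry_locked(struct foe_table *t, u32 hash)
{
	lockdep_assert_held(&table_lock);
	/* ... refresh/copy the hardware entry under the lock ... */
	return &t->slots[hash];
}

/* Public wrapper for unlocked callers; keeps the old API intact. */
struct foe_entry *table_get_entry(struct foe_table *t, u32 hash)
{
	struct foe_entry *e;

	spin_lock_bh(&table_lock);
	e = table_get_entry_locked(t, hash);
	spin_unlock_bh(&table_lock);

	return e;
}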
@@ -3856,8 +3856,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 	status = be_mcc_notify_wait(adapter);
 
 err:
-	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
 	spin_unlock_bh(&adapter->mcc_lock);
+	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
 	return status;
 }
 
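The be_cmd_set_mac_list() hunk moves dma_free_coherent() after spin_unlock_bh(): freeing coherent DMA memory can sleep on some platforms, so doing it inside a BH-disabled critical section risks the "BUG when creating VFs" named in the shortlog. A sketch of the safe ordering, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

/* Hypothetical command teardown: leave the atomic section before freeing. */
static int cmd_finish(struct device *dev, spinlock_t *mcc_lock,
		      size_t size, void *va, dma_addr_t da, int status)
{
	spin_unlock_bh(mcc_lock);		/* end of atomic context */
	dma_free_coherent(dev, size, va, da);	/* may sleep; now safe */
	return status;
}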
@@ -1750,16 +1750,17 @@ err_register_mdiobus:
 static void ftgmac100_phy_disconnect(struct net_device *netdev)
 {
 	struct ftgmac100 *priv = netdev_priv(netdev);
+	struct phy_device *phydev = netdev->phydev;
 
-	if (!netdev->phydev)
+	if (!phydev)
 		return;
 
-	phy_disconnect(netdev->phydev);
+	phy_disconnect(phydev);
 	if (of_phy_is_fixed_link(priv->dev->of_node))
 		of_phy_deregister_fixed_link(priv->dev->of_node);
 
 	if (priv->use_ncsi)
-		fixed_phy_unregister(netdev->phydev);
+		fixed_phy_unregister(phydev);
 }
 
 static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -371,8 +371,10 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
 		of_node_put(ptp_node);
 	}
 
-	if (ptp_dev)
+	if (ptp_dev) {
 		ptp = platform_get_drvdata(ptp_dev);
+		put_device(&ptp_dev->dev);
+	}
 
 	if (ptp)
 		info->phc_index = ptp->phc_index;
@@ -829,19 +829,29 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
 {
 	struct platform_device *ierb_pdev;
 	struct device_node *ierb_node;
+	int ret;
 
 	ierb_node = of_find_compatible_node(NULL, NULL,
 					    "fsl,ls1028a-enetc-ierb");
-	if (!ierb_node || !of_device_is_available(ierb_node))
+	if (!ierb_node)
 		return -ENODEV;
 
+	if (!of_device_is_available(ierb_node)) {
+		of_node_put(ierb_node);
+		return -ENODEV;
+	}
+
 	ierb_pdev = of_find_device_by_node(ierb_node);
 	of_node_put(ierb_node);
 
 	if (!ierb_pdev)
 		return -EPROBE_DEFER;
 
-	return enetc_ierb_register_pf(ierb_pdev, pdev);
+	ret = enetc_ierb_register_pf(ierb_pdev, pdev);
+
+	put_device(&ierb_pdev->dev);
+
+	return ret;
 }
 
 static const struct enetc_si_ops enetc_psi_ops = {
@@ -1475,8 +1475,10 @@ static int gfar_get_ts_info(struct net_device *dev,
 	if (ptp_node) {
 		ptp_dev = of_find_device_by_node(ptp_node);
 		of_node_put(ptp_node);
-		if (ptp_dev)
+		if (ptp_dev) {
 			ptp = platform_get_drvdata(ptp_dev);
+			put_device(&ptp_dev->dev);
+		}
 	}
 
 	if (ptp)
@@ -2782,7 +2782,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
 	if (!pdev)
 		goto err_of_node_put;
 
-	get_device(&pdev->dev);
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
 		goto err_put_device;
@@ -1574,6 +1574,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 		unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
 
 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
+		skb_shinfo(skb)->gso_segs = lro_num_seg;
 		/* Subtract one since we already counted this as one
 		 * "regular" packet in mlx5e_complete_rx_cqe()
 		 */
@@ -33,7 +33,7 @@ int __fbnic_open(struct fbnic_net *fbn)
 		dev_warn(fbd->dev,
 			 "Error %d sending host ownership message to the firmware\n",
 			 err);
-		goto free_resources;
+		goto err_reset_queues;
 	}
 
 	err = fbnic_time_start(fbn);
@@ -57,6 +57,8 @@ time_stop:
 	fbnic_time_stop(fbn);
 release_ownership:
 	fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+err_reset_queues:
+	fbnic_reset_netif_queues(fbn);
 free_resources:
 	fbnic_free_resources(fbn);
 free_napi_vectors:
@@ -420,15 +422,17 @@ static void fbnic_get_stats64(struct net_device *dev,
 	tx_packets = stats->packets;
 	tx_dropped = stats->dropped;
 
-	stats64->tx_bytes = tx_bytes;
-	stats64->tx_packets = tx_packets;
-	stats64->tx_dropped = tx_dropped;
-
 	/* Record drops from Tx HW Datapath */
+	spin_lock(&fbd->hw_stats_lock);
 	tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
 		      fbd->hw_stats.tti.cm_drop.frames.value +
 		      fbd->hw_stats.tti.frame_drop.frames.value +
 		      fbd->hw_stats.tti.tbi_drop.frames.value;
+	spin_unlock(&fbd->hw_stats_lock);
+
+	stats64->tx_bytes = tx_bytes;
+	stats64->tx_packets = tx_packets;
+	stats64->tx_dropped = tx_dropped;
 
 	for (i = 0; i < fbn->num_tx_queues; i++) {
 		struct fbnic_ring *txr = fbn->tx[i];
@@ -661,8 +661,8 @@ static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
 {
 	struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
 
-	page_pool_fragment_page(page, PAGECNT_BIAS_MAX);
-	rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX;
+	page_pool_fragment_page(page, FBNIC_PAGECNT_BIAS_MAX);
+	rx_buf->pagecnt_bias = FBNIC_PAGECNT_BIAS_MAX;
 	rx_buf->page = page;
 }
 
@@ -91,10 +91,8 @@ struct fbnic_queue_stats {
 	struct u64_stats_sync syncp;
 };
 
-/* Pagecnt bias is long max to reserve the last bit to catch overflow
- * cases where if we overcharge the bias it will flip over to be negative.
- */
-#define PAGECNT_BIAS_MAX	LONG_MAX
+#define FBNIC_PAGECNT_BIAS_MAX	PAGE_SIZE
 
 struct fbnic_rx_buf {
 	struct page *page;
 	long pagecnt_bias;
@@ -442,7 +442,7 @@ static void efx_tc_update_encap(struct efx_nic *efx,
 			rule = container_of(acts, struct efx_tc_flow_rule, acts);
 			if (rule->fallback)
 				fallback = rule->fallback;
-			else /* fallback: deliver to PF */
+			else /* fallback of the fallback: deliver to PF */
 				fallback = &efx->tc->facts.pf;
 			rc = efx_mae_update_rule(efx, fallback->fw_id,
 						 rule->fw_id);
@@ -685,11 +685,17 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
 	struct platform_device *pdev;
 	struct device_node *iep_np;
 	struct icss_iep *iep;
+	int ret;
 
 	iep_np = of_parse_phandle(np, "ti,iep", idx);
-	if (!iep_np || !of_device_is_available(iep_np))
+	if (!iep_np)
 		return ERR_PTR(-ENODEV);
 
+	if (!of_device_is_available(iep_np)) {
+		of_node_put(iep_np);
+		return ERR_PTR(-ENODEV);
+	}
+
 	pdev = of_find_device_by_node(iep_np);
 	of_node_put(iep_np);
@@ -698,21 +704,28 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
 		return ERR_PTR(-EPROBE_DEFER);
 
 	iep = platform_get_drvdata(pdev);
-	if (!iep)
-		return ERR_PTR(-EPROBE_DEFER);
+	if (!iep) {
+		ret = -EPROBE_DEFER;
+		goto err_put_pdev;
+	}
 
 	device_lock(iep->dev);
 	if (iep->client_np) {
 		device_unlock(iep->dev);
 		dev_err(iep->dev, "IEP is already acquired by %s",
 			iep->client_np->name);
-		return ERR_PTR(-EBUSY);
+		ret = -EBUSY;
+		goto err_put_pdev;
 	}
 	iep->client_np = np;
 	device_unlock(iep->dev);
 	get_device(iep->dev);
 
 	return iep;
+
+err_put_pdev:
+	put_device(&pdev->dev);
+
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(icss_iep_get_idx);
 
@@ -706,9 +706,9 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
 	struct page_pool *pool;
 	struct sk_buff *skb;
 	struct xdp_buff xdp;
+	int headroom, ret;
 	u32 *psdata;
 	void *pa;
-	int ret;
 
 	*xdp_state = 0;
 	pool = rx_chn->pg_pool;
@@ -757,22 +757,23 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
 		xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
 
 		*xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
-		if (*xdp_state == ICSSG_XDP_PASS)
-			skb = xdp_build_skb_from_buff(&xdp);
-		else
+		if (*xdp_state != ICSSG_XDP_PASS)
 			goto requeue;
+		headroom = xdp.data - xdp.data_hard_start;
+		pkt_len = xdp.data_end - xdp.data;
 	} else {
-		/* prepare skb and send to n/w stack */
-		skb = napi_build_skb(pa, PAGE_SIZE);
+		headroom = PRUETH_HEADROOM;
 	}
 
+	/* prepare skb and send to n/w stack */
+	skb = napi_build_skb(pa, PAGE_SIZE);
 	if (!skb) {
 		ndev->stats.rx_dropped++;
 		page_pool_recycle_direct(pool, page);
 		goto requeue;
 	}
 
-	skb_reserve(skb, PRUETH_HEADROOM);
+	skb_reserve(skb, headroom);
 	skb_put(skb, pkt_len);
 	skb->dev = ndev;
 
@@ -5,7 +5,7 @@ config QCOM_IPA
 	depends on INTERCONNECT
 	depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
 	depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
-	select QCOM_MDT_LOADER if ARCH_QCOM
+	select QCOM_MDT_LOADER
 	select QCOM_SCM
 	select QCOM_QMI_HELPERS
 	help
@@ -37,8 +37,12 @@ static const char *ipa_version_string(struct ipa *ipa)
 		return "4.11";
 	case IPA_VERSION_5_0:
 		return "5.0";
+	case IPA_VERSION_5_1:
+		return "5.1";
+	case IPA_VERSION_5_5:
+		return "5.5";
 	default:
-		return "0.0";	/* Won't happen (checked at probe time) */
+		return "0.0";	/* Should not happen */
 	}
 }
 
@@ -209,10 +209,9 @@ static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
 	if (ret)
 		return ret;
 
-	if (!priv->clk)
+	rate = clk_get_rate(priv->clk);
+	if (!rate)
 		rate = 250000000;
-	else
-		rate = clk_get_rate(priv->clk);
 
 	div = (rate / (2 * priv->clk_freq)) - 1;
 	if (div & ~MDIO_CLK_DIV_MASK) {
@@ -900,6 +900,7 @@ static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
 				     get_unaligned_be32(ptp_multicast));
 	} else {
 		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
+		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST;
 		vsc85xx_ts_write_csr(phydev, blk,
 				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
 		vsc85xx_ts_write_csr(phydev, blk,
@@ -98,6 +98,7 @@
 #define MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(x)	(MSCC_ANA_ETH1_FLOW_ENA(x) + 3)
 #define ANA_ETH1_FLOW_ADDR_MATCH2_MASK_MASK	GENMASK(22, 20)
 #define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST	0x400000
+#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST	0x200000
 #define ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR	0x100000
 #define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST_MASK	GENMASK(17, 16)
 #define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST	0x020000
@@ -785,6 +785,7 @@ static struct phy_driver smsc_phy_driver[] = {
 
 	/* PHY_BASIC_FEATURES */
 
+	.flags		= PHY_RST_AFTER_CLK_EN,
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
@@ -159,19 +159,17 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	int len;
 	unsigned char *data;
 	__u32 seq_recv;
-
-
 	struct rtable *rt;
 	struct net_device *tdev;
 	struct iphdr *iph;
 	int max_headroom;
 
 	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
-		goto tx_error;
+		goto tx_drop;
 
 	rt = pptp_route_output(po, &fl4);
 	if (IS_ERR(rt))
-		goto tx_error;
+		goto tx_drop;
 
 	tdev = rt->dst.dev;
@@ -179,16 +177,20 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 
 	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
-		if (!new_skb) {
-			ip_rt_put(rt);
+
+		if (!new_skb)
 			goto tx_error;
-		}
+
 		if (skb->sk)
 			skb_set_owner_w(new_skb, skb->sk);
 		consume_skb(skb);
 		skb = new_skb;
 	}
 
+	/* Ensure we can safely access protocol field and LCP code */
+	if (!pskb_may_pull(skb, 3))
+		goto tx_error;
+
 	data = skb->data;
 	islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
@@ -262,6 +264,8 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	return 1;
 
 tx_error:
+	ip_rt_put(rt);
+tx_drop:
 	kfree_skb(skb);
 	return 1;
 }
@@ -3033,6 +3033,29 @@ static inline void skb_reset_transport_header(struct sk_buff *skb)
 	skb->transport_header = offset;
 }
 
+/**
+ * skb_reset_transport_header_careful - conditionally reset transport header
+ * @skb: buffer to alter
+ *
+ * Hardened version of skb_reset_transport_header().
+ *
+ * Returns: true if the operation was a success.
+ */
+static inline bool __must_check
+skb_reset_transport_header_careful(struct sk_buff *skb)
+{
+	long offset = skb->data - skb->head;
+
+	if (unlikely(offset != (typeof(skb->transport_header))offset))
+		return false;
+
+	if (unlikely(offset == (typeof(skb->transport_header))~0U))
+		return false;
+
+	skb->transport_header = offset;
+	return true;
+}
+
 static inline void skb_set_transport_header(struct sk_buff *skb,
 					    const int offset)
 {
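skb->transport_header is a 16-bit offset from skb->head, and 0xffff doubles as the "header not set" sentinel; the two checks above reject any offset that either does not survive truncation to 16 bits or collides with the sentinel. An illustrative userspace analogue of the check (names are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel check: reject offsets that a uint16_t field
 * cannot faithfully represent. */
static bool offset_fits(long offset)
{
	if (offset != (uint16_t)offset)	/* truncation would corrupt it */
		return false;
	if (offset == (uint16_t)~0U)	/* 0xffff means "not set" */
		return false;
	return true;
}

int main(void)
{
	/* prints: 1 0 0 */
	printf("%d %d %d\n", offset_fits(1400), offset_fits(0x10001),
	       offset_fits(0xffff));
	return 0;
}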
@@ -568,11 +568,23 @@ static inline struct net_device *dst_dev(const struct dst_entry *dst)
 	return READ_ONCE(dst->dev);
 }
 
+static inline struct net_device *dst_dev_rcu(const struct dst_entry *dst)
+{
+	/* In the future, use rcu_dereference(dst->dev) */
+	WARN_ON_ONCE(!rcu_read_lock_held());
+	return READ_ONCE(dst->dev);
+}
+
 static inline struct net_device *skb_dst_dev(const struct sk_buff *skb)
 {
 	return dst_dev(skb_dst(skb));
 }
 
+static inline struct net_device *skb_dst_dev_rcu(const struct sk_buff *skb)
+{
+	return dst_dev_rcu(skb_dst(skb));
+}
+
 static inline struct net *skb_dst_dev_net(const struct sk_buff *skb)
 {
 	return dev_net(skb_dst_dev(skb));
@@ -586,6 +586,16 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
 {
 	netdev_features_t features = NETIF_F_SG;
 	struct sk_buff *segs;
+	int drop_count;
+
+	/*
+	 * Segmentation in UDP receive path is only for UDP GRO, drop udp
+	 * fragmentation offload (UFO) packets.
+	 */
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
+		drop_count = 1;
+		goto drop;
+	}
 
 	/* Avoid csum recalculation by skb_segment unless userspace explicitly
 	 * asks for the final checksum values
@@ -609,16 +619,18 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
 	 */
 	segs = __skb_gso_segment(skb, features, false);
 	if (IS_ERR_OR_NULL(segs)) {
-		int segs_nr = skb_shinfo(skb)->gso_segs;
-
-		atomic_add(segs_nr, &sk->sk_drops);
-		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
-		kfree_skb(skb);
-		return NULL;
+		drop_count = skb_shinfo(skb)->gso_segs;
+		goto drop;
 	}
 
 	consume_skb(skb);
 	return segs;
+
+drop:
+	atomic_add(drop_count, &sk->sk_drops);
+	SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, drop_count);
+	kfree_skb(skb);
+	return NULL;
 }
 
 static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
@@ -6978,6 +6978,12 @@ int napi_set_threaded(struct napi_struct *napi,
 	if (napi->config)
 		napi->config->threaded = threaded;
 
+	/* Setting/unsetting threaded mode on a napi might not immediately
+	 * take effect, if the current napi instance is actively being
+	 * polled. In this case, the switch between threaded mode and
+	 * softirq mode will happen in the next round of napi_schedule().
+	 * This should not cause hiccups/stalls to the live traffic.
+	 */
 	if (!threaded && napi->thread) {
 		napi_stop_kthread(napi);
 	} else {
@@ -7011,23 +7017,9 @@ int netif_set_threaded(struct net_device *dev,
 
 	WRITE_ONCE(dev->threaded, threaded);
 
-	/* Make sure kthread is created before THREADED bit
-	 * is set.
-	 */
-	smp_mb__before_atomic();
-
-	/* Setting/unsetting threaded mode on a napi might not immediately
-	 * take effect, if the current napi instance is actively being
-	 * polled. In this case, the switch between threaded mode and
-	 * softirq mode will happen in the next round of napi_schedule().
-	 * This should not cause hiccups/stalls to the live traffic.
-	 */
-	list_for_each_entry(napi, &dev->napi_list, dev_list) {
-		if (!threaded && napi->thread)
-			napi_stop_kthread(napi);
-		else
-			assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
-	}
+	/* The error should not occur as the kthreads are already created. */
+	list_for_each_entry(napi, &dev->napi_list, dev_list)
+		WARN_ON_ONCE(napi_set_threaded(napi, threaded));
 
 	return err;
 }
@@ -70,14 +70,13 @@ void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
 	gen_pool_destroy(binding->chunk_pool);
 
 	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
-					  DMA_FROM_DEVICE);
+					  binding->direction);
 	dma_buf_detach(binding->dmabuf, binding->attachment);
 	dma_buf_put(binding->dmabuf);
 	xa_destroy(&binding->bound_rxqs);
-	kvfree(binding->tx_vec);
 	kfree(binding);
 }
 EXPORT_SYMBOL(__net_devmem_dmabuf_binding_free);
 
 struct net_iov *
 net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
@@ -208,6 +207,7 @@ net_devmem_bind_dmabuf(struct net_device *dev,
 	mutex_init(&binding->lock);
 
 	binding->dmabuf = dmabuf;
+	binding->direction = direction;
 
 	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
 	if (IS_ERR(binding->attachment)) {
@@ -312,7 +312,7 @@ err_tx_vec:
 	kvfree(binding->tx_vec);
 err_unmap:
 	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
-					  DMA_FROM_DEVICE);
+					  direction);
 err_detach:
 	dma_buf_detach(dmabuf, binding->attachment);
 err_free_binding:
@@ -56,6 +56,9 @@ struct net_devmem_dmabuf_binding {
 	 */
 	u32 id;
 
+	/* DMA direction, FROM_DEVICE for Rx binding, TO_DEVICE for Tx. */
+	enum dma_data_direction direction;
+
 	/* Array of net_iov pointers for this binding, sorted by virtual
 	 * address. This array is convenient to map the virtual addresses to
 	 * net_iovs in the TX path.
@@ -165,10 +168,6 @@ static inline void net_devmem_put_net_iov(struct net_iov *niov)
 {
 }
 
-static inline void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
-{
-}
-
 static inline struct net_devmem_dmabuf_binding *
 net_devmem_bind_dmabuf(struct net_device *dev,
 		       enum dma_data_direction direction,
@@ -768,6 +768,13 @@ int netpoll_setup(struct netpoll *np)
 	if (err)
 		goto flush;
 	rtnl_unlock();
+
+	/* Make sure all NAPI polls which started before dev->npinfo
+	 * was visible have exited before we start calling NAPI poll.
+	 * NAPI skips locking if dev->npinfo is NULL.
+	 */
+	synchronize_rcu();
+
 	return 0;
 
 flush:
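The netpoll fix is the standard RCU publish-then-wait idiom: NAPI pollers skip the netpoll locking while dev->npinfo reads as NULL, so after publishing a non-NULL pointer the writer must wait out every poll that may have sampled the old NULL value. A minimal sketch of the idiom, with a hypothetical writer mirroring what netpoll_setup() now guarantees:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static void publish_npinfo(struct net_device *dev, struct netpoll_info *npinfo)
{
	rcu_assign_pointer(dev->npinfo, npinfo);	/* make it visible */

	/* Wait for every NAPI poll that began before the pointer was
	 * visible; after this, all pollers observe it and honor the lock. */
	synchronize_rcu();
}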
@@ -425,15 +425,20 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 
 int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net_device *dev = skb_dst_dev(skb), *indev = skb->dev;
+	struct net_device *dev, *indev = skb->dev;
+	int ret_val;
 
+	rcu_read_lock();
+	dev = skb_dst_dev_rcu(skb);
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_IP);
 
-	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
-			    net, sk, skb, indev, dev,
-			    ip_finish_output,
-			    !(IPCB(skb)->flags & IPSKB_REROUTED));
+	ret_val = NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+			       net, sk, skb, indev, dev,
+			       ip_finish_output,
+			       !(IPCB(skb)->flags & IPSKB_REROUTED));
+	rcu_read_unlock();
+	return ret_val;
 }
 EXPORT_SYMBOL(ip_output);
 
@@ -148,7 +148,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
 	ops = rcu_dereference(inet6_offloads[proto]);
 	if (likely(ops && ops->callbacks.gso_segment)) {
-		skb_reset_transport_header(skb);
+		if (!skb_reset_transport_header_careful(skb))
+			goto out;
+
 		segs = ops->callbacks.gso_segment(skb, features);
 		if (!segs)
 			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
@@ -19,6 +19,7 @@
 #include <linux/rculist.h>
 #include <linux/skbuff.h>
 #include <linux/socket.h>
+#include <linux/splice.h>
 #include <linux/uaccess.h>
 #include <linux/workqueue.h>
 #include <linux/syscalls.h>
@@ -1029,6 +1030,11 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
 	ssize_t copied;
 	struct sk_buff *skb;
 
+	if (sock->file->f_flags & O_NONBLOCK || flags & SPLICE_F_NONBLOCK)
+		flags = MSG_DONTWAIT;
+	else
+		flags = 0;
+
 	/* Only support splice for SOCKSEQPACKET */
 
 	skb = skb_recv_datagram(sk, flags, &err);
@@ -1218,7 +1218,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
 	nlk = nlk_sk(sk);
 	rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
 
-	if ((rmem == skb->truesize || rmem < READ_ONCE(sk->sk_rcvbuf)) &&
+	if ((rmem == skb->truesize || rmem <= READ_ONCE(sk->sk_rcvbuf)) &&
 	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
 		netlink_skb_set_owner_r(skb, sk);
 		return 0;
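The one-character netlink change ('<' to '<=') closes a livelock: when the new skb's charge makes sk_rmem_alloc land exactly on sk_rcvbuf, the strict comparison rejected the skb every time and netlink_unicast() retried forever. A toy model of the boundary condition, with made-up sizes:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative boundary check mirroring netlink_attachskb()'s test;
 * rmem is the receive-queue charge after adding the new skb. */
static bool accepts(int rmem, int truesize, int rcvbuf, bool strict_less)
{
	return rmem == truesize ||
	       (strict_less ? rmem < rcvbuf : rmem <= rcvbuf);
}

int main(void)
{
	/* With '<', a message landing exactly on sk_rcvbuf is refused
	 * forever; with '<=', the boundary case is accepted and the
	 * sender makes progress.  Prints: strict: 0 relaxed: 1 */
	printf("strict: %d relaxed: %d\n",
	       accepts(4096, 1024, 4096, true),
	       accepts(4096, 1024, 4096, false));
	return 0;
}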
@@ -4573,10 +4573,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 	spin_lock(&po->bind_lock);
 	was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
 	num = po->num;
-	if (was_running) {
-		WRITE_ONCE(po->num, 0);
+	WRITE_ONCE(po->num, 0);
+	if (was_running)
 		__unregister_prot_hook(sk, false);
-	}
+
 	spin_unlock(&po->bind_lock);
 
 	synchronize_net();
@@ -4608,10 +4608,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 	mutex_unlock(&po->pg_vec_lock);
 
 	spin_lock(&po->bind_lock);
-	if (was_running) {
-		WRITE_ONCE(po->num, num);
+	WRITE_ONCE(po->num, num);
+	if (was_running)
 		register_prot_hook(sk);
-	}
+
 	spin_unlock(&po->bind_lock);
 	if (pg_vec && (po->tp_version > TPACKET_V2)) {
 		/* Because we don't support block-based V3 on tx-ring */
@@ -152,7 +152,7 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
 static const struct
 nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
 	[TCA_MQPRIO_TC_ENTRY_INDEX]	= NLA_POLICY_MAX(NLA_U32,
-							 TC_QOPT_MAX_QUEUE),
+							 TC_QOPT_MAX_QUEUE - 1),
 	[TCA_MQPRIO_TC_ENTRY_FP]	= NLA_POLICY_RANGE(NLA_U32,
 							   TC_FP_EXPRESS,
 							   TC_FP_PREEMPTIBLE),
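TCA_MQPRIO_TC_ENTRY_INDEX is used to index arrays sized TC_QOPT_MAX_QUEUE, and NLA_POLICY_MAX() accepts values up to and including its bound, so the old policy let index == TC_QOPT_MAX_QUEUE through and wrote one element past the on-stack array. A toy illustration (TC_QOPT_MAX_QUEUE is 16 in the kernel headers; the array name is hypothetical):

#include <stdio.h>

#define TC_QOPT_MAX_QUEUE 16	/* kernel value; arrays hold this many slots */

static int fp[TC_QOPT_MAX_QUEUE];

int main(void)
{
	int idx_max_ok = TC_QOPT_MAX_QUEUE - 1;	/* last valid index */

	fp[idx_max_ok] = 1;	/* fine */
	/* fp[TC_QOPT_MAX_QUEUE] = 1; would be a one-element out-of-bounds
	 * write, which is exactly what the policy fix prevents: a validator
	 * that accepts values <= max must be given "array size - 1". */
	printf("%d\n", fp[idx_max_ok]);
	return 0;
}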
@@ -43,6 +43,11 @@ static struct static_key_false taprio_have_working_mqprio;
 #define TAPRIO_SUPPORTED_FLAGS \
 	(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
 #define TAPRIO_FLAGS_INVALID U32_MAX
+/* Minimum value for picos_per_byte to ensure non-zero duration
+ * for minimum-sized Ethernet frames (ETH_ZLEN = 60).
+ * 60 * 17 > PSEC_PER_NSEC (1000)
+ */
+#define TAPRIO_PICOS_PER_BYTE_MIN 17
 
 struct sched_entry {
 	/* Durations between this GCL entry and the GCL entry where the
@@ -1284,7 +1289,8 @@ static void taprio_start_sched(struct Qdisc *sch,
 }
 
 static void taprio_set_picos_per_byte(struct net_device *dev,
-				      struct taprio_sched *q)
+				      struct taprio_sched *q,
+				      struct netlink_ext_ack *extack)
 {
 	struct ethtool_link_ksettings ecmd;
 	int speed = SPEED_10;
@@ -1300,6 +1306,15 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
 
 skip:
 	picos_per_byte = (USEC_PER_SEC * 8) / speed;
+	if (picos_per_byte < TAPRIO_PICOS_PER_BYTE_MIN) {
+		if (!extack)
+			pr_warn("Link speed %d is too high. Schedule may be inaccurate.\n",
+				speed);
+		NL_SET_ERR_MSG_FMT_MOD(extack,
+				       "Link speed %d is too high. Schedule may be inaccurate.",
+				       speed);
+		picos_per_byte = TAPRIO_PICOS_PER_BYTE_MIN;
+	}
 
 	atomic64_set(&q->picos_per_byte, picos_per_byte);
 	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
@@ -1324,7 +1339,7 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
 		if (dev != qdisc_dev(q->root))
 			continue;
 
-		taprio_set_picos_per_byte(dev, q);
+		taprio_set_picos_per_byte(dev, q, NULL);
 
 		stab = rtnl_dereference(q->root->stab);
 
@@ -1844,7 +1859,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 	q->flags = taprio_flags;
 
 	/* Needed for length_to_duration() during netlink attribute parsing */
-	taprio_set_picos_per_byte(dev, q);
+	taprio_set_picos_per_byte(dev, q, extack);
 
 	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
 	if (err < 0)
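The new TAPRIO_PICOS_PER_BYTE_MIN guards the duration math: taprio derives picos_per_byte as (USEC_PER_SEC * 8) / speed with speed in Mb/s, and at link speeds above roughly 470 Gb/s that drops below 17 ps per byte, so a minimum-size 60-byte frame would last under one nanosecond and round to a zero-length slot. A small self-contained check of the arithmetic (mirroring the kernel constants; the helper name is hypothetical):

#include <stdio.h>

#define USEC_PER_SEC 1000000L
#define TAPRIO_PICOS_PER_BYTE_MIN 17

/* Hypothetical helper mirroring taprio's calculation (speed in Mb/s). */
static long picos_per_byte(long speed)
{
	long ppb = (USEC_PER_SEC * 8) / speed;

	return ppb < TAPRIO_PICOS_PER_BYTE_MIN ? TAPRIO_PICOS_PER_BYTE_MIN : ppb;
}

int main(void)
{
	/* 1 Gb/s -> 8000 ps/byte; 800 Gb/s -> clamped from 10 up to 17,
	 * keeping a 60-byte frame above 1 ns: 60 * 17 = 1020 ps > 1000 ps. */
	printf("%ld %ld\n", picos_per_byte(1000), picos_per_byte(800000));
	return 0;
}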
@@ -11,6 +11,7 @@ TEST_GEN_FILES := \
 
 TEST_PROGS := \
 	napi_id.py \
+	napi_threaded.py \
 	netcons_basic.sh \
 	netcons_cmdline.sh \
 	netcons_fragmented_msg.sh \
tools/testing/selftests/drivers/net/napi_threaded.py (new executable file, 111 lines)
@@ -0,0 +1,111 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0

"""
Test napi threaded states.
"""

from lib.py import ksft_run, ksft_exit
from lib.py import ksft_eq, ksft_ne, ksft_ge
from lib.py import NetDrvEnv, NetdevFamily
from lib.py import cmd, defer, ethtool


def _assert_napi_threaded_enabled(nl, napi_id) -> None:
    napi = nl.napi_get({'id': napi_id})
    ksft_eq(napi['threaded'], 'enabled')
    ksft_ne(napi.get('pid'), None)


def _assert_napi_threaded_disabled(nl, napi_id) -> None:
    napi = nl.napi_get({'id': napi_id})
    ksft_eq(napi['threaded'], 'disabled')
    ksft_eq(napi.get('pid'), None)


def _set_threaded_state(cfg, threaded) -> None:
    cmd(f"echo {threaded} > /sys/class/net/{cfg.ifname}/threaded")


def _setup_deferred_cleanup(cfg) -> None:
    combined = ethtool(f"-l {cfg.ifname}", json=True)[0].get("combined", 0)
    ksft_ge(combined, 2)
    defer(ethtool, f"-L {cfg.ifname} combined {combined}")

    threaded = cmd(f"cat /sys/class/net/{cfg.ifname}/threaded").stdout
    defer(_set_threaded_state, cfg, threaded)


def enable_dev_threaded_disable_napi_threaded(cfg, nl) -> None:
    """
    Test that when napi threaded is enabled at device level and
    then disabled at napi level for one napi, the threaded state
    of all napis is preserved after a change in number of queues.
    """

    napis = nl.napi_get({'ifindex': cfg.ifindex}, dump=True)
    ksft_ge(len(napis), 2)

    napi0_id = napis[0]['id']
    napi1_id = napis[1]['id']

    _setup_deferred_cleanup(cfg)

    # set threaded
    _set_threaded_state(cfg, 1)

    # check napi threaded is set for both napis
    _assert_napi_threaded_enabled(nl, napi0_id)
    _assert_napi_threaded_enabled(nl, napi1_id)

    # disable threaded for napi1
    nl.napi_set({'id': napi1_id, 'threaded': 'disabled'})

    cmd(f"ethtool -L {cfg.ifname} combined 1")
    cmd(f"ethtool -L {cfg.ifname} combined 2")
    _assert_napi_threaded_enabled(nl, napi0_id)
    _assert_napi_threaded_disabled(nl, napi1_id)


def change_num_queues(cfg, nl) -> None:
    """
    Test that when napi threaded is enabled at device level,
    the napi threaded state is preserved after a change in
    number of queues.
    """

    napis = nl.napi_get({'ifindex': cfg.ifindex}, dump=True)
    ksft_ge(len(napis), 2)

    napi0_id = napis[0]['id']
    napi1_id = napis[1]['id']

    _setup_deferred_cleanup(cfg)

    # set threaded
    _set_threaded_state(cfg, 1)

    # check napi threaded is set for both napis
    _assert_napi_threaded_enabled(nl, napi0_id)
    _assert_napi_threaded_enabled(nl, napi1_id)

    cmd(f"ethtool -L {cfg.ifname} combined 1")
    cmd(f"ethtool -L {cfg.ifname} combined 2")

    # check napi threaded is set for both napis
    _assert_napi_threaded_enabled(nl, napi0_id)
    _assert_napi_threaded_enabled(nl, napi1_id)


def main() -> None:
    """ Ksft boiler plate main """

    with NetDrvEnv(__file__, queue_count=2) as cfg:
        ksft_run([change_num_queues,
                  enable_dev_threaded_disable_napi_threaded],
                 args=(cfg, NetdevFamily()))
    ksft_exit()


if __name__ == "__main__":
    main()
@@ -1053,6 +1053,6 @@ trap cleanup EXIT
 
 setup_prepare
 
-tests_run
+xfail_on_slow tests_run
 
 exit $EXIT_STATUS
@@ -35,24 +35,7 @@ failfunc=ktap_test_fail
 
 if [[ -n "${KSFT_MACHINE_SLOW}" ]]; then
 	optargs+=('--tolerance_usecs=14000')
-
-	# xfail tests that are known flaky with dbg config, not fixable.
-	# still run them for coverage (and expect 100% pass without dbg).
-	declare -ar xfail_list=(
-		"tcp_blocking_blocking-connect.pkt"
-		"tcp_blocking_blocking-read.pkt"
-		"tcp_eor_no-coalesce-retrans.pkt"
-		"tcp_fast_recovery_prr-ss.*.pkt"
-		"tcp_sack_sack-route-refresh-ip-tos.pkt"
-		"tcp_slow_start_slow-start-after-win-update.pkt"
-		"tcp_timestamping.*.pkt"
-		"tcp_user_timeout_user-timeout-probe.pkt"
-		"tcp_zerocopy_cl.*.pkt"
-		"tcp_zerocopy_epoll_.*.pkt"
-		"tcp_tcp_info_tcp-info-.*-limited.pkt"
-	)
-	readonly xfail_regex="^($(printf '%s|' "${xfail_list[@]}"))$"
-	[[ "$script" =~ ${xfail_regex} ]] && failfunc=ktap_test_xfail
+	failfunc=ktap_test_xfail
 fi
 
 ktap_print_header
@@ -289,11 +289,11 @@ extern_valid_common()
 	orig_base_reachable=$(ip -j ntable show name "$tbl_name" | jq '.[] | select(has("thresh1")) | .["base_reachable"]')
 	run_cmd "ip ntable change name $tbl_name thresh1 10 base_reachable 10000"
 	orig_gc_stale=$(ip -n "$ns1" -j ntable show name "$tbl_name" dev veth0 | jq '.[]["gc_stale"]')
-	run_cmd "ip -n $ns1 ntable change name $tbl_name dev veth0 gc_stale 5000"
+	# Wait orig_base_reachable/2 for the new interval to take effect.
+	run_cmd "sleep $(((orig_base_reachable / 1000) / 2 + 2))"
+	run_cmd "ip -n $ns1 ntable change name $tbl_name dev veth0 gc_stale 1000"
 	run_cmd "ip -n $ns1 neigh add $ip_addr lladdr $mac nud stale dev veth0 extern_valid"
 	run_cmd "ip -n $ns1 neigh add ${subnet}3 lladdr $mac nud stale dev veth0"
-	# Wait orig_base_reachable/2 for the new interval to take effect.
-	run_cmd "sleep $(((orig_base_reachable / 1000) / 2 + 2))"
 	for i in {1..20}; do
 		run_cmd "ip -n $ns1 neigh add ${subnet}$((i + 4)) nud none dev veth0"
 	done
@@ -55,10 +55,10 @@ test_vlan0_del_crash_01() {
 	ip netns exec ${NETNS} ip link add bond0 type bond mode 0
 	ip netns exec ${NETNS} ip link add link bond0 name vlan0 type vlan id 0 protocol 802.1q
 	ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter off
-	ip netns exec ${NETNS} ifconfig bond0 up
+	ip netns exec ${NETNS} ip link set dev bond0 up
 	ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter on
-	ip netns exec ${NETNS} ifconfig bond0 down
-	ip netns exec ${NETNS} ifconfig bond0 up
+	ip netns exec ${NETNS} ip link set dev bond0 down
+	ip netns exec ${NETNS} ip link set dev bond0 up
 	ip netns exec ${NETNS} ip link del vlan0 || fail "Please check vlan HW filter function"
 	cleanup
 }
@@ -68,11 +68,11 @@ test_vlan0_del_crash_02() {
 	setup
 	ip netns exec ${NETNS} ip link add bond0 type bond mode 0
 	ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter off
-	ip netns exec ${NETNS} ifconfig bond0 up
+	ip netns exec ${NETNS} ip link set dev bond0 up
 	ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter on
 	ip netns exec ${NETNS} ip link add link bond0 name vlan0 type vlan id 0 protocol 802.1q
-	ip netns exec ${NETNS} ifconfig bond0 down
-	ip netns exec ${NETNS} ifconfig bond0 up
+	ip netns exec ${NETNS} ip link set dev bond0 down
+	ip netns exec ${NETNS} ip link set dev bond0 up
 	ip netns exec ${NETNS} ip link del vlan0 || fail "Please check vlan HW filter function"
 	cleanup
 }
@@ -84,9 +84,9 @@ test_vlan0_del_crash_03() {
 	ip netns exec ${NETNS} ip link add bond0 type bond mode 0
 	ip netns exec ${NETNS} ip link add link bond0 name vlan0 type vlan id 0 protocol 802.1q
 	ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter off
-	ip netns exec ${NETNS} ifconfig bond0 up
+	ip netns exec ${NETNS} ip link set dev bond0 up
 	ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter on
-	ip netns exec ${NETNS} ifconfig bond0 down
+	ip netns exec ${NETNS} ip link set dev bond0 down
 	ip netns exec ${NETNS} ip link del vlan0 || fail "Please check vlan HW filter function"
 	cleanup
 }