@@ -0,0 +1,1110 @@
+From: Felix Fietkau <[email protected]>
+Date: Tue, 8 Sep 2020 12:16:26 +0200
+Subject: [PATCH] mac80211: reorganize code to remove a forward
+ declaration
+
+Remove the newly added ieee80211_set_vif_encap_ops declaration.
+No further code changes.
+
+Signed-off-by: Felix Fietkau <[email protected]>
+---
+
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -43,7 +43,6 @@
+ */
+
+ static void ieee80211_iface_work(struct work_struct *work);
+-static void ieee80211_set_vif_encap_ops(struct ieee80211_sub_if_data *sdata);
+
+ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
+ {
+@@ -349,6 +348,511 @@ static int ieee80211_check_queues(struct
+ return 0;
+ }
+
++static int ieee80211_open(struct net_device *dev)
++{
++ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
++ int err;
++
++ /* fail early if user set an invalid address */
++ if (!is_valid_ether_addr(dev->dev_addr))
++ return -EADDRNOTAVAIL;
++
++ err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type);
++ if (err)
++ return err;
++
++ return ieee80211_do_open(&sdata->wdev, true);
++}
++
++static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
++ bool going_down)
++{
++ struct ieee80211_local *local = sdata->local;
++ unsigned long flags;
++ struct sk_buff *skb, *tmp;
++ u32 hw_reconf_flags = 0;
++ int i, flushed;
++ struct ps_data *ps;
++ struct cfg80211_chan_def chandef;
++ bool cancel_scan;
++ struct cfg80211_nan_func *func;
++
++ clear_bit(SDATA_STATE_RUNNING, &sdata->state);
++
++ cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata;
++ if (cancel_scan)
++ ieee80211_scan_cancel(local);
++
++ /*
++ * Stop TX on this interface first.
++ */
++ if (sdata->dev)
++ netif_tx_stop_all_queues(sdata->dev);
++
++ ieee80211_roc_purge(local, sdata);
++
++ switch (sdata->vif.type) {
++ case NL80211_IFTYPE_STATION:
++ ieee80211_mgd_stop(sdata);
++ break;
++ case NL80211_IFTYPE_ADHOC:
++ ieee80211_ibss_stop(sdata);
++ break;
++ case NL80211_IFTYPE_MONITOR:
++ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
++ break;
++ list_del_rcu(&sdata->u.mntr.list);
++ break;
++ default:
++ break;
++ }
++
++ /*
++ * Remove all stations associated with this interface.
++ *
++ * This must be done before calling ops->remove_interface()
++ * because otherwise we can later invoke ops->sta_notify()
++ * whenever the STAs are removed, and that invalidates driver
++ * assumptions about always getting a vif pointer that is valid
++ * (because if we remove a STA after ops->remove_interface()
++ * the driver will have removed the vif info already!)
++ *
++ * In WDS mode a station must exist here and be flushed, for
++ * AP_VLANs stations may exist since there's nothing else that
++ * would have removed them, but in other modes there shouldn't
++ * be any stations.
++ */
++ flushed = sta_info_flush(sdata);
++ WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
++ ((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
++ (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1)));
++
++ /* don't count this interface for allmulti while it is down */
++ if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
++ atomic_dec(&local->iff_allmultis);
++
++ if (sdata->vif.type == NL80211_IFTYPE_AP) {
++ local->fif_pspoll--;
++ local->fif_probe_req--;
++ } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
++ local->fif_probe_req--;
++ }
++
++ if (sdata->dev) {
++ netif_addr_lock_bh(sdata->dev);
++ spin_lock_bh(&local->filter_lock);
++ __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
++ sdata->dev->addr_len);
++ spin_unlock_bh(&local->filter_lock);
++ netif_addr_unlock_bh(sdata->dev);
++ }
++
++ del_timer_sync(&local->dynamic_ps_timer);
++ cancel_work_sync(&local->dynamic_ps_enable_work);
++
++ cancel_work_sync(&sdata->recalc_smps);
++ sdata_lock(sdata);
++ mutex_lock(&local->mtx);
++ sdata->vif.csa_active = false;
++ if (sdata->vif.type == NL80211_IFTYPE_STATION)
++ sdata->u.mgd.csa_waiting_bcn = false;
++ if (sdata->csa_block_tx) {
++ ieee80211_wake_vif_queues(local, sdata,
++ IEEE80211_QUEUE_STOP_REASON_CSA);
++ sdata->csa_block_tx = false;
++ }
++ mutex_unlock(&local->mtx);
++ sdata_unlock(sdata);
++
++ cancel_work_sync(&sdata->csa_finalize_work);
++
++ cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
++
++ if (sdata->wdev.cac_started) {
++ chandef = sdata->vif.bss_conf.chandef;
++ WARN_ON(local->suspended);
++ mutex_lock(&local->mtx);
++ ieee80211_vif_release_channel(sdata);
++ mutex_unlock(&local->mtx);
++ cfg80211_cac_event(sdata->dev, &chandef,
++ NL80211_RADAR_CAC_ABORTED,
++ GFP_KERNEL);
++ }
++
++ /* APs need special treatment */
++ if (sdata->vif.type == NL80211_IFTYPE_AP) {
++ struct ieee80211_sub_if_data *vlan, *tmpsdata;
++
++ /* down all dependent devices, that is VLANs */
++ list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
++ u.vlan.list)
++ dev_close(vlan->dev);
++ WARN_ON(!list_empty(&sdata->u.ap.vlans));
++ } else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
++ /* remove all packets in parent bc_buf pointing to this dev */
++ ps = &sdata->bss->ps;
++
++ spin_lock_irqsave(&ps->bc_buf.lock, flags);
++ skb_queue_walk_safe(&ps->bc_buf, skb, tmp) {
++ if (skb->dev == sdata->dev) {
++ __skb_unlink(skb, &ps->bc_buf);
++ local->total_ps_buffered--;
++ ieee80211_free_txskb(&local->hw, skb);
++ }
++ }
++ spin_unlock_irqrestore(&ps->bc_buf.lock, flags);
++ }
++
++ if (going_down)
|
|
|
++ local->open_count--;
|
|
|
++
|
|
|
++ switch (sdata->vif.type) {
|
|
|
++ case NL80211_IFTYPE_AP_VLAN:
|
|
|
++ mutex_lock(&local->mtx);
|
|
|
++ list_del(&sdata->u.vlan.list);
|
|
|
++ mutex_unlock(&local->mtx);
|
|
|
++ RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
|
|
|
++ /* see comment in the default case below */
|
|
|
++ ieee80211_free_keys(sdata, true);
|
|
|
++ /* no need to tell driver */
|
|
|
++ break;
|
|
|
++ case NL80211_IFTYPE_MONITOR:
|
|
|
++ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) {
|
|
|
++ local->cooked_mntrs--;
|
|
|
++ break;
|
|
|
++ }
|
|
|
++
|
|
|
++ local->monitors--;
|
|
|
++ if (local->monitors == 0) {
|
|
|
++ local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR;
|
|
|
++ hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
|
|
|
++ }
|
|
|
++
|
|
|
++ ieee80211_adjust_monitor_flags(sdata, -1);
|
|
|
++ break;
|
|
|
++ case NL80211_IFTYPE_NAN:
|
|
|
++ /* clean all the functions */
|
|
|
++ spin_lock_bh(&sdata->u.nan.func_lock);
|
|
|
++
|
|
|
++ idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, i) {
|
|
|
++ idr_remove(&sdata->u.nan.function_inst_ids, i);
|
|
|
++ cfg80211_free_nan_func(func);
|
|
|
++ }
|
|
|
++ idr_destroy(&sdata->u.nan.function_inst_ids);
|
|
|
++
|
|
|
++ spin_unlock_bh(&sdata->u.nan.func_lock);
|
|
|
++ break;
|
|
|
++ case NL80211_IFTYPE_P2P_DEVICE:
|
|
|
++ /* relies on synchronize_rcu() below */
|
|
|
++ RCU_INIT_POINTER(local->p2p_sdata, NULL);
|
|
|
++ /* fall through */
|
|
|
++ default:
|
|
|
++ cancel_work_sync(&sdata->work);
|
|
|
++ /*
|
|
|
++ * When we get here, the interface is marked down.
|
|
|
++ * Free the remaining keys, if there are any
|
|
|
++ * (which can happen in AP mode if userspace sets
|
|
|
++ * keys before the interface is operating, and maybe
|
|
|
++ * also in WDS mode)
|
|
|
++ *
|
|
|
++ * Force the key freeing to always synchronize_net()
|
|
|
++ * to wait for the RX path in case it is using this
|
|
|
++ * interface enqueuing frames at this very time on
|
|
|
++ * another CPU.
|
|
|
++ */
|
|
|
++ ieee80211_free_keys(sdata, true);
|
|
|
++ skb_queue_purge(&sdata->skb_queue);
|
|
|
++ }
|
|
|
++
|
|
|
++ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
|
|
|
++ for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
|
|
|
++ skb_queue_walk_safe(&local->pending[i], skb, tmp) {
|
|
|
++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
|
++ if (info->control.vif == &sdata->vif) {
|
|
|
++ __skb_unlink(skb, &local->pending[i]);
|
|
|
++ ieee80211_free_txskb(&local->hw, skb);
|
|
|
++ }
|
|
|
++ }
|
|
|
++ }
|
|
|
++ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
|
|
|
++
|
|
|
++ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
|
|
|
++ ieee80211_txq_remove_vlan(local, sdata);
|
|
|
++
|
|
|
++ sdata->bss = NULL;
|
|
|
++
|
|
|
++ if (local->open_count == 0)
|
|
|
++ ieee80211_clear_tx_pending(local);
|
|
|
++
|
|
|
++ sdata->vif.bss_conf.beacon_int = 0;
|
|
|
++
|
|
|
++ /*
|
|
|
++ * If the interface goes down while suspended, presumably because
|
|
|
++ * the device was unplugged and that happens before our resume,
|
|
|
++ * then the driver is already unconfigured and the remainder of
|
|
|
++ * this function isn't needed.
|
|
|
++ * XXX: what about WoWLAN? If the device has software state, e.g.
|
|
|
++ * memory allocated, it might expect teardown commands from
|
|
|
++ * mac80211 here?
|
|
|
++ */
|
|
|
++ if (local->suspended) {
|
|
|
++ WARN_ON(local->wowlan);
|
|
|
++ WARN_ON(rtnl_dereference(local->monitor_sdata));
|
|
|
++ return;
|
|
|
++ }
|
|
|
++
|
|
|
++ switch (sdata->vif.type) {
|
|
|
++ case NL80211_IFTYPE_AP_VLAN:
|
|
|
++ break;
|
|
|
++ case NL80211_IFTYPE_MONITOR:
|
|
|
++ if (local->monitors == 0)
|
|
|
++ ieee80211_del_virtual_monitor(local);
|
|
|
++
|
|
|
++ mutex_lock(&local->mtx);
|
|
|
++ ieee80211_recalc_idle(local);
|
|
|
++ mutex_unlock(&local->mtx);
|
|
|
++
|
|
|
++ if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
|
|
|
++ break;
|
|
|
++
|
|
|
++ /* fall through */
|
|
|
++ default:
|
|
|
++ if (going_down)
|
|
|
++ drv_remove_interface(local, sdata);
|
|
|
++ }
|
|
|
++
|
|
|
++ ieee80211_recalc_ps(local);
|
|
|
++
|
|
|
++ if (cancel_scan)
|
|
|
++ flush_delayed_work(&local->scan_work);
|
|
|
++
|
|
|
++ if (local->open_count == 0) {
|
|
|
++ ieee80211_stop_device(local);
|
|
|
++
|
|
|
++ /* no reconfiguring after stop! */
|
|
|
++ return;
|
|
|
++ }
|
|
|
++
|
|
|
++ /* do after stop to avoid reconfiguring when we stop anyway */
|
|
|
++ ieee80211_configure_filter(local);
|
|
|
++ ieee80211_hw_config(local, hw_reconf_flags);
|
|
|
++
|
|
|
++ if (local->monitors == local->open_count)
|
|
|
++ ieee80211_add_virtual_monitor(local);
|
|
|
++}
|
|
|
++
|
|
|
++static int ieee80211_stop(struct net_device *dev)
|
|
|
++{
|
|
|
++ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
|
|
|
++
|
|
|
++ ieee80211_do_stop(sdata, true);
|
|
|
++
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++static void ieee80211_set_multicast_list(struct net_device *dev)
|
|
|
++{
|
|
|
++ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
|
|
|
++ struct ieee80211_local *local = sdata->local;
|
|
|
++ int allmulti, sdata_allmulti;
|
|
|
++
|
|
|
++ allmulti = !!(dev->flags & IFF_ALLMULTI);
|
|
|
++ sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
|
|
|
++
|
|
|
++ if (allmulti != sdata_allmulti) {
|
|
|
++ if (dev->flags & IFF_ALLMULTI)
|
|
|
++ atomic_inc(&local->iff_allmultis);
|
|
|
++ else
|
|
|
++ atomic_dec(&local->iff_allmultis);
|
|
|
++ sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
|
|
|
++ }
|
|
|
++
|
|
|
++ spin_lock_bh(&local->filter_lock);
|
|
|
++ __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
|
|
|
++ spin_unlock_bh(&local->filter_lock);
|
|
|
++ ieee80211_queue_work(&local->hw, &local->reconfig_filter);
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * Called when the netdev is removed or, by the code below, before
|
|
|
++ * the interface type changes.
|
|
|
++ */
|
|
|
++static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
|
|
|
++{
|
|
|
++ int i;
|
|
|
++
|
|
|
++ /* free extra data */
|
|
|
++ ieee80211_free_keys(sdata, false);
|
|
|
++
|
|
|
++ ieee80211_debugfs_remove_netdev(sdata);
|
|
|
++
|
|
|
++ for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
|
|
|
++ __skb_queue_purge(&sdata->fragments[i].skb_list);
|
|
|
++ sdata->fragment_next = 0;
|
|
|
++
|
|
|
++ if (ieee80211_vif_is_mesh(&sdata->vif))
|
|
|
++ ieee80211_mesh_teardown_sdata(sdata);
|
|
|
++}
|
|
|
++
|
|
|
++static void ieee80211_uninit(struct net_device *dev)
|
|
|
++{
|
|
|
++ ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
|
|
|
++}
|
|
|
++
|
|
|
++#if LINUX_VERSION_IS_GEQ(5,2,0)
|
|
|
++static u16 ieee80211_netdev_select_queue(struct net_device *dev,
|
|
|
++ struct sk_buff *skb,
|
|
|
++ struct net_device *sb_dev)
|
|
|
++#elif LINUX_VERSION_IS_GEQ(4,19,0)
|
|
|
++static u16 ieee80211_netdev_select_queue(struct net_device *dev,
|
|
|
++ struct sk_buff *skb,
|
|
|
++ struct net_device *sb_dev,
|
|
|
++ select_queue_fallback_t fallback)
|
|
|
++#elif LINUX_VERSION_IS_GEQ(3,14,0) || \
|
|
|
++ (LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30)
|
|
|
++static u16 ieee80211_netdev_select_queue(struct net_device *dev,
|
|
|
++ struct sk_buff *skb,
|
|
|
++ void *accel_priv,
|
|
|
++ select_queue_fallback_t fallback)
|
|
|
++#elif LINUX_VERSION_IS_GEQ(3,13,0)
|
|
|
++static u16 ieee80211_netdev_select_queue(struct net_device *dev,
|
|
|
++ struct sk_buff *skb,
|
|
|
++ void *accel_priv)
|
|
|
++#else
|
|
|
++static u16 ieee80211_netdev_select_queue(struct net_device *dev,
|
|
|
++ struct sk_buff *skb)
|
|
|
++#endif
|
|
|
++{
|
|
|
++ return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
|
|
|
++}
|
|
|
++
|
|
|
++static void
|
|
|
++ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
|
|
|
++{
|
|
|
++ int i;
|
|
|
++
|
|
|
++ for_each_possible_cpu(i) {
|
|
|
++ const struct pcpu_sw_netstats *tstats;
|
|
|
++ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
|
|
|
++ unsigned int start;
|
|
|
++
|
|
|
++ tstats = per_cpu_ptr(netdev_tstats(dev), i);
|
|
|
++
|
|
|
++ do {
|
|
|
++ start = u64_stats_fetch_begin_irq(&tstats->syncp);
|
|
|
++ rx_packets = tstats->rx_packets;
|
|
|
++ tx_packets = tstats->tx_packets;
|
|
|
++ rx_bytes = tstats->rx_bytes;
|
|
|
++ tx_bytes = tstats->tx_bytes;
|
|
|
++ } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
|
|
|
++
|
|
|
++ stats->rx_packets += rx_packets;
|
|
|
++ stats->tx_packets += tx_packets;
|
|
|
++ stats->rx_bytes += rx_bytes;
|
|
|
++ stats->tx_bytes += tx_bytes;
|
|
|
++ }
|
|
|
++}
|
|
|
++#if LINUX_VERSION_IS_LESS(4,11,0)
|
|
|
++/* Just declare it here to keep sparse happy */
|
|
|
++struct rtnl_link_stats64 *bp_ieee80211_get_stats64(struct net_device *dev,
|
|
|
++ struct rtnl_link_stats64 *stats);
|
|
|
++struct rtnl_link_stats64 *
|
|
|
++bp_ieee80211_get_stats64(struct net_device *dev,
|
|
|
++ struct rtnl_link_stats64 *stats){
|
|
|
++ ieee80211_get_stats64(dev, stats);
|
|
|
++ return stats;
|
|
|
++}
|
|
|
++#endif
|
|
|
++
|
|
|
++static const struct net_device_ops ieee80211_dataif_ops = {
|
|
|
++ .ndo_open = ieee80211_open,
|
|
|
++ .ndo_stop = ieee80211_stop,
|
|
|
++ .ndo_uninit = ieee80211_uninit,
|
|
|
++ .ndo_start_xmit = ieee80211_subif_start_xmit,
|
|
|
++ .ndo_set_rx_mode = ieee80211_set_multicast_list,
|
|
|
++ .ndo_set_mac_address = ieee80211_change_mac,
|
|
|
++ .ndo_select_queue = ieee80211_netdev_select_queue,
|
|
|
++#if LINUX_VERSION_IS_GEQ(4,11,0)
|
|
|
++ .ndo_get_stats64 = ieee80211_get_stats64,
|
|
|
++#else
|
|
|
++ .ndo_get_stats64 = bp_ieee80211_get_stats64,
|
|
|
++#endif
|
|
|
++
|
|
|
++};
|
|
|
++
|
|
|
++#if LINUX_VERSION_IS_GEQ(5,2,0)
|
|
|
++static u16 ieee80211_monitor_select_queue(struct net_device *dev,
|
|
|
++ struct sk_buff *skb,
|
|
|
++ struct net_device *sb_dev)
|
|
|
++#elif LINUX_VERSION_IS_GEQ(4,19,0)
|
|
|
++static u16 ieee80211_monitor_select_queue(struct net_device *dev,
|
|
|
++ struct sk_buff *skb,
|
|
|
++ struct net_device *sb_dev,
|
|
|
++ select_queue_fallback_t fallback)
|
|
|
++#elif LINUX_VERSION_IS_GEQ(3,14,0) || \
|
|
|
++ (LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30)
|
|
|
++static u16 ieee80211_monitor_select_queue(struct net_device *dev,
|
|
|
++ struct sk_buff *skb,
|
|
|
++ void *accel_priv,
|
|
|
++ select_queue_fallback_t fallback)
|
|
|
++#elif LINUX_VERSION_IS_GEQ(3,13,0)
|
|
|
++static u16 ieee80211_monitor_select_queue(struct net_device *dev,
|
|
|
++ struct sk_buff *skb,
|
|
|
++ void *accel_priv)
|
|
|
++#else
|
|
|
++static u16 ieee80211_monitor_select_queue(struct net_device *dev,
|
|
|
++ struct sk_buff *skb)
|
|
|
++#endif
|
|
|
++{
|
|
|
++ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
|
|
|
++ struct ieee80211_local *local = sdata->local;
|
|
|
++ struct ieee80211_hdr *hdr;
|
|
|
++ struct ieee80211_radiotap_header *rtap = (void *)skb->data;
|
|
|
++
|
|
|
++ if (local->hw.queues < IEEE80211_NUM_ACS)
|
|
|
++ return 0;
|
|
|
++
|
|
|
++ if (skb->len < 4 ||
|
|
|
++ skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */)
|
|
|
++ return 0; /* doesn't matter, frame will be dropped */
|
|
|
++
|
|
|
++ hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
|
|
|
++
|
|
|
++ return ieee80211_select_queue_80211(sdata, skb, hdr);
|
|
|
++}
|
|
|
++
|
|
|
++static const struct net_device_ops ieee80211_monitorif_ops = {
|
|
|
++ .ndo_open = ieee80211_open,
|
|
|
++ .ndo_stop = ieee80211_stop,
|
|
|
++ .ndo_uninit = ieee80211_uninit,
|
|
|
++ .ndo_start_xmit = ieee80211_monitor_start_xmit,
|
|
|
++ .ndo_set_rx_mode = ieee80211_set_multicast_list,
|
|
|
++ .ndo_set_mac_address = ieee80211_change_mac,
|
|
|
++ .ndo_select_queue = ieee80211_monitor_select_queue,
|
|
|
++#if LINUX_VERSION_IS_GEQ(4,11,0)
|
|
|
++ .ndo_get_stats64 = ieee80211_get_stats64,
|
|
|
++#else
|
|
|
++ .ndo_get_stats64 = bp_ieee80211_get_stats64,
|
|
|
++#endif
|
|
|
++
|
|
|
++};
|
|
|
++
|
|
|
++static const struct net_device_ops ieee80211_dataif_8023_ops = {
|
|
|
++ .ndo_open = ieee80211_open,
|
|
|
++ .ndo_stop = ieee80211_stop,
|
|
|
++ .ndo_uninit = ieee80211_uninit,
|
|
|
++ .ndo_start_xmit = ieee80211_subif_start_xmit_8023,
|
|
|
++ .ndo_set_rx_mode = ieee80211_set_multicast_list,
|
|
|
++ .ndo_set_mac_address = ieee80211_change_mac,
|
|
|
++ .ndo_select_queue = ieee80211_netdev_select_queue,
|
|
|
++#if LINUX_VERSION_IS_GEQ(4,11,0)
|
|
|
++ .ndo_get_stats64 = ieee80211_get_stats64,
|
|
|
++#else
|
|
|
++ .ndo_get_stats64 = bp_ieee80211_get_stats64,
|
|
|
++#endif
|
|
|
++
|
|
|
++};
|
|
|
++
|
|
|
+ static bool ieee80211_iftype_supports_encap_offload(enum nl80211_iftype iftype)
|
|
|
+ {
|
|
|
+ switch (iftype) {
|
|
|
+@@ -389,6 +893,31 @@ static bool ieee80211_set_sdata_offload_
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
++static void ieee80211_set_vif_encap_ops(struct ieee80211_sub_if_data *sdata)
|
|
|
++{
|
|
|
++ struct ieee80211_local *local = sdata->local;
|
|
|
++ struct ieee80211_sub_if_data *bss = sdata;
|
|
|
++ bool enabled;
|
|
|
++
|
|
|
++ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
|
|
|
++ if (!sdata->bss)
|
|
|
++ return;
|
|
|
++
|
|
|
++ bss = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
|
|
|
++ }
|
|
|
++
|
|
|
++ if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD) ||
|
|
|
++ !ieee80211_iftype_supports_encap_offload(bss->vif.type))
|
|
|
++ return;
|
|
|
++
|
|
|
++ enabled = bss->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED;
|
|
|
++ if (sdata->wdev.use_4addr &&
|
|
|
++ !(bss->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_4ADDR))
|
|
|
++ enabled = false;
|
|
|
++
|
|
|
++ sdata->dev->netdev_ops = enabled ? &ieee80211_dataif_8023_ops :
|
|
|
++ &ieee80211_dataif_ops;
|
|
|
++}
|
|
|
+
|
|
|
+ static void ieee80211_recalc_sdata_offload(struct ieee80211_sub_if_data *sdata)
|
|
|
+ {
|
|
|
+@@ -866,511 +1395,6 @@ int ieee80211_do_open(struct wireless_de
|
|
|
+ return res;
|
|
|
+ }
|
|
|
+
|
|
|
+-static int ieee80211_open(struct net_device *dev)
|
|
|
+-{
|
|
|
+- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
|
|
|
+- int err;
|
|
|
+-
|
|
|
+- /* fail early if user set an invalid address */
|
|
|
+- if (!is_valid_ether_addr(dev->dev_addr))
|
|
|
+- return -EADDRNOTAVAIL;
|
|
|
+-
|
|
|
+- err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type);
|
|
|
+- if (err)
|
|
|
+- return err;
|
|
|
+-
|
|
|
+- return ieee80211_do_open(&sdata->wdev, true);
|
|
|
+-}
|
|
|
+-
|
|
|
+-static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
|
|
|
+- bool going_down)
|
|
|
+-{
|
|
|
+- struct ieee80211_local *local = sdata->local;
|
|
|
+- unsigned long flags;
|
|
|
+- struct sk_buff *skb, *tmp;
|
|
|
+- u32 hw_reconf_flags = 0;
|
|
|
+- int i, flushed;
|
|
|
+- struct ps_data *ps;
|
|
|
+- struct cfg80211_chan_def chandef;
|
|
|
+- bool cancel_scan;
|
|
|
+- struct cfg80211_nan_func *func;
|
|
|
+-
|
|
|
+- clear_bit(SDATA_STATE_RUNNING, &sdata->state);
|
|
|
+-
|
|
|
+- cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata;
|
|
|
+- if (cancel_scan)
|
|
|
+- ieee80211_scan_cancel(local);
|
|
|
+-
|
|
|
+- /*
|
|
|
+- * Stop TX on this interface first.
|
|
|
+- */
|
|
|
+- if (sdata->dev)
|
|
|
+- netif_tx_stop_all_queues(sdata->dev);
|
|
|
+-
|
|
|
+- ieee80211_roc_purge(local, sdata);
|
|
|
+-
|
|
|
+- switch (sdata->vif.type) {
|
|
|
+- case NL80211_IFTYPE_STATION:
|
|
|
+- ieee80211_mgd_stop(sdata);
|
|
|
+- break;
|
|
|
+- case NL80211_IFTYPE_ADHOC:
|
|
|
+- ieee80211_ibss_stop(sdata);
|
|
|
+- break;
|
|
|
+- case NL80211_IFTYPE_MONITOR:
|
|
|
+- if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
|
|
|
+- break;
|
|
|
+- list_del_rcu(&sdata->u.mntr.list);
|
|
|
+- break;
|
|
|
+- default:
|
|
|
+- break;
|
|
|
+- }
|
|
|
+-
|
|
|
+- /*
|
|
|
+- * Remove all stations associated with this interface.
|
|
|
+- *
|
|
|
+- * This must be done before calling ops->remove_interface()
|
|
|
+- * because otherwise we can later invoke ops->sta_notify()
|
|
|
+- * whenever the STAs are removed, and that invalidates driver
|
|
|
+- * assumptions about always getting a vif pointer that is valid
|
|
|
+- * (because if we remove a STA after ops->remove_interface()
|
|
|
+- * the driver will have removed the vif info already!)
|
|
|
+- *
|
|
|
+- * In WDS mode a station must exist here and be flushed, for
|
|
|
+- * AP_VLANs stations may exist since there's nothing else that
|
|
|
+- * would have removed them, but in other modes there shouldn't
|
|
|
+- * be any stations.
|
|
|
+- */
|
|
|
+- flushed = sta_info_flush(sdata);
|
|
|
+- WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
|
|
|
+- ((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
|
|
|
+- (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1)));
|
|
|
+-
|
|
|
+- /* don't count this interface for allmulti while it is down */
|
|
|
+- if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
|
|
|
+- atomic_dec(&local->iff_allmultis);
|
|
|
+-
|
|
|
+- if (sdata->vif.type == NL80211_IFTYPE_AP) {
|
|
|
+- local->fif_pspoll--;
|
|
|
+- local->fif_probe_req--;
|
|
|
+- } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
|
|
|
+- local->fif_probe_req--;
|
|
|
+- }
|
|
|
+-
|
|
|
+- if (sdata->dev) {
|
|
|
+- netif_addr_lock_bh(sdata->dev);
|
|
|
+- spin_lock_bh(&local->filter_lock);
|
|
|
+- __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
|
|
|
+- sdata->dev->addr_len);
|
|
|
+- spin_unlock_bh(&local->filter_lock);
|
|
|
+- netif_addr_unlock_bh(sdata->dev);
|
|
|
+- }
|
|
|
+-
|
|
|
+- del_timer_sync(&local->dynamic_ps_timer);
|
|
|
+- cancel_work_sync(&local->dynamic_ps_enable_work);
|
|
|
+-
|
|
|
+- cancel_work_sync(&sdata->recalc_smps);
|
|
|
+- sdata_lock(sdata);
|
|
|
+- mutex_lock(&local->mtx);
|
|
|
+- sdata->vif.csa_active = false;
|
|
|
+- if (sdata->vif.type == NL80211_IFTYPE_STATION)
|
|
|
+- sdata->u.mgd.csa_waiting_bcn = false;
|
|
|
+- if (sdata->csa_block_tx) {
|
|
|
+- ieee80211_wake_vif_queues(local, sdata,
|
|
|
+- IEEE80211_QUEUE_STOP_REASON_CSA);
|
|
|
+- sdata->csa_block_tx = false;
|
|
|
+- }
|
|
|
+- mutex_unlock(&local->mtx);
|
|
|
+- sdata_unlock(sdata);
|
|
|
+-
|
|
|
+- cancel_work_sync(&sdata->csa_finalize_work);
|
|
|
+-
|
|
|
+- cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
|
|
|
+-
|
|
|
+- if (sdata->wdev.cac_started) {
|
|
|
+- chandef = sdata->vif.bss_conf.chandef;
|
|
|
+- WARN_ON(local->suspended);
|
|
|
+- mutex_lock(&local->mtx);
|
|
|
+- ieee80211_vif_release_channel(sdata);
|
|
|
+- mutex_unlock(&local->mtx);
|
|
|
+- cfg80211_cac_event(sdata->dev, &chandef,
|
|
|
+- NL80211_RADAR_CAC_ABORTED,
|
|
|
+- GFP_KERNEL);
|
|
|
+- }
|
|
|
+-
|
|
|
+- /* APs need special treatment */
|
|
|
+- if (sdata->vif.type == NL80211_IFTYPE_AP) {
|
|
|
+- struct ieee80211_sub_if_data *vlan, *tmpsdata;
|
|
|
+-
|
|
|
+- /* down all dependent devices, that is VLANs */
|
|
|
+- list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
|
|
|
+- u.vlan.list)
|
|
|
+- dev_close(vlan->dev);
|
|
|
+- WARN_ON(!list_empty(&sdata->u.ap.vlans));
|
|
|
+- } else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
|
|
|
+- /* remove all packets in parent bc_buf pointing to this dev */
|
|
|
+- ps = &sdata->bss->ps;
|
|
|
+-
|
|
|
+- spin_lock_irqsave(&ps->bc_buf.lock, flags);
|
|
|
+- skb_queue_walk_safe(&ps->bc_buf, skb, tmp) {
|
|
|
+- if (skb->dev == sdata->dev) {
|
|
|
+- __skb_unlink(skb, &ps->bc_buf);
|
|
|
+- local->total_ps_buffered--;
|
|
|
+- ieee80211_free_txskb(&local->hw, skb);
|
|
|
+- }
|
|
|
+- }
|
|
|
+- spin_unlock_irqrestore(&ps->bc_buf.lock, flags);
|
|
|
+- }
|
|
|
+-
|
|
|
+- if (going_down)
|
|
|
+- local->open_count--;
|
|
|
+-
|
|
|
+- switch (sdata->vif.type) {
|
|
|
+- case NL80211_IFTYPE_AP_VLAN:
|
|
|
+- mutex_lock(&local->mtx);
|
|
|
+- list_del(&sdata->u.vlan.list);
|
|
|
+- mutex_unlock(&local->mtx);
|
|
|
+- RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
|
|
|
+- /* see comment in the default case below */
|
|
|
+- ieee80211_free_keys(sdata, true);
|
|
|
+- /* no need to tell driver */
|
|
|
+- break;
|
|
|
+- case NL80211_IFTYPE_MONITOR:
|
|
|
+- if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) {
|
|
|
+- local->cooked_mntrs--;
|
|
|
+- break;
|
|
|
+- }
|
|
|
+-
|
|
|
+- local->monitors--;
|
|
|
+- if (local->monitors == 0) {
|
|
|
+- local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR;
|
|
|
+- hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
|
|
|
+- }
|
|
|
+-
|
|
|
+- ieee80211_adjust_monitor_flags(sdata, -1);
|
|
|
+- break;
|
|
|
+- case NL80211_IFTYPE_NAN:
|
|
|
+- /* clean all the functions */
|
|
|
+- spin_lock_bh(&sdata->u.nan.func_lock);
|
|
|
+-
|
|
|
+- idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, i) {
|
|
|
+- idr_remove(&sdata->u.nan.function_inst_ids, i);
|
|
|
+- cfg80211_free_nan_func(func);
|
|
|
+- }
|
|
|
+- idr_destroy(&sdata->u.nan.function_inst_ids);
|
|
|
+-
|
|
|
+- spin_unlock_bh(&sdata->u.nan.func_lock);
|
|
|
+- break;
|
|
|
+- case NL80211_IFTYPE_P2P_DEVICE:
|
|
|
+- /* relies on synchronize_rcu() below */
|
|
|
+- RCU_INIT_POINTER(local->p2p_sdata, NULL);
|
|
|
+- /* fall through */
|
|
|
+- default:
|
|
|
+- cancel_work_sync(&sdata->work);
|
|
|
+- /*
|
|
|
+- * When we get here, the interface is marked down.
|
|
|
+- * Free the remaining keys, if there are any
|
|
|
+- * (which can happen in AP mode if userspace sets
|
|
|
+- * keys before the interface is operating, and maybe
|
|
|
+- * also in WDS mode)
|
|
|
+- *
|
|
|
+- * Force the key freeing to always synchronize_net()
|
|
|
+- * to wait for the RX path in case it is using this
|
|
|
+- * interface enqueuing frames at this very time on
|
|
|
+- * another CPU.
|
|
|
+- */
|
|
|
+- ieee80211_free_keys(sdata, true);
|
|
|
+- skb_queue_purge(&sdata->skb_queue);
|
|
|
+- }
|
|
|
+-
|
|
|
+- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
|
|
|
+- for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
|
|
|
+- skb_queue_walk_safe(&local->pending[i], skb, tmp) {
|
|
|
+- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
|
+- if (info->control.vif == &sdata->vif) {
|
|
|
+- __skb_unlink(skb, &local->pending[i]);
|
|
|
+- ieee80211_free_txskb(&local->hw, skb);
|
|
|
+- }
|
|
|
+- }
|
|
|
+- }
|
|
|
+- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
|
|
|
+-
|
|
|
+- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
|
|
|
+- ieee80211_txq_remove_vlan(local, sdata);
|
|
|
+-
|
|
|
+- sdata->bss = NULL;
|
|
|
+-
|
|
|
+- if (local->open_count == 0)
|
|
|
+- ieee80211_clear_tx_pending(local);
|
|
|
+-
|
|
|
+- sdata->vif.bss_conf.beacon_int = 0;
|
|
|
+-
|
|
|
+- /*
|
|
|
+- * If the interface goes down while suspended, presumably because
|
|
|
+- * the device was unplugged and that happens before our resume,
|
|
|
+- * then the driver is already unconfigured and the remainder of
|
|
|
+- * this function isn't needed.
|
|
|
+- * XXX: what about WoWLAN? If the device has software state, e.g.
|
|
|
+- * memory allocated, it might expect teardown commands from
|
|
|
+- * mac80211 here?
|
|
|
+- */
|
|
|
+- if (local->suspended) {
|
|
|
+- WARN_ON(local->wowlan);
|
|
|
+- WARN_ON(rtnl_dereference(local->monitor_sdata));
|
|
|
+- return;
|
|
|
+- }
|
|
|
+-
|
|
|
+- switch (sdata->vif.type) {
|
|
|
+- case NL80211_IFTYPE_AP_VLAN:
|
|
|
+- break;
|
|
|
+- case NL80211_IFTYPE_MONITOR:
|
|
|
+- if (local->monitors == 0)
|
|
|
+- ieee80211_del_virtual_monitor(local);
|
|
|
+-
|
|
|
+- mutex_lock(&local->mtx);
|
|
|
+- ieee80211_recalc_idle(local);
|
|
|
+- mutex_unlock(&local->mtx);
|
|
|
+-
|
|
|
+- if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
|
|
|
+- break;
|
|
|
+-
|
|
|
+- /* fall through */
|
|
|
+- default:
|
|
|
+- if (going_down)
|
|
|
+- drv_remove_interface(local, sdata);
|
|
|
+- }
|
|
|
+-
|
|
|
+- ieee80211_recalc_ps(local);
|
|
|
+-
|
|
|
+- if (cancel_scan)
|
|
|
+- flush_delayed_work(&local->scan_work);
|
|
|
+-
|
|
|
+- if (local->open_count == 0) {
|
|
|
+- ieee80211_stop_device(local);
|
|
|
+-
|
|
|
+- /* no reconfiguring after stop! */
|
|
|
+- return;
|
|
|
+- }
|
|
|
+-
|
|
|
+- /* do after stop to avoid reconfiguring when we stop anyway */
|
|
|
+- ieee80211_configure_filter(local);
|
|
|
+- ieee80211_hw_config(local, hw_reconf_flags);
|
|
|
+-
|
|
|
+- if (local->monitors == local->open_count)
|
|
|
+- ieee80211_add_virtual_monitor(local);
|
|
|
+-}
|
|
|
+-
|
|
|
+-static int ieee80211_stop(struct net_device *dev)
|
|
|
+-{
|
|
|
+- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
|
|
|
+-
|
|
|
+- ieee80211_do_stop(sdata, true);
|
|
|
+-
|
|
|
+- return 0;
|
|
|
+-}
|
|
|
+-
|
|
|
+-static void ieee80211_set_multicast_list(struct net_device *dev)
|
|
|
+-{
|
|
|
+- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
|
|
|
+- struct ieee80211_local *local = sdata->local;
|
|
|
+- int allmulti, sdata_allmulti;
|
|
|
+-
|
|
|
+- allmulti = !!(dev->flags & IFF_ALLMULTI);
|
|
|
+- sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
|
|
|
+-
|
|
|
+- if (allmulti != sdata_allmulti) {
|
|
|
+- if (dev->flags & IFF_ALLMULTI)
|
|
|
+- atomic_inc(&local->iff_allmultis);
|
|
|
+- else
|
|
|
+- atomic_dec(&local->iff_allmultis);
|
|
|
+- sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
|
|
|
+- }
|
|
|
+-
|
|
|
+- spin_lock_bh(&local->filter_lock);
|
|
|
+- __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
|
|
|
+- spin_unlock_bh(&local->filter_lock);
|
|
|
+- ieee80211_queue_work(&local->hw, &local->reconfig_filter);
|
|
|
+-}
|
|
|
+-
|
|
|
+-/*
|
|
|
+- * Called when the netdev is removed or, by the code below, before
|
|
|
+- * the interface type changes.
|
|
|
+- */
|
|
|
+-static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
|
|
|
+-{
|
|
|
+- int i;
|
|
|
+-
|
|
|
+- /* free extra data */
|
|
|
+- ieee80211_free_keys(sdata, false);
|
|
|
+-
|
|
|
+- ieee80211_debugfs_remove_netdev(sdata);
|
|
|
+-
|
|
|
+- for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
|
|
|
+- __skb_queue_purge(&sdata->fragments[i].skb_list);
|
|
|
+- sdata->fragment_next = 0;
|
|
|
+-
|
|
|
+- if (ieee80211_vif_is_mesh(&sdata->vif))
|
|
|
+- ieee80211_mesh_teardown_sdata(sdata);
|
|
|
+-}
|
|
|
+-
|
|
|
+-static void ieee80211_uninit(struct net_device *dev)
|
|
|
+-{
|
|
|
+- ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
|
|
|
+-}
|
|
|
+-
|
|
|
+-#if LINUX_VERSION_IS_GEQ(5,2,0)
|
|
|
+-static u16 ieee80211_netdev_select_queue(struct net_device *dev,
|
|
|
+- struct sk_buff *skb,
|
|
|
+- struct net_device *sb_dev)
|
|
|
+-#elif LINUX_VERSION_IS_GEQ(4,19,0)
|
|
|
+-static u16 ieee80211_netdev_select_queue(struct net_device *dev,
|
|
|
+- struct sk_buff *skb,
|
|
|
+- struct net_device *sb_dev,
|
|
|
+- select_queue_fallback_t fallback)
|
|
|
+-#elif LINUX_VERSION_IS_GEQ(3,14,0) || \
|
|
|
+- (LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30)
|
|
|
+-static u16 ieee80211_netdev_select_queue(struct net_device *dev,
|
|
|
+- struct sk_buff *skb,
|
|
|
+- void *accel_priv,
|
|
|
+- select_queue_fallback_t fallback)
|
|
|
+-#elif LINUX_VERSION_IS_GEQ(3,13,0)
|
|
|
+-static u16 ieee80211_netdev_select_queue(struct net_device *dev,
|
|
|
+- struct sk_buff *skb,
|
|
|
+- void *accel_priv)
|
|
|
+-#else
|
|
|
+-static u16 ieee80211_netdev_select_queue(struct net_device *dev,
|
|
|
+- struct sk_buff *skb)
|
|
|
+-#endif
|
|
|
+-{
|
|
|
+- return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
|
|
|
+-}
|
|
|
+-
|
|
|
+-static void
|
|
|
+-ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
|
|
|
+-{
|
|
|
+- int i;
|
|
|
+-
|
|
|
+- for_each_possible_cpu(i) {
|
|
|
+- const struct pcpu_sw_netstats *tstats;
|
|
|
+- u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
|
|
|
+- unsigned int start;
|
|
|
+-
|
|
|
+- tstats = per_cpu_ptr(netdev_tstats(dev), i);
|
|
|
+-
|
|
|
+- do {
|
|
|
+- start = u64_stats_fetch_begin_irq(&tstats->syncp);
|
|
|
+- rx_packets = tstats->rx_packets;
|
|
|
+- tx_packets = tstats->tx_packets;
|
|
|
+- rx_bytes = tstats->rx_bytes;
|
|
|
+- tx_bytes = tstats->tx_bytes;
|
|
|
+- } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
|
|
|
+-
|
|
|
+- stats->rx_packets += rx_packets;
|
|
|
+- stats->tx_packets += tx_packets;
|
|
|
+- stats->rx_bytes += rx_bytes;
|
|
|
+- stats->tx_bytes += tx_bytes;
|
|
|
+- }
|
|
|
+-}
|
|
|
+-#if LINUX_VERSION_IS_LESS(4,11,0)
|
|
|
+-/* Just declare it here to keep sparse happy */
|
|
|
+-struct rtnl_link_stats64 *bp_ieee80211_get_stats64(struct net_device *dev,
|
|
|
+- struct rtnl_link_stats64 *stats);
|
|
|
+-struct rtnl_link_stats64 *
|
|
|
+-bp_ieee80211_get_stats64(struct net_device *dev,
|
|
|
+- struct rtnl_link_stats64 *stats){
|
|
|
+- ieee80211_get_stats64(dev, stats);
|
|
|
+- return stats;
|
|
|
+-}
|
|
|
+-#endif
|
|
|
+-
|
|
|
+-static const struct net_device_ops ieee80211_dataif_ops = {
|
|
|
+- .ndo_open = ieee80211_open,
|
|
|
+- .ndo_stop = ieee80211_stop,
|
|
|
+- .ndo_uninit = ieee80211_uninit,
|
|
|
+- .ndo_start_xmit = ieee80211_subif_start_xmit,
|
|
|
+- .ndo_set_rx_mode = ieee80211_set_multicast_list,
|
|
|
+- .ndo_set_mac_address = ieee80211_change_mac,
|
|
|
+- .ndo_select_queue = ieee80211_netdev_select_queue,
|
|
|
+-#if LINUX_VERSION_IS_GEQ(4,11,0)
|
|
|
+- .ndo_get_stats64 = ieee80211_get_stats64,
|
|
|
+-#else
|
|
|
+- .ndo_get_stats64 = bp_ieee80211_get_stats64,
|
|
|
+-#endif
|
|
|
+-
|
|
|
+-};
|
|
|
+-
|
|
|
+-#if LINUX_VERSION_IS_GEQ(5,2,0)
|
|
|
+-static u16 ieee80211_monitor_select_queue(struct net_device *dev,
|
|
|
+- struct sk_buff *skb,
|
|
|
+- struct net_device *sb_dev)
|
|
|
+-#elif LINUX_VERSION_IS_GEQ(4,19,0)
|
|
|
+-static u16 ieee80211_monitor_select_queue(struct net_device *dev,
|
|
|
+- struct sk_buff *skb,
|
|
|
+- struct net_device *sb_dev,
|
|
|
+- select_queue_fallback_t fallback)
|
|
|
+-#elif LINUX_VERSION_IS_GEQ(3,14,0) || \
|
|
|
+- (LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30)
|
|
|
+-static u16 ieee80211_monitor_select_queue(struct net_device *dev,
|
|
|
+- struct sk_buff *skb,
|
|
|
+- void *accel_priv,
|
|
|
+- select_queue_fallback_t fallback)
|
|
|
+-#elif LINUX_VERSION_IS_GEQ(3,13,0)
|
|
|
+-static u16 ieee80211_monitor_select_queue(struct net_device *dev,
|
|
|
+- struct sk_buff *skb,
|
|
|
+- void *accel_priv)
|
|
|
+-#else
|
|
|
+-static u16 ieee80211_monitor_select_queue(struct net_device *dev,
|
|
|
+- struct sk_buff *skb)
|
|
|
+-#endif
|
|
|
+-{
|
|
|
+- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
|
|
|
+- struct ieee80211_local *local = sdata->local;
|
|
|
+- struct ieee80211_hdr *hdr;
|
|
|
+- struct ieee80211_radiotap_header *rtap = (void *)skb->data;
|
|
|
+-
|
|
|
+- if (local->hw.queues < IEEE80211_NUM_ACS)
|
|
|
+- return 0;
|
|
|
+-
|
|
|
+- if (skb->len < 4 ||
|
|
|
+- skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */)
|
|
|
+- return 0; /* doesn't matter, frame will be dropped */
|
|
|
+-
|
|
|
+- hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
|
|
|
+-
|
|
|
+- return ieee80211_select_queue_80211(sdata, skb, hdr);
|
|
|
+-}
|
|
|
+-
|
|
|
+-static const struct net_device_ops ieee80211_monitorif_ops = {
|
|
|
+- .ndo_open = ieee80211_open,
|
|
|
+- .ndo_stop = ieee80211_stop,
|
|
|
+- .ndo_uninit = ieee80211_uninit,
|
|
|
+- .ndo_start_xmit = ieee80211_monitor_start_xmit,
|
|
|
+- .ndo_set_rx_mode = ieee80211_set_multicast_list,
|
|
|
+- .ndo_set_mac_address = ieee80211_change_mac,
|
|
|
+- .ndo_select_queue = ieee80211_monitor_select_queue,
|
|
|
+-#if LINUX_VERSION_IS_GEQ(4,11,0)
|
|
|
+- .ndo_get_stats64 = ieee80211_get_stats64,
|
|
|
+-#else
|
|
|
+- .ndo_get_stats64 = bp_ieee80211_get_stats64,
|
|
|
+-#endif
|
|
|
+-
|
|
|
+-};
|
|
|
+-
|
|
|
+-static const struct net_device_ops ieee80211_dataif_8023_ops = {
|
|
|
+- .ndo_open = ieee80211_open,
|
|
|
+- .ndo_stop = ieee80211_stop,
|
|
|
+- .ndo_uninit = ieee80211_uninit,
|
|
|
+- .ndo_start_xmit = ieee80211_subif_start_xmit_8023,
|
|
|
+- .ndo_set_rx_mode = ieee80211_set_multicast_list,
|
|
|
+- .ndo_set_mac_address = ieee80211_change_mac,
|
|
|
+- .ndo_select_queue = ieee80211_netdev_select_queue,
|
|
|
+-#if LINUX_VERSION_IS_GEQ(4,11,0)
|
|
|
+- .ndo_get_stats64 = ieee80211_get_stats64,
|
|
|
+-#else
|
|
|
+- .ndo_get_stats64 = bp_ieee80211_get_stats64,
|
|
|
+-#endif
|
|
|
+-
|
|
|
+-};
|
|
|
+-
|
|
|
+ static void ieee80211_if_free(struct net_device *dev)
|
|
|
+ {
|
|
|
+ free_percpu(netdev_tstats(dev));
|
|
|
+@@ -1401,32 +1425,6 @@ static void ieee80211_if_setup_no_queue(
|
|
|
+ #endif
|
|
|
+ }
|
|
|
+
|
|
|
+-static void ieee80211_set_vif_encap_ops(struct ieee80211_sub_if_data *sdata)
|
|
|
+-{
|
|
|
+- struct ieee80211_local *local = sdata->local;
|
|
|
+- struct ieee80211_sub_if_data *bss = sdata;
|
|
|
+- bool enabled;
|
|
|
+-
|
|
|
+- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
|
|
|
+- if (!sdata->bss)
|
|
|
+- return;
|
|
|
+-
|
|
|
+- bss = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
|
|
|
+- }
|
|
|
+-
|
|
|
+- if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD) ||
|
|
|
+- !ieee80211_iftype_supports_encap_offload(bss->vif.type))
|
|
|
+- return;
|
|
|
+-
|
|
|
+- enabled = bss->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED;
|
|
|
+- if (sdata->wdev.use_4addr &&
|
|
|
+- !(bss->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_4ADDR))
|
|
|
+- enabled = false;
|
|
|
+-
|
|
|
+- sdata->dev->netdev_ops = enabled ? &ieee80211_dataif_8023_ops :
|
|
|
+- &ieee80211_dataif_ops;
|
|
|
+-}
|
|
|
+-
|
|
|
+ static void ieee80211_iface_work(struct work_struct *work)
|
|
|
+ {
|
|
|
+ struct ieee80211_sub_if_data *sdata =
|