- From: Lorenzo Bianconi <[email protected]>
- Date: Sat, 5 Nov 2022 23:36:19 +0100
- Subject: [PATCH] net: ethernet: mtk_wed: introduce wed wo support
- Introduce WO chip support to the mtk_wed driver. The MTK WED WO module
- implements RX Wireless Ethernet Dispatch and offloads traffic received
- by the WLAN NIC to the wired interface.
- Tested-by: Daniel Golle <[email protected]>
- Co-developed-by: Sujuan Chen <[email protected]>
- Signed-off-by: Sujuan Chen <[email protected]>
- Signed-off-by: Lorenzo Bianconi <[email protected]>
- Signed-off-by: David S. Miller <[email protected]>
- ---
- create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c
- --- a/drivers/net/ethernet/mediatek/Makefile
- +++ b/drivers/net/ethernet/mediatek/Makefile
- @@ -5,7 +5,7 @@
-
- obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
- mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
- -mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o
- +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o
- ifdef CONFIG_DEBUG_FS
- mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
- endif
- --- a/drivers/net/ethernet/mediatek/mtk_wed.c
- +++ b/drivers/net/ethernet/mediatek/mtk_wed.c
- @@ -16,6 +16,7 @@
- #include "mtk_wed_regs.h"
- #include "mtk_wed.h"
- #include "mtk_ppe.h"
- +#include "mtk_wed_wo.h"
-
- #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
-
- @@ -355,6 +356,8 @@ mtk_wed_detach(struct mtk_wed_device *de
-
- mtk_wed_free_buffer(dev);
- mtk_wed_free_tx_rings(dev);
- + if (hw->version != 1)
- + mtk_wed_wo_deinit(hw);
-
- if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
- struct device_node *wlan_node;
- @@ -878,9 +881,11 @@ mtk_wed_attach(struct mtk_wed_device *de
- }
-
- mtk_wed_hw_init_early(dev);
- - if (hw->hifsys)
- + if (hw->version == 1)
- regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
- BIT(hw->index), 0);
- + else
- + ret = mtk_wed_wo_init(hw);
-
- out:
- mutex_unlock(&hw_lock);
- --- a/drivers/net/ethernet/mediatek/mtk_wed.h
- +++ b/drivers/net/ethernet/mediatek/mtk_wed.h
- @@ -10,6 +10,7 @@
- #include <linux/netdevice.h>
-
- struct mtk_eth;
- +struct mtk_wed_wo;
-
- struct mtk_wed_hw {
- struct device_node *node;
- @@ -22,6 +23,7 @@ struct mtk_wed_hw {
- struct regmap *mirror;
- struct dentry *debugfs_dir;
- struct mtk_wed_device *wed_dev;
- + struct mtk_wed_wo *wed_wo;
- u32 debugfs_reg;
- u32 num_flows;
- u8 version;
- --- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
- +++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
- @@ -122,8 +122,7 @@ mtk_wed_mcu_skb_send_msg(struct mtk_wed_
- if (id == MTK_WED_MODULE_ID_WO)
- hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);
-
- - dev_kfree_skb(skb);
- - return 0;
- + return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
- }
-
- static int
- --- /dev/null
- +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
- @@ -0,0 +1,508 @@
- +// SPDX-License-Identifier: GPL-2.0-only
- +/* Copyright (C) 2022 MediaTek Inc.
- + *
- + * Author: Lorenzo Bianconi <[email protected]>
- + * Sujuan Chen <[email protected]>
- + */
- +
- +#include <linux/kernel.h>
- +#include <linux/dma-mapping.h>
- +#include <linux/of_platform.h>
- +#include <linux/interrupt.h>
- +#include <linux/of_address.h>
- +#include <linux/mfd/syscon.h>
- +#include <linux/of_irq.h>
- +#include <linux/bitfield.h>
- +
- +#include "mtk_wed.h"
- +#include "mtk_wed_regs.h"
- +#include "mtk_wed_wo.h"
- +
- +static u32
- +mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
- +{
- + u32 val;
- +
- + if (regmap_read(wo->mmio.regs, reg, &val))
- + val = ~0;
- +
- + return val;
- +}
- +
- +static void
- +mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
- +{
- + regmap_write(wo->mmio.regs, reg, val);
- +}
- +
- +static u32
- +mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
- +{
- + u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
- +
- + return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
- +}
- +
- +static void
- +mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
- +{
- + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
- +}
- +
- +static void
- +mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
- +{
- + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
- +}
- +
- +static void
- +mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
- +{
- + unsigned long flags;
- +
- + spin_lock_irqsave(&wo->mmio.lock, flags);
- + wo->mmio.irq_mask &= ~mask;
- + wo->mmio.irq_mask |= val;
- + if (set)
- + mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
- + spin_unlock_irqrestore(&wo->mmio.lock, flags);
- +}
- +
- +static void
- +mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
- +{
- + mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
- + tasklet_schedule(&wo->mmio.irq_tasklet);
- +}
- +
- +static void
- +mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
- +{
- + mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
- +}
- +
- +static void
- +mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
- +{
- + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
- + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
- +}
- +
- +static void
- +mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
- + u32 val)
- +{
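- + /* ensure descriptor and buffer writes land before the doorbell */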
- + wmb();
- + mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
- +}
- +
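- +/* Pop one completed buffer from @q: unmap it and hand it back to the
- + * caller. With @flush set, descriptors are treated as done so the ring
- + * can be drained unconditionally at queue teardown.
- + */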
- +static void *
- +mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
- + bool flush)
- +{
- + int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
- + int index = (q->tail + 1) % q->n_desc;
- + struct mtk_wed_wo_queue_entry *entry;
- + struct mtk_wed_wo_queue_desc *desc;
- + void *buf;
- +
- + if (!q->queued)
- + return NULL;
- +
- + if (flush)
- + q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
- + else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
- + return NULL;
- +
- + q->tail = index;
- + q->queued--;
- +
- + desc = &q->desc[index];
- + entry = &q->entry[index];
- + buf = entry->buf;
- + if (len)
- + *len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
- + le32_to_cpu(READ_ONCE(desc->ctrl)));
- + if (buf)
- + dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
- + DMA_FROM_DEVICE);
- + entry->buf = NULL;
- +
- + return buf;
- +}
- +
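- +/* Fill every free slot of @q with a DMA-mapped page fragment. For rx
- + * queues the descriptor is programmed as well so the device can DMA
- + * into the buffer; tx buffers are filled at xmit time instead.
- + */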
- +static int
- +mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
- + bool rx)
- +{
- + enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- + int n_buf = 0;
- +
- + spin_lock_bh(&q->lock);
- + while (q->queued < q->n_desc) {
- + /* must not sleep: q->lock is held (spin_lock_bh) */
- + void *buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
- + struct mtk_wed_wo_queue_entry *entry;
- + dma_addr_t addr;
- +
- + if (!buf)
- + break;
- +
- + addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
- + if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
- + skb_free_frag(buf);
- + break;
- + }
- +
- + q->head = (q->head + 1) % q->n_desc;
- + entry = &q->entry[q->head];
- + entry->addr = addr;
- + entry->len = q->buf_size;
- + q->entry[q->head].buf = buf;
- +
- + if (rx) {
- + struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
- + u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
- + FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
- + entry->len);
- +
- + WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
- + WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
- + }
- + q->queued++;
- + n_buf++;
- + }
- + spin_unlock_bh(&q->lock);
- +
- + return n_buf;
- +}
- +
- +static void
- +mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
- +{
- + mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
- + mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
- +}
- +
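- +/* Drain completed rx buffers: wrap each one in an skb and dispatch it
- + * to the MCU layer (command response vs. unsolicited event), then
- + * refill the ring and advertise the new head to the device.
- + */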
- +static void
- +mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
- +{
- + for (;;) {
- + struct mtk_wed_mcu_hdr *hdr;
- + struct sk_buff *skb;
- + void *data;
- + u32 len;
- +
- + data = mtk_wed_wo_dequeue(wo, q, &len, false);
- + if (!data)
- + break;
- +
- + skb = build_skb(data, q->buf_size);
- + if (!skb) {
- + skb_free_frag(data);
- + continue;
- + }
- +
- + __skb_put(skb, len);
- + if (mtk_wed_mcu_check_msg(wo, skb)) {
- + dev_kfree_skb(skb);
- + continue;
- + }
- +
- + hdr = (struct mtk_wed_mcu_hdr *)skb->data;
- + if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
- + mtk_wed_mcu_rx_event(wo, skb);
- + else
- + mtk_wed_mcu_rx_unsolicited_event(wo, skb);
- + }
- +
- + if (mtk_wed_wo_queue_refill(wo, q, true)) {
- + u32 index = (q->head - 1) % q->n_desc;
- +
- + mtk_wed_wo_queue_kick(wo, q, index);
- + }
- +}
- +
- +static irqreturn_t
- +mtk_wed_wo_irq_handler(int irq, void *data)
- +{
- + struct mtk_wed_wo *wo = data;
- +
- + mtk_wed_wo_set_isr(wo, 0);
- + tasklet_schedule(&wo->mmio.irq_tasklet);
- +
- + return IRQ_HANDLED;
- +}
- +
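- +/* Bottom half of mtk_wed_wo_irq_handler(): the hard irq handler masks
- + * everything and schedules this tasklet, which services the rx channel
- + * and re-enables the handled interrupts via mtk_wed_wo_rx_complete().
- + */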
- +static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
- +{
- + struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
- + u32 intr, mask;
- +
- + /* disable interrupts */
- + mtk_wed_wo_set_isr(wo, 0);
- +
- + intr = mtk_wed_wo_get_isr(wo);
- + intr &= wo->mmio.irq_mask;
- + mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
- + mtk_wed_wo_irq_disable(wo, mask);
- +
- + if (intr & MTK_WED_WO_RXCH_INT_MASK) {
- + mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
- + mtk_wed_wo_rx_complete(wo);
- + }
- +}
- +
- +/* mtk wed wo hw queues */
- +
- +static int
- +mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
- + int n_desc, int buf_size, int index,
- + struct mtk_wed_wo_queue_regs *regs)
- +{
- + spin_lock_init(&q->lock);
- + q->regs = *regs;
- + q->n_desc = n_desc;
- + q->buf_size = buf_size;
- +
- + q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
- + &q->desc_dma, GFP_KERNEL);
- + if (!q->desc)
- + return -ENOMEM;
- +
- + q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
- + GFP_KERNEL);
- + if (!q->entry)
- + return -ENOMEM;
- +
- + return 0;
- +}
- +
- +static void
- +mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
- +{
- + mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
- + dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
- + q->desc_dma);
- +}
- +
- +static void
- +mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
- +{
- + struct page *page;
- + int i;
- +
- + spin_lock_bh(&q->lock);
- + for (i = 0; i < q->n_desc; i++) {
- + struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
- +
- + dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
- + DMA_TO_DEVICE);
- + skb_free_frag(entry->buf);
- + entry->buf = NULL;
- + }
- + spin_unlock_bh(&q->lock);
- +
- + if (!q->cache.va)
- + return;
- +
- + page = virt_to_page(q->cache.va);
- + __page_frag_cache_drain(page, q->cache.pagecnt_bias);
- + memset(&q->cache, 0, sizeof(q->cache));
- +}
- +
- +static void
- +mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
- +{
- + struct page *page;
- +
- + spin_lock_bh(&q->lock);
- + for (;;) {
- + void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
- +
- + if (!buf)
- + break;
- +
- + skb_free_frag(buf);
- + }
- + spin_unlock_bh(&q->lock);
- +
- + if (!q->cache.va)
- + return;
- +
- + page = virt_to_page(q->cache.va);
- + __page_frag_cache_drain(page, q->cache.pagecnt_bias);
- + memset(&q->cache, 0, sizeof(q->cache));
- +}
- +
- +static void
- +mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
- +{
- + mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
- + mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
- + mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
- +}
- +
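- +/* Copy @skb into a pre-mapped tx buffer, publish the descriptor and
- + * kick the WO MCU. The skb is consumed regardless of the outcome;
- + * -ENOMEM means the ring was full or the payload did not fit a buffer.
- + */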
- +int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
- + struct sk_buff *skb)
- +{
- + struct mtk_wed_wo_queue_entry *entry;
- + struct mtk_wed_wo_queue_desc *desc;
- + int ret = 0, index;
- + u32 ctrl;
- +
- + spin_lock_bh(&q->lock);
- +
- + q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
- + index = (q->head + 1) % q->n_desc;
- + if (q->tail == index) {
- + ret = -ENOMEM;
- + goto out;
- + }
- +
- + entry = &q->entry[index];
- + if (skb->len > entry->len) {
- + ret = -ENOMEM;
- + goto out;
- + }
- +
- + desc = &q->desc[index];
- + q->head = index;
- +
- + dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
- + DMA_TO_DEVICE);
- + memcpy(entry->buf, skb->data, skb->len);
- + dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
- + DMA_TO_DEVICE);
- +
- + ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
- + MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
- + WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
- + WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
- +
- + mtk_wed_wo_queue_kick(wo, q, q->head);
- + mtk_wed_wo_kickout(wo);
- +out:
- + spin_unlock_bh(&q->lock);
- +
- + dev_kfree_skb(skb);
- +
- + return ret;
- +}
- +
- +static int
- +mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
- +{
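- + /* stub: no dedicated WO exception handling is set up here */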
- + return 0;
- +}
- +
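- +/* Map the WO communication interface through the "mediatek,wo-ccif"
- + * syscon phandle, hook up its interrupt and set up the tx/rx rings.
- + * The CCIF dummy/shadow registers are repurposed as ring base, size
- + * and index registers.
- + */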
- +static int
- +mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
- +{
- + struct mtk_wed_wo_queue_regs regs;
- + struct device_node *np;
- + int ret;
- +
- + np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
- + if (!np)
- + return -ENODEV;
- +
- + wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
- + if (IS_ERR(wo->mmio.regs)) {
- + /* the syscon lookup returns an ERR_PTR, never NULL */
- + of_node_put(np);
- + return PTR_ERR(wo->mmio.regs);
- + }
- +
- + wo->mmio.irq = irq_of_parse_and_map(np, 0);
- + of_node_put(np);
- + wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
- + spin_lock_init(&wo->mmio.lock);
- + tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
- +
- + ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
- + mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
- + KBUILD_MODNAME, wo);
- + if (ret)
- + goto error;
- +
- + regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
- + regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
- + regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
- + regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
- +
- + ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
- + MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
- + &regs);
- + if (ret)
- + goto error;
- +
- + mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
- + mtk_wed_wo_queue_reset(wo, &wo->q_tx);
- +
- + regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
- + regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
- + regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
- + regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
- +
- + ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
- + MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
- + &regs);
- + if (ret)
- + goto error;
- +
- + mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
- + mtk_wed_wo_queue_reset(wo, &wo->q_rx);
- +
- + /* unmask the rx and exception interrupts */
- + mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
- +
- + return 0;
- +
- +error:
- + devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
- +
- + return ret;
- +}
- +
- +static void
- +mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
- +{
- + /* disable interrupts */
- + mtk_wed_wo_set_isr(wo, 0);
- +
- + tasklet_disable(&wo->mmio.irq_tasklet);
- +
- + disable_irq(wo->mmio.irq);
- + devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
- +
- + mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
- + mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
- + mtk_wed_wo_queue_free(wo, &wo->q_tx);
- + mtk_wed_wo_queue_free(wo, &wo->q_rx);
- +}
- +
- +int mtk_wed_wo_init(struct mtk_wed_hw *hw)
- +{
- + struct mtk_wed_wo *wo;
- + int ret;
- +
- + wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
- + if (!wo)
- + return -ENOMEM;
- +
- + hw->wed_wo = wo;
- + wo->hw = hw;
- +
- + ret = mtk_wed_wo_hardware_init(wo);
- + if (ret)
- + return ret;
- +
- + ret = mtk_wed_mcu_init(wo);
- + if (ret)
- + return ret;
- +
- + return mtk_wed_wo_exception_init(wo);
- +}
- +
- +void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
- +{
- + struct mtk_wed_wo *wo = hw->wed_wo;
- +
- + mtk_wed_wo_hw_deinit(wo);
- +}
- --- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
- +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
- @@ -80,6 +80,54 @@ enum mtk_wed_dummy_cr_idx {
- #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK BIT(5)
- #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK BIT(0)
-
- +#define MTK_WED_WO_RING_SIZE 256
- +#define MTK_WED_WO_CMD_LEN 1504
- +
- +#define MTK_WED_WO_TXCH_NUM 0
- +#define MTK_WED_WO_RXCH_NUM 1
- +#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
- +
- +#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
- +#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
- +#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
- +#define MTK_WED_WO_ALL_INT_MASK (MTK_WED_WO_RXCH_INT_MASK | \
- + MTK_WED_WO_EXCEPTION_INT_MASK)
- +
- +#define MTK_WED_WO_CCIF_BUSY 0x004
- +#define MTK_WED_WO_CCIF_START 0x008
- +#define MTK_WED_WO_CCIF_TCHNUM 0x00c
- +#define MTK_WED_WO_CCIF_RCHNUM 0x010
- +#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
- +
- +#define MTK_WED_WO_CCIF_ACK 0x014
- +#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
- +#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
- +#define MTK_WED_WO_CCIF_DUMMY1 0x020
- +#define MTK_WED_WO_CCIF_DUMMY2 0x024
- +#define MTK_WED_WO_CCIF_DUMMY3 0x028
- +#define MTK_WED_WO_CCIF_DUMMY4 0x02c
- +#define MTK_WED_WO_CCIF_SHADOW1 0x030
- +#define MTK_WED_WO_CCIF_SHADOW2 0x034
- +#define MTK_WED_WO_CCIF_SHADOW3 0x038
- +#define MTK_WED_WO_CCIF_SHADOW4 0x03c
- +#define MTK_WED_WO_CCIF_DUMMY5 0x050
- +#define MTK_WED_WO_CCIF_DUMMY6 0x054
- +#define MTK_WED_WO_CCIF_DUMMY7 0x058
- +#define MTK_WED_WO_CCIF_DUMMY8 0x05c
- +#define MTK_WED_WO_CCIF_SHADOW5 0x060
- +#define MTK_WED_WO_CCIF_SHADOW6 0x064
- +#define MTK_WED_WO_CCIF_SHADOW7 0x068
- +#define MTK_WED_WO_CCIF_SHADOW8 0x06c
- +
- +#define MTK_WED_WO_CTL_SD_LEN1 GENMASK(13, 0)
- +#define MTK_WED_WO_CTL_LAST_SEC1 BIT(14)
- +#define MTK_WED_WO_CTL_BURST BIT(15)
- +#define MTK_WED_WO_CTL_SD_LEN0_SHIFT 16
- +#define MTK_WED_WO_CTL_SD_LEN0 GENMASK(29, 16)
- +#define MTK_WED_WO_CTL_LAST_SEC0 BIT(30)
- +#define MTK_WED_WO_CTL_DMA_DONE BIT(31)
- +#define MTK_WED_WO_INFO_WINFO GENMASK(15, 0)
- +
- struct mtk_wed_wo_memory_region {
- const char *name;
- void __iomem *addr;
- @@ -112,10 +160,53 @@ struct mtk_wed_fw_trailer {
- u32 crc;
- };
-
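- +/* CCIF register offsets backing one ring (see mtk_wed_wo_hardware_init) */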
- +struct mtk_wed_wo_queue_regs {
- + u32 desc_base;
- + u32 ring_size;
- + u32 cpu_idx;
- + u32 dma_idx;
- +};
- +
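- +/* ring descriptor shared with the WO firmware, 32 bytes per entry */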
- +struct mtk_wed_wo_queue_desc {
- + __le32 buf0;
- + __le32 ctrl;
- + __le32 buf1;
- + __le32 info;
- + __le32 reserved[4];
- +} __packed __aligned(32);
- +
- +struct mtk_wed_wo_queue_entry {
- + dma_addr_t addr;
- + void *buf;
- + u32 len;
- +};
- +
- +struct mtk_wed_wo_queue {
- + struct mtk_wed_wo_queue_regs regs;
- +
- + struct page_frag_cache cache;
- + spinlock_t lock;
- +
- + struct mtk_wed_wo_queue_desc *desc;
- + dma_addr_t desc_dma;
- +
- + struct mtk_wed_wo_queue_entry *entry;
- +
- + u16 head;
- + u16 tail;
- + int n_desc;
- + int queued;
- + int buf_size;
- +};
- +
- struct mtk_wed_wo {
- struct mtk_wed_hw *hw;
- struct mtk_wed_wo_memory_region boot;
-
- + struct mtk_wed_wo_queue q_tx;
- + struct mtk_wed_wo_queue q_rx;
- +
- struct {
- struct mutex mutex;
- int timeout;
- @@ -124,6 +215,15 @@ struct mtk_wed_wo {
- struct sk_buff_head res_q;
- wait_queue_head_t wait;
- } mcu;
- +
- + struct {
- + struct regmap *regs;
- +
- + spinlock_t lock;
- + struct tasklet_struct irq_tasklet;
- + int irq;
- + u32 irq_mask;
- + } mmio;
- };
-
- static inline int
- @@ -146,5 +246,9 @@ void mtk_wed_mcu_rx_unsolicited_event(st
- int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
- const void *data, int len, bool wait_resp);
- int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
- +int mtk_wed_wo_init(struct mtk_wed_hw *hw);
- +void mtk_wed_wo_deinit(struct mtk_wed_hw *hw);
- +int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
- + struct sk_buff *skb);
-
- #endif /* __MTK_WED_WO_H */