- From b5c8c5d3888328321e8be1db50b75dff8f514e51 Mon Sep 17 00:00:00 2001
- From: Suruchi Agarwal <[email protected]>
- Date: Thu, 21 Mar 2024 16:21:19 -0700
- Subject: [PATCH] net: ethernet: qualcomm: Add Rx Ethernet DMA support
- Add Rx queue, ring and descriptor configuration, and
- DMA support for the EDMA.
- Change-Id: I612bcd661e74d5bf3ecb33de10fd5298d18ff7e9
- Co-developed-by: Pavithra R <[email protected]>
- Signed-off-by: Pavithra R <[email protected]>
- Signed-off-by: Suruchi Agarwal <[email protected]>
- Alex G: add missing functions that were previously in ppe_api.c:
- - ppe_edma_queue_resource_get()
- - ppe_edma_ring_to_queues_config()
- Signed-off-by: Alexandru Gagniuc <[email protected]>
- ---
- drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
- drivers/net/ethernet/qualcomm/ppe/edma.c | 214 +++-
- drivers/net/ethernet/qualcomm/ppe/edma.h | 22 +-
- .../net/ethernet/qualcomm/ppe/edma_cfg_rx.c | 964 ++++++++++++++++++
- .../net/ethernet/qualcomm/ppe/edma_cfg_rx.h | 48 +
- drivers/net/ethernet/qualcomm/ppe/edma_port.c | 39 +-
- drivers/net/ethernet/qualcomm/ppe/edma_port.h | 31 +
- drivers/net/ethernet/qualcomm/ppe/edma_rx.c | 622 +++++++++++
- drivers/net/ethernet/qualcomm/ppe/edma_rx.h | 287 ++++++
- 9 files changed, 2224 insertions(+), 5 deletions(-)
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_rx.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_rx.h
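For orientation before the diffs, the sketch below condenses the Rx bring-up order that the new code follows inside edma_hw_configure() and edma_setup(). It only strings together functions introduced by this patch, drops all error handling, and the wrapper name is illustrative; it is a reading aid, not code from the patch.

    /* Condensed Rx bring-up order (error handling omitted). */
    static void edma_rx_bringup_sketch(void)
    {
        edma_cfg_rx_rings_alloc();        /* allocate Rx desc and Rx fill rings */
        edma_cfg_rx_disable_interrupts(); /* quiesce Rx interrupts while configuring */
        edma_cfg_rx_rings_disable();
        edma_cfg_rx_ring_mappings();      /* queue-to-ring and desc-to-fill mappings */
        edma_cfg_rx_rings();              /* ring base/size, mitigation, flow control */
        edma_cfg_rx_rings_enable();
        edma_cfg_rx_napi_add();
        edma_cfg_rx_napi_enable();
        edma_cfg_rx_rps_hash_map();       /* spread hash buckets across Rx queues */
        /* edma_setup() then calls edma_irq_register() and
         * edma_cfg_rx_enable_interrupts().
         */
    }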
- --- a/drivers/net/ethernet/qualcomm/ppe/Makefile
- +++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
- @@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
- qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o ppe_port.o
-
- #EDMA
- -qcom-ppe-objs += edma.o edma_port.o
- +qcom-ppe-objs += edma.o edma_cfg_rx.o edma_port.o edma_rx.o
- --- a/drivers/net/ethernet/qualcomm/ppe/edma.c
- +++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
- @@ -18,12 +18,23 @@
- #include <linux/reset.h>
-
- #include "edma.h"
- +#include "edma_cfg_rx.h"
- #include "ppe_regs.h"
-
- #define EDMA_IRQ_NAME_SIZE 32
-
- /* Global EDMA context. */
- struct edma_context *edma_ctx;
- +static char **edma_rxdesc_irq_name;
- +
- +/* Module params. */
- +static int page_mode;
- +module_param(page_mode, int, 0);
- +MODULE_PARM_DESC(page_mode, "Enable page mode (default:0)");
- +
- +static int rx_buff_size;
- +module_param(rx_buff_size, int, 0640);
- +MODULE_PARM_DESC(rx_buff_size, "Rx Buffer size for Jumbo MRU value (default:0)");
-
- /* Priority to multi-queue mapping. */
- static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
- @@ -178,6 +189,59 @@ static int edma_configure_ucast_prio_map
- return ret;
- }
-
- +static int edma_irq_register(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rx = hw_info->rx;
- + int ret;
- + u32 i;
- +
- + /* Request IRQ for RXDESC rings. */
- + edma_rxdesc_irq_name = kzalloc((sizeof(char *) * rx->num_rings),
- + GFP_KERNEL);
- + if (!edma_rxdesc_irq_name)
- + return -ENOMEM;
- +
- + for (i = 0; i < rx->num_rings; i++) {
- + edma_rxdesc_irq_name[i] = kzalloc(EDMA_IRQ_NAME_SIZE,
- + GFP_KERNEL);
- + if (!edma_rxdesc_irq_name[i]) {
- + ret = -ENOMEM;
- + goto rxdesc_irq_name_alloc_fail;
- + }
- +
- + snprintf(edma_rxdesc_irq_name[i], EDMA_IRQ_NAME_SIZE, "edma_rxdesc_%d",
- + rx->ring_start + i);
- +
- + irq_set_status_flags(edma_ctx->intr_info.intr_rx[i], IRQ_DISABLE_UNLAZY);
- +
- + ret = request_irq(edma_ctx->intr_info.intr_rx[i],
- + edma_rx_handle_irq, IRQF_SHARED,
- + edma_rxdesc_irq_name[i],
- + (void *)&edma_ctx->rx_rings[i]);
- + if (ret) {
- + pr_err("RXDESC ring IRQ:%d request failed\n",
- + edma_ctx->intr_info.intr_rx[i]);
- + goto rx_desc_ring_intr_req_fail;
- + }
- +
- + pr_debug("RXDESC ring: %d IRQ:%d request success: %s\n",
- + rx->ring_start + i,
- + edma_ctx->intr_info.intr_rx[i],
- + edma_rxdesc_irq_name[i]);
- + }
- +
- + return 0;
- +
- +rx_desc_ring_intr_req_fail:
- + for (i = 0; i < rx->num_rings; i++)
- + kfree(edma_rxdesc_irq_name[i]);
- +rxdesc_irq_name_alloc_fail:
- + kfree(edma_rxdesc_irq_name);
- +
- + return ret;
- +}
- +
- static int edma_irq_init(void)
- {
- struct edma_hw_info *hw_info = edma_ctx->hw_info;
- @@ -260,6 +324,16 @@ static int edma_irq_init(void)
- return 0;
- }
-
- +static int edma_alloc_rings(void)
- +{
- + if (edma_cfg_rx_rings_alloc()) {
- + pr_err("Error in allocating Rx rings\n");
- + return -ENOMEM;
- + }
- +
- + return 0;
- +}
- +
- static int edma_hw_reset(void)
- {
- struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- @@ -343,6 +417,40 @@ static int edma_hw_configure(void)
- if (!edma_ctx->netdev_arr)
- return -ENOMEM;
-
- + edma_ctx->dummy_dev = alloc_netdev_dummy(0);
- + if (!edma_ctx->dummy_dev) {
- + ret = -ENOMEM;
- + pr_err("Failed to allocate dummy device. ret: %d\n", ret);
- + goto dummy_dev_alloc_failed;
- + }
- +
- + /* Set EDMA jumbo MRU if enabled or set page mode. */
- + if (edma_ctx->rx_buf_size) {
- + edma_ctx->rx_page_mode = false;
- + pr_debug("Rx Jumbo mru is enabled: %d\n", edma_ctx->rx_buf_size);
- + } else {
- + edma_ctx->rx_page_mode = page_mode;
- + }
- +
- + ret = edma_alloc_rings();
- + if (ret) {
- + pr_err("Error in initializaing the rings. ret: %d\n", ret);
- + goto edma_alloc_rings_failed;
- + }
- +
- + /* Disable interrupts. */
- + edma_cfg_rx_disable_interrupts();
- +
- + edma_cfg_rx_rings_disable();
- +
- + edma_cfg_rx_ring_mappings();
- +
- + ret = edma_cfg_rx_rings();
- + if (ret) {
- + pr_err("Error in configuring Rx rings. ret: %d\n", ret);
- + goto edma_cfg_rx_rings_failed;
- + }
- +
- /* Configure DMA request priority, DMA read burst length,
- * and AXI write size.
- */
- @@ -376,6 +484,10 @@ static int edma_hw_configure(void)
- data |= EDMA_MISC_TX_TIMEOUT_MASK;
- edma_ctx->intr_info.intr_mask_misc = data;
-
- + edma_cfg_rx_rings_enable();
- + edma_cfg_rx_napi_add();
- + edma_cfg_rx_napi_enable();
- +
- /* Global EDMA enable and padding enable. */
- data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN;
-
- @@ -389,11 +501,32 @@ static int edma_hw_configure(void)
- if (ret) {
- pr_err("Failed to initialize unicast priority map table: %d\n",
- ret);
- - kfree(edma_ctx->netdev_arr);
- - return ret;
- + goto configure_ucast_prio_map_tbl_failed;
- + }
- +
- + /* Initialize RPS hash map table. */
- + ret = edma_cfg_rx_rps_hash_map();
- + if (ret) {
- + pr_err("Failed to configure rps hash table: %d\n",
- + ret);
- + goto edma_cfg_rx_rps_hash_map_failed;
- }
-
- return 0;
- +
- +edma_cfg_rx_rps_hash_map_failed:
- +configure_ucast_prio_map_tbl_failed:
- + edma_cfg_rx_napi_disable();
- + edma_cfg_rx_napi_delete();
- + edma_cfg_rx_rings_disable();
- +edma_cfg_rx_rings_failed:
- + edma_cfg_rx_rings_cleanup();
- +edma_alloc_rings_failed:
- + free_netdev(edma_ctx->dummy_dev);
- +dummy_dev_alloc_failed:
- + kfree(edma_ctx->netdev_arr);
- +
- + return ret;
- }
-
- /**
- @@ -404,8 +537,31 @@ static int edma_hw_configure(void)
- */
- void edma_destroy(struct ppe_device *ppe_dev)
- {
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i;
- +
- + /* Disable interrupts. */
- + edma_cfg_rx_disable_interrupts();
- +
- + /* Free IRQ for RXDESC rings. */
- + for (i = 0; i < rx->num_rings; i++) {
- + synchronize_irq(edma_ctx->intr_info.intr_rx[i]);
- + free_irq(edma_ctx->intr_info.intr_rx[i],
- + (void *)&edma_ctx->rx_rings[i]);
- + kfree(edma_rxdesc_irq_name[i]);
- + }
- + kfree(edma_rxdesc_irq_name);
- +
- kfree(edma_ctx->intr_info.intr_rx);
- kfree(edma_ctx->intr_info.intr_txcmpl);
- +
- + edma_cfg_rx_napi_disable();
- + edma_cfg_rx_napi_delete();
- + edma_cfg_rx_rings_disable();
- + edma_cfg_rx_rings_cleanup();
- +
- + free_netdev(edma_ctx->dummy_dev);
- kfree(edma_ctx->netdev_arr);
- }
-
- @@ -428,6 +584,7 @@ int edma_setup(struct ppe_device *ppe_de
-
- edma_ctx->hw_info = &ipq9574_hw_info;
- edma_ctx->ppe_dev = ppe_dev;
- + edma_ctx->rx_buf_size = rx_buff_size;
-
- /* Configure the EDMA common clocks. */
- ret = edma_clock_init();
- @@ -450,6 +607,16 @@ int edma_setup(struct ppe_device *ppe_de
- return ret;
- }
-
- + ret = edma_irq_register();
- + if (ret) {
- + dev_err(dev, "Error in irq registration\n");
- + kfree(edma_ctx->intr_info.intr_rx);
- + kfree(edma_ctx->intr_info.intr_txcmpl);
- + return ret;
- + }
- +
- + edma_cfg_rx_enable_interrupts();
- +
- dev_info(dev, "EDMA configuration successful\n");
-
- return 0;
- @@ -478,3 +645,46 @@ int ppe_edma_queue_offset_config(struct
- return ppe_queue_ucast_offset_hash_set(ppe_dev, 0,
- index, queue_offset);
- }
- +
- +/**
- + * ppe_edma_queue_resource_get - Get EDMA queue resource
- + * @ppe_dev: PPE device
- + * @type: Resource type
- + * @res_start: Resource start ID returned
- + * @res_end: Resource end ID returned
- + *
- + * PPE EDMA queue resource includes unicast queue and multicast queue.
- + *
- + * Return 0 on success, negative error code on failure.
- + */
- +int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
- + int *res_start, int *res_end)
- +{
- + if (type != PPE_RES_UCAST && type != PPE_RES_MCAST)
- + return -EINVAL;
- +
- + return ppe_port_resource_get(ppe_dev, 0, type, res_start, res_end);
- +};
- +
- +/**
- + * ppe_edma_ring_to_queues_config - Map EDMA ring to PPE queues
- + * @ppe_dev: PPE device
- + * @ring_id: EDMA ring ID
- + * @num: Number of queues mapped to EDMA ring
- + * @queues: PPE queue IDs
- + *
- + * PPE queues are configured to map with the special EDMA ring ID.
- + *
- + * Return 0 on success, negative error code on failure.
- + */
- +int ppe_edma_ring_to_queues_config(struct ppe_device *ppe_dev, int ring_id,
- + int num, int queues[] __counted_by(num))
- +{
- + u32 queue_bmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {};
- + int index;
- +
- + for (index = 0; index < num; index++)
- + queue_bmap[queues[index] / 32] |= BIT_MASK(queues[index] % 32);
- +
- + return ppe_ring_queue_map_set(ppe_dev, ring_id, queue_bmap);
- +}
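The bitmap packing done by ppe_edma_ring_to_queues_config() is easier to see with concrete numbers. The standalone sketch below reproduces the same arithmetic in plain C; the word count is an assumption, since PPE_RING_TO_QUEUE_BITMAP_WORD_CNT is not visible in this patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for PPE_RING_TO_QUEUE_BITMAP_WORD_CNT (actual value not shown here). */
    #define BITMAP_WORDS 10

    int main(void)
    {
        int queues[] = { 0, 8, 16, 24 };   /* the rx_queues[] used later in edma_cfg_rx.c */
        uint32_t queue_bmap[BITMAP_WORDS] = { 0 };
        unsigned int i;

        /* Same packing as ppe_edma_ring_to_queues_config(): one bit per PPE queue ID. */
        for (i = 0; i < sizeof(queues) / sizeof(queues[0]); i++)
            queue_bmap[queues[i] / 32] |= 1u << (queues[i] % 32);

        printf("word 0 = 0x%08x\n", queue_bmap[0]);   /* prints 0x01010101 */
        return 0;
    }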
- --- a/drivers/net/ethernet/qualcomm/ppe/edma.h
- +++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
- @@ -6,6 +6,7 @@
- #define __EDMA_MAIN__
-
- #include "ppe_config.h"
- +#include "edma_rx.h"
-
- /* One clock cycle = 1/(EDMA clock frequency in Mhz) micro seconds.
- *
- @@ -29,6 +30,11 @@
- /* Interface ID start. */
- #define EDMA_START_IFNUM 1
-
- +#define EDMA_DESC_AVAIL_COUNT(head, tail, _max) ({ \
- + typeof(_max) (max) = (_max); \
- + ((((head) - (tail)) + \
- + (max)) & ((max) - 1)); })
- +
- /**
- * enum ppe_queue_class_type - PPE queue class type
- * @PPE_QUEUE_CLASS_PRIORITY: Queue offset configured from internal priority
- @@ -92,18 +98,28 @@ struct edma_intr_info {
- /**
- * struct edma_context - EDMA context.
- * @netdev_arr: Net device for each EDMA port
- + * @dummy_dev: Dummy netdevice for RX DMA
- * @ppe_dev: PPE device
- * @hw_info: EDMA Hardware info
- * @intr_info: EDMA Interrupt info
- + * @rxfill_rings: Rx fill Rings, SW is producer
- + * @rx_rings: Rx Desc Rings, SW is consumer
- + * @rx_page_mode: Page mode enabled or disabled
- + * @rx_buf_size: Rx buffer size for Jumbo MRU
- */
- struct edma_context {
- struct net_device **netdev_arr;
- + struct net_device *dummy_dev;
- struct ppe_device *ppe_dev;
- struct edma_hw_info *hw_info;
- struct edma_intr_info intr_info;
- + struct edma_rxfill_ring *rxfill_rings;
- + struct edma_rxdesc_ring *rx_rings;
- + u32 rx_page_mode;
- + u32 rx_buf_size;
- };
-
- -/* Global EDMA context. */
- +/* Global EDMA context */
- extern struct edma_context *edma_ctx;
-
- void edma_destroy(struct ppe_device *ppe_dev);
- @@ -111,6 +127,10 @@ int edma_setup(struct ppe_device *ppe_de
- int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
- enum ppe_queue_class_type class,
- int index, int queue_offset);
- +int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
- + int *res_start, int *res_end);
- +int ppe_edma_ring_to_queues_config(struct ppe_device *ppe_dev, int ring_id,
- + int num, int queues[] __counted_by(num));
-
-
- #endif
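The EDMA_DESC_AVAIL_COUNT() macro added above depends on the ring size being a power of two so that the subtraction wraps cleanly. A small userspace sketch of the same arithmetic:

    #include <stdio.h>

    /* Same arithmetic as EDMA_DESC_AVAIL_COUNT(); max must be a power of two. */
    static unsigned int desc_avail(unsigned int head, unsigned int tail, unsigned int max)
    {
        return ((head - tail) + max) & (max - 1);
    }

    int main(void)
    {
        /* Head wrapped past the tail on a 256-entry ring: 5 - 250 + 256 = 11. */
        printf("%u\n", desc_avail(5, 250, 256));   /* prints 11 */
        /* No wrap: 100 - 40 = 60 entries between tail and head. */
        printf("%u\n", desc_avail(100, 40, 256));  /* prints 60 */
        return 0;
    }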
- --- /dev/null
- +++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
- @@ -0,0 +1,964 @@
- +// SPDX-License-Identifier: GPL-2.0-only
- +/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
- + */
- +
- +/* Configure rings, buffers and NAPI for the receive path, and provide
- + * APIs to enable, disable, clean up and map the Rx rings.
- + */
- +
- +#include <linux/cpumask.h>
- +#include <linux/dma-mapping.h>
- +#include <linux/kernel.h>
- +#include <linux/netdevice.h>
- +#include <linux/printk.h>
- +#include <linux/regmap.h>
- +#include <linux/skbuff.h>
- +
- +#include "edma.h"
- +#include "edma_cfg_rx.h"
- +#include "ppe.h"
- +#include "ppe_regs.h"
- +
- +/* EDMA Queue ID to Ring ID Table. */
- +#define EDMA_QID2RID_TABLE_MEM(q) (0xb9000 + (0x4 * (q)))
- +
- +/* Rx ring queue offset. */
- +#define EDMA_QUEUE_OFFSET(q_id) ((q_id) / EDMA_MAX_PRI_PER_CORE)
- +
- +/* Rx EDMA maximum queue supported. */
- +#define EDMA_CPU_PORT_QUEUE_MAX(queue_start) \
- + ((queue_start) + (EDMA_MAX_PRI_PER_CORE * num_possible_cpus()) - 1)
- +
- +/* EDMA Queue ID to Ring ID configuration. */
- +#define EDMA_QID2RID_NUM_PER_REG 4
- +
- +int rx_queues[] = {0, 8, 16, 24};
- +
- +static u32 edma_rx_ring_queue_map[][EDMA_MAX_CORE] = {{ 0, 8, 16, 24 },
- + { 1, 9, 17, 25 },
- + { 2, 10, 18, 26 },
- + { 3, 11, 19, 27 },
- + { 4, 12, 20, 28 },
- + { 5, 13, 21, 29 },
- + { 6, 14, 22, 30 },
- + { 7, 15, 23, 31 }};
- +
- +static int edma_cfg_rx_desc_rings_reset_queue_mapping(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i, ret;
- +
- + for (i = 0; i < rx->num_rings; i++) {
- + struct edma_rxdesc_ring *rxdesc_ring;
- +
- + rxdesc_ring = &edma_ctx->rx_rings[i];
- +
- + ret = ppe_edma_ring_to_queues_config(edma_ctx->ppe_dev, rxdesc_ring->ring_id,
- + ARRAY_SIZE(rx_queues), rx_queues);
- + if (ret) {
- + pr_err("Error in unmapping rxdesc ring %d to PPE queue mapping to disable its backpressure configuration\n",
- + i);
- + return ret;
- + }
- + }
- +
- + return 0;
- +}
- +
- +static int edma_cfg_rx_desc_ring_reset_queue_priority(u32 rxdesc_ring_idx)
- +{
- + u32 i, queue_id, ret;
- +
- + for (i = 0; i < EDMA_MAX_PRI_PER_CORE; i++) {
- + queue_id = edma_rx_ring_queue_map[i][rxdesc_ring_idx];
- +
- + ret = ppe_queue_priority_set(edma_ctx->ppe_dev, queue_id, i);
- + if (ret) {
- + pr_err("Error in resetting %u queue's priority\n",
- + queue_id);
- + return ret;
- + }
- + }
- +
- + return 0;
- +}
- +
- +static int edma_cfg_rx_desc_ring_reset_queue_config(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i, ret;
- +
- + if (unlikely(rx->num_rings > num_possible_cpus())) {
- + pr_err("Invalid count of rxdesc rings: %d\n",
- + rx->num_rings);
- + return -EINVAL;
- + }
- +
- + /* Unmap Rxdesc ring to PPE queue mapping */
- + ret = edma_cfg_rx_desc_rings_reset_queue_mapping();
- + if (ret) {
- + pr_err("Error in resetting Rx desc ring backpressure config\n");
- + return ret;
- + }
- +
- + /* Reset the priority for PPE queues mapped to Rx rings */
- + for (i = 0; i < rx->num_rings; i++) {
- + ret = edma_cfg_rx_desc_ring_reset_queue_priority(i);
- + if (ret) {
- + pr_err("Error in resetting ring:%d queue's priority\n",
- + i + rx->ring_start);
- + return ret;
- + }
- + }
- +
- + return 0;
- +}
- +
- +static int edma_cfg_rx_desc_ring_to_queue_mapping(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i;
- + int ret;
- +
- + /* Rxdesc ring to PPE queue mapping */
- + for (i = 0; i < rx->num_rings; i++) {
- + struct edma_rxdesc_ring *rxdesc_ring;
- +
- + rxdesc_ring = &edma_ctx->rx_rings[i];
- +
- + ret = ppe_edma_ring_to_queues_config(edma_ctx->ppe_dev,
- + rxdesc_ring->ring_id,
- + ARRAY_SIZE(rx_queues), rx_queues);
- + if (ret) {
- + pr_err("Error in configuring Rx ring to PPE queue mapping, ret: %d, id: %d\n",
- + ret, rxdesc_ring->ring_id);
- + if (edma_cfg_rx_desc_rings_reset_queue_mapping())
- + pr_err("Error in resetting Rx desc ring backpressure configurations\n");
- +
- + return ret;
- + }
- +
- + pr_debug("Rx desc ring %d to PPE queue mapping for backpressure:\n",
- + rxdesc_ring->ring_id);
- + }
- +
- + return 0;
- +}
- +
- +static void edma_cfg_rx_desc_ring_configure(struct edma_rxdesc_ring *rxdesc_ring)
- +{
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + u32 data, reg;
- +
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_BA(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, (u32)(rxdesc_ring->pdma & EDMA_RXDESC_BA_MASK));
- +
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_PREHEADER_BA(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, (u32)(rxdesc_ring->sdma & EDMA_RXDESC_PREHEADER_BA_MASK));
- +
- + data = rxdesc_ring->count & EDMA_RXDESC_RING_SIZE_MASK;
- + data |= (EDMA_RXDESC_PL_DEFAULT_VALUE & EDMA_RXDESC_PL_OFFSET_MASK)
- + << EDMA_RXDESC_PL_OFFSET_SHIFT;
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_RING_SIZE(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, data);
- +
- + /* Configure the Mitigation timer */
- + data = EDMA_MICROSEC_TO_TIMER_UNIT(EDMA_RX_MITIGATION_TIMER_DEF,
- + ppe_dev->clk_rate / MHZ);
- + data = ((data & EDMA_RX_MOD_TIMER_INIT_MASK)
- + << EDMA_RX_MOD_TIMER_INIT_SHIFT);
- + pr_debug("EDMA Rx mitigation timer value: %d\n", data);
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RX_MOD_TIMER(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, data);
- +
- + /* Configure the Mitigation packet count */
- + data = (EDMA_RX_MITIGATION_PKT_CNT_DEF & EDMA_RXDESC_LOW_THRE_MASK)
- + << EDMA_RXDESC_LOW_THRE_SHIFT;
- + pr_debug("EDMA Rx mitigation packet count value: %d\n", data);
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_UGT_THRE(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, data);
- +
- + /* Enable ring. Set ret mode to 'opaque'. */
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RX_INT_CTRL(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, EDMA_RX_NE_INT_EN);
- +}
- +
- +static void edma_cfg_rx_qid_to_rx_desc_ring_mapping(void)
- +{
- + u32 desc_index, ring_index, reg_index, data, q_id;
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 mcast_start, mcast_end, reg;
- + int ret;
- +
- + desc_index = (rx->ring_start & EDMA_RX_RING_ID_MASK);
- +
- + /* Here map all the queues to ring. */
- + for (q_id = EDMA_RX_QUEUE_START;
- + q_id <= EDMA_CPU_PORT_QUEUE_MAX(EDMA_RX_QUEUE_START);
- + q_id += EDMA_QID2RID_NUM_PER_REG) {
- + reg_index = q_id / EDMA_QID2RID_NUM_PER_REG;
- + ring_index = desc_index + EDMA_QUEUE_OFFSET(q_id);
- +
- + data = FIELD_PREP(EDMA_RX_RING_ID_QUEUE0_MASK, ring_index);
- + data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE1_MASK, ring_index);
- + data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE2_MASK, ring_index);
- + data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE3_MASK, ring_index);
- +
- + reg = EDMA_BASE_OFFSET + EDMA_QID2RID_TABLE_MEM(reg_index);
- + regmap_write(regmap, reg, data);
- + pr_debug("Configure QID2RID: %d reg:0x%x to 0x%x, desc_index: %d, reg_index: %d\n",
- + q_id, EDMA_QID2RID_TABLE_MEM(reg_index), data, desc_index, reg_index);
- + }
- +
- + ret = ppe_edma_queue_resource_get(edma_ctx->ppe_dev, PPE_RES_MCAST,
- + &mcast_start, &mcast_end);
- + if (ret < 0) {
- + pr_err("Error in extracting multicast queue values\n");
- + return;
- + }
- +
- + /* Map multicast queues to the first Rx ring. */
- + desc_index = (rx->ring_start & EDMA_RX_RING_ID_MASK);
- + for (q_id = mcast_start; q_id <= mcast_end;
- + q_id += EDMA_QID2RID_NUM_PER_REG) {
- + reg_index = q_id / EDMA_QID2RID_NUM_PER_REG;
- +
- + data = FIELD_PREP(EDMA_RX_RING_ID_QUEUE0_MASK, desc_index);
- + data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE1_MASK, desc_index);
- + data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE2_MASK, desc_index);
- + data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE3_MASK, desc_index);
- +
- + reg = EDMA_BASE_OFFSET + EDMA_QID2RID_TABLE_MEM(reg_index);
- + regmap_write(regmap, reg, data);
- +
- + pr_debug("Configure QID2RID: %d reg:0x%x to 0x%x\n",
- + q_id, EDMA_QID2RID_TABLE_MEM(reg_index), data);
- + }
- +}
- +
- +static void edma_cfg_rx_rings_to_rx_fill_mapping(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i, data, reg;
- +
- + regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_0_ADDR, 0);
- + regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_1_ADDR, 0);
- + regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_2_ADDR, 0);
- +
- + for (i = 0; i < rx->num_rings; i++) {
- + struct edma_rxdesc_ring *rxdesc_ring = &edma_ctx->rx_rings[i];
- + u32 data, reg, ring_id;
- +
- + ring_id = rxdesc_ring->ring_id;
- + if (ring_id >= 0 && ring_id <= 9)
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_0_ADDR;
- + else if (ring_id >= 10 && ring_id <= 19)
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_1_ADDR;
- + else
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_2_ADDR;
- +
- + pr_debug("Configure RXDESC:%u to use RXFILL:%u\n",
- + ring_id,
- + rxdesc_ring->rxfill->ring_id);
- +
- + /* Set the Rx fill ring number in the mapping register. */
- + regmap_read(regmap, reg, &data);
- + data |= (rxdesc_ring->rxfill->ring_id &
- + EDMA_RXDESC2FILL_MAP_RXDESC_MASK) <<
- + ((ring_id % 10) * 3);
- + regmap_write(regmap, reg, data);
- + }
- +
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_0_ADDR;
- + regmap_read(regmap, reg, &data);
- + pr_debug("EDMA_REG_RXDESC2FILL_MAP_0_ADDR: 0x%x\n", data);
- +
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_1_ADDR;
- + regmap_read(regmap, reg, &data);
- + pr_debug("EDMA_REG_RXDESC2FILL_MAP_1_ADDR: 0x%x\n", data);
- +
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_2_ADDR;
- + regmap_read(regmap, reg, &data);
- + pr_debug("EDMA_REG_RXDESC2FILL_MAP_2_ADDR: 0x%x\n", data);
- +}
- +
- +/**
- + * edma_cfg_rx_rings_enable - Enable Rx and Rxfill rings
- + *
- + * Enable Rx and Rxfill rings.
- + */
- +void edma_cfg_rx_rings_enable(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rxfill = hw_info->rxfill;
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i, reg;
- +
- + /* Enable Rx rings */
- + for (i = rx->ring_start; i < rx->ring_start + rx->num_rings; i++) {
- + u32 data;
- +
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CTRL(i);
- + regmap_read(regmap, reg, &data);
- + data |= EDMA_RXDESC_RX_EN;
- + regmap_write(regmap, reg, data);
- + }
- +
- + for (i = rxfill->ring_start; i < rxfill->ring_start + rxfill->num_rings; i++) {
- + u32 data;
- +
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_RING_EN(i);
- + regmap_read(regmap, reg, &data);
- + data |= EDMA_RXFILL_RING_EN;
- + regmap_write(regmap, reg, data);
- + }
- +}
- +
- +/**
- + * edma_cfg_rx_rings_disable - Disable Rx and Rxfill rings
- + *
- + * Disable Rx and Rxfill rings.
- + */
- +void edma_cfg_rx_rings_disable(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rxfill = hw_info->rxfill;
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i, reg;
- +
- + /* Disable Rx rings */
- + for (i = 0; i < rx->num_rings; i++) {
- + struct edma_rxdesc_ring *rxdesc_ring = NULL;
- + u32 data;
- +
- + rxdesc_ring = &edma_ctx->rx_rings[i];
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CTRL(rxdesc_ring->ring_id);
- + regmap_read(regmap, reg, &data);
- + data &= ~EDMA_RXDESC_RX_EN;
- + regmap_write(regmap, reg, data);
- + }
- +
- + /* Disable RxFill Rings */
- + for (i = 0; i < rxfill->num_rings; i++) {
- + struct edma_rxfill_ring *rxfill_ring = NULL;
- + u32 data;
- +
- + rxfill_ring = &edma_ctx->rxfill_rings[i];
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_RING_EN(rxfill_ring->ring_id);
- + regmap_read(regmap, reg, &data);
- + data &= ~EDMA_RXFILL_RING_EN;
- + regmap_write(regmap, reg, data);
- + }
- +}
- +
- +/**
- + * edma_cfg_rx_ring_mappings - Setup Rx ring mappings
- + *
- + * Setup queue ID to Rx desc ring and Rx desc to Rx fill ring mappings.
- + */
- +void edma_cfg_rx_ring_mappings(void)
- +{
- + edma_cfg_rx_qid_to_rx_desc_ring_mapping();
- + edma_cfg_rx_rings_to_rx_fill_mapping();
- +}
- +
- +static void edma_cfg_rx_fill_ring_cleanup(struct edma_rxfill_ring *rxfill_ring)
- +{
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + struct device *dev = ppe_dev->dev;
- + u16 cons_idx, curr_idx;
- + u32 data, reg;
- +
- + /* Get RxFill ring producer index */
- + curr_idx = rxfill_ring->prod_idx & EDMA_RXFILL_PROD_IDX_MASK;
- +
- + /* Get RxFill ring consumer index */
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_CONS_IDX(rxfill_ring->ring_id);
- + regmap_read(regmap, reg, &data);
- + cons_idx = data & EDMA_RXFILL_CONS_IDX_MASK;
- +
- + while (curr_idx != cons_idx) {
- + struct edma_rxfill_desc *rxfill_desc;
- + struct sk_buff *skb;
- +
- + /* Get RxFill descriptor */
- + rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, cons_idx);
- +
- + cons_idx = (cons_idx + 1) & EDMA_RX_RING_SIZE_MASK;
- +
- + /* Get skb from opaque */
- + skb = (struct sk_buff *)EDMA_RXFILL_OPAQUE_GET(rxfill_desc);
- + if (unlikely(!skb)) {
- + pr_err("Empty skb reference at index:%d\n",
- + cons_idx);
- + continue;
- + }
- +
- + dev_kfree_skb_any(skb);
- + }
- +
- + /* Free RxFill ring descriptors */
- + dma_free_coherent(dev, (sizeof(struct edma_rxfill_desc)
- + * rxfill_ring->count),
- + rxfill_ring->desc, rxfill_ring->dma);
- + rxfill_ring->desc = NULL;
- + rxfill_ring->dma = (dma_addr_t)0;
- +}
- +
- +static int edma_cfg_rx_fill_ring_dma_alloc(struct edma_rxfill_ring *rxfill_ring)
- +{
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct device *dev = ppe_dev->dev;
- +
- + /* Allocate RxFill ring descriptors */
- + rxfill_ring->desc = dma_alloc_coherent(dev, (sizeof(struct edma_rxfill_desc)
- + * rxfill_ring->count),
- + &rxfill_ring->dma,
- + GFP_KERNEL | __GFP_ZERO);
- + if (unlikely(!rxfill_ring->desc))
- + return -ENOMEM;
- +
- + return 0;
- +}
- +
- +static int edma_cfg_rx_desc_ring_dma_alloc(struct edma_rxdesc_ring *rxdesc_ring)
- +{
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct device *dev = ppe_dev->dev;
- +
- + rxdesc_ring->pdesc = dma_alloc_coherent(dev, (sizeof(struct edma_rxdesc_pri)
- + * rxdesc_ring->count),
- + &rxdesc_ring->pdma, GFP_KERNEL | __GFP_ZERO);
- + if (unlikely(!rxdesc_ring->pdesc))
- + return -ENOMEM;
- +
- + rxdesc_ring->sdesc = dma_alloc_coherent(dev, (sizeof(struct edma_rxdesc_sec)
- + * rxdesc_ring->count),
- + &rxdesc_ring->sdma, GFP_KERNEL | __GFP_ZERO);
- + if (unlikely(!rxdesc_ring->sdesc)) {
- + dma_free_coherent(dev, (sizeof(struct edma_rxdesc_pri)
- + * rxdesc_ring->count),
- + rxdesc_ring->pdesc,
- + rxdesc_ring->pdma);
- + rxdesc_ring->pdesc = NULL;
- + rxdesc_ring->pdma = (dma_addr_t)0;
- + return -ENOMEM;
- + }
- +
- + return 0;
- +}
- +
- +static void edma_cfg_rx_desc_ring_cleanup(struct edma_rxdesc_ring *rxdesc_ring)
- +{
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + struct device *dev = ppe_dev->dev;
- + u32 prod_idx, cons_idx, reg;
- +
- + /* Get Rxdesc consumer & producer indices */
- + cons_idx = rxdesc_ring->cons_idx & EDMA_RXDESC_CONS_IDX_MASK;
- +
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->ring_id);
- + regmap_read(regmap, reg, &prod_idx);
- + prod_idx = prod_idx & EDMA_RXDESC_PROD_IDX_MASK;
- +
- + /* Free any buffers assigned to any descriptors */
- + while (cons_idx != prod_idx) {
- + struct edma_rxdesc_pri *rxdesc_pri =
- + EDMA_RXDESC_PRI_DESC(rxdesc_ring, cons_idx);
- + struct sk_buff *skb;
- +
- + /* Update consumer index */
- + cons_idx = (cons_idx + 1) & EDMA_RX_RING_SIZE_MASK;
- +
- + /* Get opaque from Rxdesc */
- + skb = (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(rxdesc_pri);
- + if (unlikely(!skb)) {
- + pr_warn("Empty skb reference at index:%d\n",
- + cons_idx);
- + continue;
- + }
- +
- + dev_kfree_skb_any(skb);
- + }
- +
- + /* Update the consumer index */
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, cons_idx);
- +
- + /* Free Rxdesc ring descriptor */
- + dma_free_coherent(dev, (sizeof(struct edma_rxdesc_pri)
- + * rxdesc_ring->count), rxdesc_ring->pdesc,
- + rxdesc_ring->pdma);
- + rxdesc_ring->pdesc = NULL;
- + rxdesc_ring->pdma = (dma_addr_t)0;
- +
- + /* Free any buffers assigned to any secondary ring descriptors */
- + dma_free_coherent(dev, (sizeof(struct edma_rxdesc_sec)
- + * rxdesc_ring->count), rxdesc_ring->sdesc,
- + rxdesc_ring->sdma);
- + rxdesc_ring->sdesc = NULL;
- + rxdesc_ring->sdma = (dma_addr_t)0;
- +}
- +
- +static int edma_cfg_rx_rings_setup(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rxfill = hw_info->rxfill;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 ring_idx, alloc_size, buf_len;
- +
- + /* Set buffer allocation size */
- + if (edma_ctx->rx_buf_size) {
- + alloc_size = edma_ctx->rx_buf_size +
- + EDMA_RX_SKB_HEADROOM + NET_IP_ALIGN;
- + buf_len = alloc_size - EDMA_RX_SKB_HEADROOM - NET_IP_ALIGN;
- + } else if (edma_ctx->rx_page_mode) {
- + alloc_size = EDMA_RX_PAGE_MODE_SKB_SIZE +
- + EDMA_RX_SKB_HEADROOM + NET_IP_ALIGN;
- + buf_len = PAGE_SIZE;
- + } else {
- + alloc_size = EDMA_RX_BUFFER_SIZE;
- + buf_len = alloc_size - EDMA_RX_SKB_HEADROOM - NET_IP_ALIGN;
- + }
- +
- + pr_debug("EDMA ctx:%p rx_ring alloc_size=%d, buf_len=%d\n",
- + edma_ctx, alloc_size, buf_len);
- +
- + /* Allocate Rx fill ring descriptors */
- + for (ring_idx = 0; ring_idx < rxfill->num_rings; ring_idx++) {
- + u32 ret;
- + struct edma_rxfill_ring *rxfill_ring = NULL;
- +
- + rxfill_ring = &edma_ctx->rxfill_rings[ring_idx];
- + rxfill_ring->count = EDMA_RX_RING_SIZE;
- + rxfill_ring->ring_id = rxfill->ring_start + ring_idx;
- + rxfill_ring->alloc_size = alloc_size;
- + rxfill_ring->buf_len = buf_len;
- + rxfill_ring->page_mode = edma_ctx->rx_page_mode;
- +
- + ret = edma_cfg_rx_fill_ring_dma_alloc(rxfill_ring);
- + if (ret) {
- + pr_err("Error in setting up %d rxfill ring. ret: %d",
- + rxfill_ring->ring_id, ret);
- + while (--ring_idx >= 0)
- + edma_cfg_rx_fill_ring_cleanup(&edma_ctx->rxfill_rings[ring_idx]);
- +
- + return -ENOMEM;
- + }
- + }
- +
- + /* Allocate RxDesc ring descriptors */
- + for (ring_idx = 0; ring_idx < rx->num_rings; ring_idx++) {
- + u32 index, queue_id = EDMA_RX_QUEUE_START;
- + struct edma_rxdesc_ring *rxdesc_ring = NULL;
- + u32 ret;
- +
- + rxdesc_ring = &edma_ctx->rx_rings[ring_idx];
- + rxdesc_ring->count = EDMA_RX_RING_SIZE;
- + rxdesc_ring->ring_id = rx->ring_start + ring_idx;
- +
- + if (queue_id > EDMA_CPU_PORT_QUEUE_MAX(EDMA_RX_QUEUE_START)) {
- + pr_err("Invalid queue_id: %d\n", queue_id);
- + while (--ring_idx >= 0)
- + edma_cfg_rx_desc_ring_cleanup(&edma_ctx->rx_rings[ring_idx]);
- +
- + goto rxdesc_mem_alloc_fail;
- + }
- +
- + /* Create a mapping between Rx desc rings and Rx fill rings.
- + * The number of fill rings is smaller than the number of descriptor
- + * rings, so the fill rings are shared across the descriptor rings.
- + */
- + index = rxfill->ring_start +
- + (ring_idx % rxfill->num_rings);
- + rxdesc_ring->rxfill = &edma_ctx->rxfill_rings[index
- + - rxfill->ring_start];
- +
- + ret = edma_cfg_rx_desc_ring_dma_alloc(rxdesc_ring);
- + if (ret) {
- + pr_err("Error in setting up %d rxdesc ring. ret: %d",
- + rxdesc_ring->ring_id, ret);
- + while (--ring_idx >= 0)
- + edma_cfg_rx_desc_ring_cleanup(&edma_ctx->rx_rings[ring_idx]);
- +
- + goto rxdesc_mem_alloc_fail;
- + }
- + }
- +
- + pr_debug("Rx descriptor count for Rx desc and Rx fill rings : %d\n",
- + EDMA_RX_RING_SIZE);
- +
- + return 0;
- +
- +rxdesc_mem_alloc_fail:
- + for (ring_idx = 0; ring_idx < rxfill->num_rings; ring_idx++)
- + edma_cfg_rx_fill_ring_cleanup(&edma_ctx->rxfill_rings[ring_idx]);
- +
- + return -ENOMEM;
- +}
- +
- +/**
- + * edma_cfg_rx_buff_size_setup - Configure EDMA Rx jumbo buffer
- + *
- + * Configure EDMA Rx jumbo buffer
- + */
- +void edma_cfg_rx_buff_size_setup(void)
- +{
- + if (edma_ctx->rx_buf_size) {
- + edma_ctx->rx_page_mode = false;
- + pr_debug("Rx Jumbo mru is enabled: %d\n", edma_ctx->rx_buf_size);
- + }
- +}
- +
- +/**
- + * edma_cfg_rx_rings_alloc - Allocate EDMA Rx rings
- + *
- + * Allocate EDMA Rx rings.
- + *
- + * Return 0 on success, negative error code on failure.
- + */
- +int edma_cfg_rx_rings_alloc(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rxfill = hw_info->rxfill;
- + struct edma_ring_info *rx = hw_info->rx;
- + int ret;
- +
- + edma_ctx->rxfill_rings = kzalloc((sizeof(*edma_ctx->rxfill_rings) *
- + rxfill->num_rings),
- + GFP_KERNEL);
- + if (!edma_ctx->rxfill_rings)
- + return -ENOMEM;
- +
- + edma_ctx->rx_rings = kzalloc((sizeof(*edma_ctx->rx_rings) *
- + rx->num_rings),
- + GFP_KERNEL);
- + if (!edma_ctx->rx_rings)
- + goto rxdesc_ring_alloc_fail;
- +
- + pr_debug("RxDesc:%u rx (%u-%u) RxFill:%u (%u-%u)\n",
- + rx->num_rings, rx->ring_start,
- + (rx->ring_start + rx->num_rings - 1),
- + rxfill->num_rings, rxfill->ring_start,
- + (rxfill->ring_start + rxfill->num_rings - 1));
- +
- + if (edma_cfg_rx_rings_setup()) {
- + pr_err("Error in setting up Rx rings\n");
- + goto rx_rings_setup_fail;
- + }
- +
- + /* Reset Rx descriptor ring mapped queue's configurations */
- + ret = edma_cfg_rx_desc_ring_reset_queue_config();
- + if (ret) {
- + pr_err("Error in resetting the Rx descriptor rings configurations\n");
- + edma_cfg_rx_rings_cleanup();
- + return ret;
- + }
- +
- + return 0;
- +
- +rx_rings_setup_fail:
- + kfree(edma_ctx->rx_rings);
- + edma_ctx->rx_rings = NULL;
- +rxdesc_ring_alloc_fail:
- + kfree(edma_ctx->rxfill_rings);
- + edma_ctx->rxfill_rings = NULL;
- +
- + return -ENOMEM;
- +}
- +
- +/**
- + * edma_cfg_rx_rings_cleanup - Cleanup EDMA Rx rings
- + *
- + * Cleanup EDMA Rx rings
- + */
- +void edma_cfg_rx_rings_cleanup(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rxfill = hw_info->rxfill;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i;
- +
- + /* Free RxFill ring descriptors */
- + for (i = 0; i < rxfill->num_rings; i++)
- + edma_cfg_rx_fill_ring_cleanup(&edma_ctx->rxfill_rings[i]);
- +
- + /* Free Rx completion ring descriptors */
- + for (i = 0; i < rx->num_rings; i++)
- + edma_cfg_rx_desc_ring_cleanup(&edma_ctx->rx_rings[i]);
- +
- + kfree(edma_ctx->rxfill_rings);
- + kfree(edma_ctx->rx_rings);
- + edma_ctx->rxfill_rings = NULL;
- + edma_ctx->rx_rings = NULL;
- +}
- +
- +static void edma_cfg_rx_fill_ring_configure(struct edma_rxfill_ring *rxfill_ring)
- +{
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + u32 ring_sz, reg;
- +
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_BA(rxfill_ring->ring_id);
- + regmap_write(regmap, reg, (u32)(rxfill_ring->dma & EDMA_RING_DMA_MASK));
- +
- + ring_sz = rxfill_ring->count & EDMA_RXFILL_RING_SIZE_MASK;
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_RING_SIZE(rxfill_ring->ring_id);
- + regmap_write(regmap, reg, ring_sz);
- +
- + edma_rx_alloc_buffer(rxfill_ring, rxfill_ring->count - 1);
- +}
- +
- +static void edma_cfg_rx_desc_ring_flow_control(u32 threshold_xoff, u32 threshold_xon)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 data, i, reg;
- +
- + data = (threshold_xoff & EDMA_RXDESC_FC_XOFF_THRE_MASK) << EDMA_RXDESC_FC_XOFF_THRE_SHIFT;
- + data |= ((threshold_xon & EDMA_RXDESC_FC_XON_THRE_MASK) << EDMA_RXDESC_FC_XON_THRE_SHIFT);
- +
- + for (i = 0; i < rx->num_rings; i++) {
- + struct edma_rxdesc_ring *rxdesc_ring;
- +
- + rxdesc_ring = &edma_ctx->rx_rings[i];
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_FC_THRE(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, data);
- + }
- +}
- +
- +static void edma_cfg_rx_fill_ring_flow_control(int threshold_xoff, int threshold_xon)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rxfill = hw_info->rxfill;
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + u32 data, i, reg;
- +
- + data = (threshold_xoff & EDMA_RXFILL_FC_XOFF_THRE_MASK) << EDMA_RXFILL_FC_XOFF_THRE_SHIFT;
- + data |= ((threshold_xon & EDMA_RXFILL_FC_XON_THRE_MASK) << EDMA_RXFILL_FC_XON_THRE_SHIFT);
- +
- + for (i = 0; i < rxfill->num_rings; i++) {
- + struct edma_rxfill_ring *rxfill_ring;
- +
- + rxfill_ring = &edma_ctx->rxfill_rings[i];
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_FC_THRE(rxfill_ring->ring_id);
- + regmap_write(regmap, reg, data);
- + }
- +}
- +
- +/**
- + * edma_cfg_rx_rings - Configure EDMA Rx rings.
- + *
- + * Configure EDMA Rx rings.
- + */
- +int edma_cfg_rx_rings(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rxfill = hw_info->rxfill;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i;
- +
- + for (i = 0; i < rxfill->num_rings; i++)
- + edma_cfg_rx_fill_ring_configure(&edma_ctx->rxfill_rings[i]);
- +
- + for (i = 0; i < rx->num_rings; i++)
- + edma_cfg_rx_desc_ring_configure(&edma_ctx->rx_rings[i]);
- +
- + /* Configure Rx flow control configurations */
- + edma_cfg_rx_desc_ring_flow_control(EDMA_RX_FC_XOFF_DEF, EDMA_RX_FC_XON_DEF);
- + edma_cfg_rx_fill_ring_flow_control(EDMA_RX_FC_XOFF_DEF, EDMA_RX_FC_XON_DEF);
- +
- + return edma_cfg_rx_desc_ring_to_queue_mapping();
- +}
- +
- +/**
- + * edma_cfg_rx_disable_interrupts - EDMA disable RX interrupts
- + *
- + * Disable RX interrupt masks
- + */
- +void edma_cfg_rx_disable_interrupts(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i, reg;
- +
- + for (i = 0; i < rx->num_rings; i++) {
- + struct edma_rxdesc_ring *rxdesc_ring =
- + &edma_ctx->rx_rings[i];
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, EDMA_MASK_INT_CLEAR);
- + }
- +}
- +
- +/**
- + * edma_cfg_rx_enable_interrupts - EDMA enable RX interrupts
- + *
- + * Enable RX interrupt masks
- + */
- +void edma_cfg_rx_enable_interrupts(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i, reg;
- +
- + for (i = 0; i < rx->num_rings; i++) {
- + struct edma_rxdesc_ring *rxdesc_ring =
- + &edma_ctx->rx_rings[i];
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_rx);
- + }
- +}
- +
- +/**
- + * edma_cfg_rx_napi_disable - Disable NAPI for Rx
- + *
- + * Disable NAPI for Rx
- + */
- +void edma_cfg_rx_napi_disable(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i;
- +
- + for (i = 0; i < rx->num_rings; i++) {
- + struct edma_rxdesc_ring *rxdesc_ring;
- +
- + rxdesc_ring = &edma_ctx->rx_rings[i];
- +
- + if (!rxdesc_ring->napi_added)
- + continue;
- +
- + napi_disable(&rxdesc_ring->napi);
- + }
- +}
- +
- +/**
- + * edma_cfg_rx_napi_enable - Enable NAPI for Rx
- + *
- + * Enable NAPI for Rx
- + */
- +void edma_cfg_rx_napi_enable(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i;
- +
- + for (i = 0; i < rx->num_rings; i++) {
- + struct edma_rxdesc_ring *rxdesc_ring;
- +
- + rxdesc_ring = &edma_ctx->rx_rings[i];
- +
- + if (!rxdesc_ring->napi_added)
- + continue;
- +
- + napi_enable(&rxdesc_ring->napi);
- + }
- +}
- +
- +/**
- + * edma_cfg_rx_napi_delete - Delete Rx NAPI
- + *
- + * Delete RX NAPI
- + */
- +void edma_cfg_rx_napi_delete(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i;
- +
- + for (i = 0; i < rx->num_rings; i++) {
- + struct edma_rxdesc_ring *rxdesc_ring;
- +
- + rxdesc_ring = &edma_ctx->rx_rings[i];
- +
- + if (!rxdesc_ring->napi_added)
- + continue;
- +
- + netif_napi_del(&rxdesc_ring->napi);
- + rxdesc_ring->napi_added = false;
- + }
- +}
- +
- +/**
- + * edma_cfg_rx_napi_add - Add Rx NAPI
- + *
- + * Add a NAPI instance for each Rx descriptor ring and register it
- + * against the dummy net device, so the Rx rings can be polled
- + * independently of the per-port net devices.
- + */
- +void edma_cfg_rx_napi_add(void)
- +{
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct edma_ring_info *rx = hw_info->rx;
- + u32 i;
- +
- + for (i = 0; i < rx->num_rings; i++) {
- + struct edma_rxdesc_ring *rxdesc_ring = &edma_ctx->rx_rings[i];
- +
- + netif_napi_add_weight(edma_ctx->dummy_dev, &rxdesc_ring->napi,
- + edma_rx_napi_poll, hw_info->napi_budget_rx);
- + rxdesc_ring->napi_added = true;
- + }
- +
- + netdev_dbg(edma_ctx->dummy_dev, "Rx NAPI budget: %d\n", hw_info->napi_budget_rx);
- +}
- +
- +/**
- + * edma_cfg_rx_rps_hash_map - Configure rx rps hash map.
- + *
- + * Initialize and configure RPS hash map for queues
- + */
- +int edma_cfg_rx_rps_hash_map(void)
- +{
- + cpumask_t edma_rps_cpumask = {{EDMA_RX_DEFAULT_BITMAP}};
- + int map_len = 0, idx = 0, ret = 0;
- + u32 q_off = EDMA_RX_QUEUE_START;
- + u32 q_map[EDMA_MAX_CORE] = {0};
- + u32 hash, cpu;
- +
- + /* Map all possible hash values to queues used by the EDMA Rx
- + * rings based on a bitmask, which represents the cores to be mapped.
- + * These queues are expected to be mapped to different Rx rings
- + * which are assigned to different cores using IRQ affinity configuration.
- + */
- + for_each_cpu(cpu, &edma_rps_cpumask) {
- + q_map[map_len] = q_off + (cpu * EDMA_MAX_PRI_PER_CORE);
- + map_len++;
- + }
- +
- + for (hash = 0; hash < PPE_QUEUE_HASH_NUM; hash++) {
- + ret = ppe_edma_queue_offset_config(edma_ctx->ppe_dev,
- + PPE_QUEUE_CLASS_HASH, hash, q_map[idx]);
- + if (ret)
- + return ret;
- +
- + pr_debug("profile_id: %u, hash: %u, q_off: %u\n",
- + EDMA_CPU_PORT_PROFILE_ID, hash, q_map[idx]);
- + idx = (idx + 1) % map_len;
- + }
- +
- + return 0;
- +}
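edma_cfg_rx_rps_hash_map() above hands the PPE hash buckets out round-robin over one queue per core (queue base plus the per-core priority stride). The standalone sketch below shows the resulting mapping; the core count, stride and queue base are inferred from the queue numbers used elsewhere in this patch (0, 8, 16, 24) and should be treated as assumptions.

    #include <stdio.h>

    #define NR_CORES       4   /* assumed EDMA_MAX_CORE */
    #define PRI_PER_CORE   8   /* assumed EDMA_MAX_PRI_PER_CORE */
    #define RX_QUEUE_START 0   /* assumed EDMA_RX_QUEUE_START */

    int main(void)
    {
        unsigned int q_map[NR_CORES];
        unsigned int cpu, hash, idx = 0;

        /* One queue per core: 0, 8, 16, 24 (matches rx_queues[] above). */
        for (cpu = 0; cpu < NR_CORES; cpu++)
            q_map[cpu] = RX_QUEUE_START + cpu * PRI_PER_CORE;

        /* Hash buckets are assigned round-robin, spreading flows across the cores. */
        for (hash = 0; hash < 8; hash++) {
            printf("hash %u -> queue %u\n", hash, q_map[idx]);
            idx = (idx + 1) % NR_CORES;
        }
        return 0;
    }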
- --- /dev/null
- +++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
- @@ -0,0 +1,48 @@
- +/* SPDX-License-Identifier: GPL-2.0-only
- + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
- + */
- +
- +#ifndef __EDMA_CFG_RX__
- +#define __EDMA_CFG_RX__
- +
- +/* SKB payload size used in page mode */
- +#define EDMA_RX_PAGE_MODE_SKB_SIZE 256
- +
- +/* Rx flow control X-OFF default value */
- +#define EDMA_RX_FC_XOFF_DEF 32
- +
- +/* Rx flow control X-ON default value */
- +#define EDMA_RX_FC_XON_DEF 64
- +
- +/* Rx AC flow control original threshold */
- +#define EDMA_RX_AC_FC_THRE_ORIG 0x190
- +
- +/* Rx AC flow control default threshold */
- +#define EDMA_RX_AC_FC_THRES_DEF 0x104
- +/* Rx mitigation timer's default value in microseconds */
- +#define EDMA_RX_MITIGATION_TIMER_DEF 25
- +
- +/* Rx mitigation packet count's default value */
- +#define EDMA_RX_MITIGATION_PKT_CNT_DEF 16
- +
- +/* Default bitmap of ARM cores used for Rx RPS */
- +#define EDMA_RX_DEFAULT_BITMAP ((1 << EDMA_MAX_CORE) - 1)
- +
- +int edma_cfg_rx_rings(void);
- +int edma_cfg_rx_rings_alloc(void);
- +void edma_cfg_rx_ring_mappings(void);
- +void edma_cfg_rx_rings_cleanup(void);
- +void edma_cfg_rx_disable_interrupts(void);
- +void edma_cfg_rx_enable_interrupts(void);
- +void edma_cfg_rx_napi_disable(void);
- +void edma_cfg_rx_napi_enable(void);
- +void edma_cfg_rx_napi_delete(void);
- +void edma_cfg_rx_napi_add(void);
- +void edma_cfg_rx_mapping(void);
- +void edma_cfg_rx_rings_enable(void);
- +void edma_cfg_rx_rings_disable(void);
- +void edma_cfg_rx_buff_size_setup(void);
- +int edma_cfg_rx_rps_hash_map(void);
- +int edma_cfg_rx_rps(struct ctl_table *table, int write,
- + void *buffer, size_t *lenp, loff_t *ppos);
- +#endif
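edma_cfg_rx_rings_setup() (earlier in edma_cfg_rx.c) derives the SKB allocation size and the DMA buffer length in one of three ways: a jumbo MRU given through the rx_buff_size module parameter, page mode, or the default buffer size. The sketch below mirrors that selection; EDMA_RX_SKB_HEADROOM, NET_IP_ALIGN, PAGE_SIZE and EDMA_RX_BUFFER_SIZE are not fixed by this excerpt, so the values used here are placeholders.

    #include <stdio.h>

    /* Placeholder values; the real constants come from the driver and the architecture. */
    #define RX_SKB_HEADROOM       128   /* stands in for EDMA_RX_SKB_HEADROOM */
    #define IP_ALIGN              2     /* stands in for NET_IP_ALIGN */
    #define PAGE_SZ               4096  /* stands in for PAGE_SIZE */
    #define RX_PAGE_MODE_SKB_SIZE 256   /* EDMA_RX_PAGE_MODE_SKB_SIZE from this header */
    #define RX_BUFFER_SIZE        2048  /* stands in for EDMA_RX_BUFFER_SIZE */

    static void rx_buf_sizes(unsigned int rx_buf_size, int page_mode,
                             unsigned int *alloc_size, unsigned int *buf_len)
    {
        if (rx_buf_size) {            /* jumbo MRU requested via the module parameter */
            *alloc_size = rx_buf_size + RX_SKB_HEADROOM + IP_ALIGN;
            *buf_len = *alloc_size - RX_SKB_HEADROOM - IP_ALIGN;
        } else if (page_mode) {       /* small SKB head, payload carried in a page */
            *alloc_size = RX_PAGE_MODE_SKB_SIZE + RX_SKB_HEADROOM + IP_ALIGN;
            *buf_len = PAGE_SZ;
        } else {                      /* default linear buffer */
            *alloc_size = RX_BUFFER_SIZE;
            *buf_len = *alloc_size - RX_SKB_HEADROOM - IP_ALIGN;
        }
    }

    int main(void)
    {
        unsigned int alloc_size, buf_len;

        rx_buf_sizes(0, 1, &alloc_size, &buf_len);   /* page mode example */
        printf("alloc_size=%u buf_len=%u\n", alloc_size, buf_len);
        return 0;
    }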
- --- a/drivers/net/ethernet/qualcomm/ppe/edma_port.c
- +++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
- @@ -12,12 +12,39 @@
- #include <linux/printk.h>
-
- #include "edma.h"
- +#include "edma_cfg_rx.h"
- #include "edma_port.h"
- #include "ppe_regs.h"
-
- /* Number of netdev queues. */
- #define EDMA_NETDEV_QUEUE_NUM 4
-
- +static int edma_port_stats_alloc(struct net_device *netdev)
- +{
- + struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
- +
- + if (!port_priv)
- + return -EINVAL;
- +
- + /* Allocate per-cpu stats memory. */
- + port_priv->pcpu_stats.rx_stats =
- + netdev_alloc_pcpu_stats(struct edma_port_rx_stats);
- + if (!port_priv->pcpu_stats.rx_stats) {
- + netdev_err(netdev, "Per-cpu EDMA Rx stats alloc failed for %s\n",
- + netdev->name);
- + return -ENOMEM;
- + }
- +
- + return 0;
- +}
- +
- +static void edma_port_stats_free(struct net_device *netdev)
- +{
- + struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
- +
- + free_percpu(port_priv->pcpu_stats.rx_stats);
- +}
- +
- static u16 __maybe_unused edma_port_select_queue(__maybe_unused struct net_device *netdev,
- __maybe_unused struct sk_buff *skb,
- __maybe_unused struct net_device *sb_dev)
- @@ -172,6 +199,7 @@ void edma_port_destroy(struct ppe_port *
- int port_id = port->port_id;
- struct net_device *netdev = edma_ctx->netdev_arr[port_id - 1];
-
- + edma_port_stats_free(netdev);
- unregister_netdev(netdev);
- free_netdev(netdev);
- ppe_port_phylink_destroy(port);
- @@ -232,6 +260,13 @@ int edma_port_setup(struct ppe_port *por
- port_id, netdev->dev_addr);
- }
-
- + /* Allocate memory for EDMA port statistics. */
- + ret = edma_port_stats_alloc(netdev);
- + if (ret) {
- + netdev_dbg(netdev, "EDMA port stats alloc failed\n");
- + goto stats_alloc_fail;
- + }
- +
- netdev_dbg(netdev, "Configuring the port %s(qcom-id:%d)\n",
- netdev->name, port_id);
-
- @@ -263,8 +298,10 @@ int edma_port_setup(struct ppe_port *por
- register_netdev_fail:
- ppe_port_phylink_destroy(port);
- port_phylink_setup_fail:
- - free_netdev(netdev);
- edma_ctx->netdev_arr[port_id - 1] = NULL;
- + edma_port_stats_free(netdev);
- +stats_alloc_fail:
- + free_netdev(netdev);
-
- return ret;
- }
- --- a/drivers/net/ethernet/qualcomm/ppe/edma_port.h
- +++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.h
- @@ -15,14 +15,45 @@
- | NETIF_F_TSO6)
-
- /**
- + * struct edma_port_rx_stats - EDMA RX per CPU stats for the port.
- + * @rx_pkts: Number of Rx packets
- + * @rx_bytes: Number of Rx bytes
- + * @rx_drops: Number of Rx drops
- + * @rx_nr_frag_pkts: Number of Rx nr_frags packets
- + * @rx_fraglist_pkts: Number of Rx fraglist packets
- + * @rx_nr_frag_headroom_err: nr_frags headroom error packets
- + * @syncp: Synchronization pointer
- + */
- +struct edma_port_rx_stats {
- + u64 rx_pkts;
- + u64 rx_bytes;
- + u64 rx_drops;
- + u64 rx_nr_frag_pkts;
- + u64 rx_fraglist_pkts;
- + u64 rx_nr_frag_headroom_err;
- + struct u64_stats_sync syncp;
- +};
- +
- +/**
- + * struct edma_port_pcpu_stats - EDMA per cpu stats data structure for the port.
- + * @rx_stats: Per CPU Rx statistics
- + */
- +struct edma_port_pcpu_stats {
- + struct edma_port_rx_stats __percpu *rx_stats;
- +};
- +
- +/**
- * struct edma_port_priv - EDMA port priv structure.
- * @ppe_port: Pointer to PPE port
- * @netdev: Corresponding netdevice
- + * @pcpu_stats: Per CPU netdev statistics
- + * @txr_map: Tx ring per-core mapping
- * @flags: Feature flags
- */
- struct edma_port_priv {
- struct ppe_port *ppe_port;
- struct net_device *netdev;
- + struct edma_port_pcpu_stats pcpu_stats;
- unsigned long flags;
- };
-
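The per-CPU Rx counters declared above are intended to be bumped from the Rx fast path and folded together when the stack asks for device statistics. Below is a sketch of the writer side only, assuming it runs in NAPI (softirq) context on the CPU that owns the counters; the helper name is illustrative, but the u64_stats pattern matches what edma_rx.c uses for its ring-fill statistics.

    /* Writer side: called from the Rx NAPI poll path for this port. */
    static void edma_port_rx_stats_inc(struct edma_port_priv *port_priv,
                                       unsigned int len)
    {
        struct edma_port_rx_stats *stats;

        stats = this_cpu_ptr(port_priv->pcpu_stats.rx_stats);

        u64_stats_update_begin(&stats->syncp);
        stats->rx_pkts++;
        stats->rx_bytes += len;
        u64_stats_update_end(&stats->syncp);
    }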
- --- /dev/null
- +++ b/drivers/net/ethernet/qualcomm/ppe/edma_rx.c
- @@ -0,0 +1,622 @@
- +// SPDX-License-Identifier: GPL-2.0-only
- +/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
- + */
- +
- +/* Provide APIs to allocate Rx buffers, reap the buffers, and receive
- + * and process linear and scatter-gather packets.
- + */
- +
- +#include <linux/dma-mapping.h>
- +#include <linux/etherdevice.h>
- +#include <linux/irqreturn.h>
- +#include <linux/kernel.h>
- +#include <linux/netdevice.h>
- +#include <linux/platform_device.h>
- +#include <linux/printk.h>
- +#include <linux/regmap.h>
- +
- +#include "edma.h"
- +#include "edma_cfg_rx.h"
- +#include "edma_port.h"
- +#include "ppe.h"
- +#include "ppe_regs.h"
- +
- +static int edma_rx_alloc_buffer_list(struct edma_rxfill_ring *rxfill_ring, int alloc_count)
- +{
- + struct edma_rxfill_stats *rxfill_stats = &rxfill_ring->rxfill_stats;
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + u32 rx_alloc_size = rxfill_ring->alloc_size;
- + struct regmap *regmap = ppe_dev->regmap;
- + bool page_mode = rxfill_ring->page_mode;
- + struct edma_rxfill_desc *rxfill_desc;
- + u32 buf_len = rxfill_ring->buf_len;
- + struct device *dev = ppe_dev->dev;
- + u16 prod_idx, start_idx;
- + u16 num_alloc = 0;
- + u32 reg;
- +
- + prod_idx = rxfill_ring->prod_idx;
- + start_idx = prod_idx;
- +
- + while (likely(alloc_count--)) {
- + dma_addr_t buff_addr;
- + struct sk_buff *skb;
- + struct page *pg;
- +
- + rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, prod_idx);
- +
- + skb = dev_alloc_skb(rx_alloc_size);
- + if (unlikely(!skb)) {
- + u64_stats_update_begin(&rxfill_stats->syncp);
- + ++rxfill_stats->alloc_failed;
- + u64_stats_update_end(&rxfill_stats->syncp);
- + break;
- + }
- +
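- + /* Reserve headroom; NET_IP_ALIGN keeps the IP header aligned
- + * after the Ethernet header.
- + */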
- + skb_reserve(skb, EDMA_RX_SKB_HEADROOM + NET_IP_ALIGN);
- +
- + if (likely(!page_mode)) {
- + buff_addr = dma_map_single(dev, skb->data, rx_alloc_size, DMA_FROM_DEVICE);
- + if (dma_mapping_error(dev, buff_addr)) {
- + dev_dbg(dev, "edma_context:%p DMA mapping failed in non-page mode",
- + edma_ctx);
- + dev_kfree_skb_any(skb);
- + break;
- + }
- + } else {
- + pg = alloc_page(GFP_ATOMIC);
- + if (unlikely(!pg)) {
- + u64_stats_update_begin(&rxfill_stats->syncp);
- + ++rxfill_stats->page_alloc_failed;
- + u64_stats_update_end(&rxfill_stats->syncp);
- + dev_kfree_skb_any(skb);
- + dev_dbg(dev, "edma_context:%p Unable to allocate page",
- + edma_ctx);
- + break;
- + }
- +
- + buff_addr = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- + if (dma_mapping_error(dev, buff_addr)) {
- + dev_dbg(dev, "edma_context:%p Mapping error for page mode",
- + edma_ctx);
- + __free_page(pg);
- + dev_kfree_skb_any(skb);
- + break;
- + }
- +
- + skb_fill_page_desc(skb, 0, pg, 0, PAGE_SIZE);
- + }
- +
- + EDMA_RXFILL_BUFFER_ADDR_SET(rxfill_desc, buff_addr);
- +
- + EDMA_RXFILL_OPAQUE_LO_SET(rxfill_desc, skb);
- +#ifdef __LP64__
- + EDMA_RXFILL_OPAQUE_HI_SET(rxfill_desc, skb);
- +#endif
- + EDMA_RXFILL_PACKET_LEN_SET(rxfill_desc,
- + (u32)(buf_len) & EDMA_RXFILL_BUF_SIZE_MASK);
- + prod_idx = (prod_idx + 1) & EDMA_RX_RING_SIZE_MASK;
- + num_alloc++;
- + }
- +
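- + /* Publish the updated producer index to the hardware only if
- + * at least one buffer was queued.
- + */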
- + if (likely(num_alloc)) {
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->ring_id);
- + regmap_write(regmap, reg, prod_idx);
- + rxfill_ring->prod_idx = prod_idx;
- + }
- +
- + return num_alloc;
- +}
- +
- +/**
- + * edma_rx_alloc_buffer - EDMA Rx alloc buffer.
- + * @rxfill_ring: EDMA Rxfill ring
- + * @alloc_count: Number of buffers to allocate
- + *
- + * Allocate Rx buffers for the RxFill ring.
- + *
- + * Return the number of buffers allocated.
- + */
- +int edma_rx_alloc_buffer(struct edma_rxfill_ring *rxfill_ring, int alloc_count)
- +{
- + return edma_rx_alloc_buffer_list(rxfill_ring, alloc_count);
- +}
- +
- +/* Mark ip_summed appropriately in the skb as per the L3/L4 checksum
- + * status in descriptor.
- + */
- +static void edma_rx_checksum_verify(struct edma_rxdesc_pri *rxdesc_pri,
- + struct sk_buff *skb)
- +{
- + u8 pid = EDMA_RXDESC_PID_GET(rxdesc_pri);
- +
- + skb_checksum_none_assert(skb);
- +
- + if (likely(EDMA_RX_PID_IS_IPV4(pid))) {
- + if (likely(EDMA_RXDESC_L3CSUM_STATUS_GET(rxdesc_pri)) &&
- + likely(EDMA_RXDESC_L4CSUM_STATUS_GET(rxdesc_pri)))
- + skb->ip_summed = CHECKSUM_UNNECESSARY;
- + } else if (likely(EDMA_RX_PID_IS_IPV6(pid))) {
- + if (likely(EDMA_RXDESC_L4CSUM_STATUS_GET(rxdesc_pri)))
- + skb->ip_summed = CHECKSUM_UNNECESSARY;
- + }
- +}
- +
- +static void edma_rx_process_last_segment(struct edma_rxdesc_ring *rxdesc_ring,
- + struct edma_rxdesc_pri *rxdesc_pri,
- + struct sk_buff *skb)
- +{
- + bool page_mode = rxdesc_ring->rxfill->page_mode;
- + struct edma_port_pcpu_stats *pcpu_stats;
- + struct edma_port_rx_stats *rx_stats;
- + struct edma_port_priv *port_dev;
- + struct sk_buff *skb_head;
- + struct net_device *dev;
- +
- + skb_head = rxdesc_ring->head;
- + dev = skb_head->dev;
- +
- + /* Check Rx checksum offload status. */
- + if (likely(dev->features & NETIF_F_RXCSUM))
- + edma_rx_checksum_verify(rxdesc_pri, skb_head);
- +
- + /* Get stats for the netdevice. */
- + port_dev = netdev_priv(dev);
- + pcpu_stats = &port_dev->pcpu_stats;
- + rx_stats = this_cpu_ptr(pcpu_stats->rx_stats);
- +
- + if (unlikely(page_mode)) {
- + if (unlikely(!pskb_may_pull(skb_head, ETH_HLEN))) {
- + /* Discard the SKB that we have been building,
- + * in addition to the SKB linked to current descriptor.
- + */
- + dev_kfree_skb_any(skb_head);
- + rxdesc_ring->head = NULL;
- + rxdesc_ring->last = NULL;
- + rxdesc_ring->pdesc_head = NULL;
- +
- + u64_stats_update_begin(&rx_stats->syncp);
- + rx_stats->rx_nr_frag_headroom_err++;
- + u64_stats_update_end(&rx_stats->syncp);
- +
- + return;
- + }
- + }
- +
- + if (unlikely(!pskb_pull(skb_head, EDMA_RXDESC_DATA_OFFSET_GET(rxdesc_ring->pdesc_head)))) {
- + dev_kfree_skb_any(skb_head);
- + rxdesc_ring->head = NULL;
- + rxdesc_ring->last = NULL;
- + rxdesc_ring->pdesc_head = NULL;
- +
- + u64_stats_update_begin(&rx_stats->syncp);
- + rx_stats->rx_nr_frag_headroom_err++;
- + u64_stats_update_end(&rx_stats->syncp);
- +
- + return;
- + }
- +
- + u64_stats_update_begin(&rx_stats->syncp);
- + rx_stats->rx_pkts++;
- + rx_stats->rx_bytes += skb_head->len;
- + rx_stats->rx_nr_frag_pkts += (u64)page_mode;
- + rx_stats->rx_fraglist_pkts += (u64)(!page_mode);
- + u64_stats_update_end(&rx_stats->syncp);
- +
- + pr_debug("edma_context:%p skb:%p Jumbo pkt_length:%u\n",
- + edma_ctx, skb_head, skb_head->len);
- +
- + skb_head->protocol = eth_type_trans(skb_head, dev);
- +
- + /* Send packet up the stack. */
- + if (dev->features & NETIF_F_GRO)
- + napi_gro_receive(&rxdesc_ring->napi, skb_head);
- + else
- + netif_receive_skb(skb_head);
- +
- + rxdesc_ring->head = NULL;
- + rxdesc_ring->last = NULL;
- + rxdesc_ring->pdesc_head = NULL;
- +}
- +
- +static void edma_rx_handle_frag_list(struct edma_rxdesc_ring *rxdesc_ring,
- + struct edma_rxdesc_pri *rxdesc_pri,
- + struct sk_buff *skb)
- +{
- + u32 pkt_length;
- +
- + /* Get packet length. */
- + pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
- + pr_debug("edma_context:%p skb:%p fragment pkt_length:%u\n",
- + edma_ctx, skb, pkt_length);
- +
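- + /* First segment of a scatter-gather frame: make this skb the
- + * head, record the head descriptor and wait for more segments.
- + */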
- + if (!(rxdesc_ring->head)) {
- + skb_put(skb, pkt_length);
- + rxdesc_ring->head = skb;
- + rxdesc_ring->last = NULL;
- + rxdesc_ring->pdesc_head = rxdesc_pri;
- +
- + return;
- + }
- +
- + /* If this is the second segment, link it as the head skb's
- + * frag_list; otherwise append it to the tail of the list.
- + */
- + skb_put(skb, pkt_length);
- + if (!skb_has_frag_list(rxdesc_ring->head))
- + skb_shinfo(rxdesc_ring->head)->frag_list = skb;
- + else
- + rxdesc_ring->last->next = skb;
- +
- + rxdesc_ring->last = skb;
- + rxdesc_ring->last->next = NULL;
- + rxdesc_ring->head->len += pkt_length;
- + rxdesc_ring->head->data_len += pkt_length;
- + rxdesc_ring->head->truesize += skb->truesize;
- +
- + /* If there are more segments for this packet,
- + * then we have nothing to do. Otherwise process
- + * last segment and send packet to stack.
- + */
- + if (EDMA_RXDESC_MORE_BIT_GET(rxdesc_pri))
- + return;
- +
- + edma_rx_process_last_segment(rxdesc_ring, rxdesc_pri, skb);
- +}
- +
- +static void edma_rx_handle_nr_frags(struct edma_rxdesc_ring *rxdesc_ring,
- + struct edma_rxdesc_pri *rxdesc_pri,
- + struct sk_buff *skb)
- +{
- + skb_frag_t *frag = NULL;
- + u32 pkt_length;
- +
- + /* Get packet length. */
- + pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
- + pr_debug("edma_context:%p skb:%p fragment pkt_length:%u\n",
- + edma_ctx, skb, pkt_length);
- +
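- + /* First segment in page mode: this skb becomes the head that
- + * accumulates the remaining page fragments.
- + */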
- + if (!(rxdesc_ring->head)) {
- + skb->len = pkt_length;
- + skb->data_len = pkt_length;
- + skb->truesize = SKB_TRUESIZE(PAGE_SIZE);
- + rxdesc_ring->head = skb;
- + rxdesc_ring->last = NULL;
- + rxdesc_ring->pdesc_head = rxdesc_pri;
- +
- + return;
- + }
- +
- + frag = &skb_shinfo(skb)->frags[0];
- +
- + /* Append current frag at correct index as nr_frag of parent. */
- + skb_add_rx_frag(rxdesc_ring->head, skb_shinfo(rxdesc_ring->head)->nr_frags,
- + skb_frag_page(frag), 0, pkt_length, PAGE_SIZE);
- + skb_shinfo(skb)->nr_frags = 0;
- +
- + /* Free the SKB after we have appended its frag page to the head skb. */
- + dev_kfree_skb_any(skb);
- +
- + /* If there are more segments for this packet,
- + * then we have nothing to do. Otherwise process
- + * last segment and send packet to stack.
- + */
- + if (EDMA_RXDESC_MORE_BIT_GET(rxdesc_pri))
- + return;
- +
- + edma_rx_process_last_segment(rxdesc_ring, rxdesc_pri, skb);
- +}
- +
- +static bool edma_rx_handle_linear_packets(struct edma_rxdesc_ring *rxdesc_ring,
- + struct edma_rxdesc_pri *rxdesc_pri,
- + struct sk_buff *skb)
- +{
- + bool page_mode = rxdesc_ring->rxfill->page_mode;
- + struct edma_port_pcpu_stats *pcpu_stats;
- + struct edma_port_rx_stats *rx_stats;
- + struct edma_port_priv *port_dev;
- + skb_frag_t *frag = NULL;
- + u32 pkt_length;
- +
- + /* Get stats for the netdevice. */
- + port_dev = netdev_priv(skb->dev);
- + pcpu_stats = &port_dev->pcpu_stats;
- + rx_stats = this_cpu_ptr(pcpu_stats->rx_stats);
- +
- + /* Get packet length. */
- + pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
- +
- + if (likely(!page_mode)) {
- + skb_put(skb, pkt_length);
- + goto send_to_stack;
- + }
- +
- + /* Handle linear packet in page mode. */
- + frag = &skb_shinfo(skb)->frags[0];
- + skb_add_rx_frag(skb, 0, skb_frag_page(frag), 0, pkt_length, PAGE_SIZE);
- +
- + /* Pull the Ethernet header into the skb linear data area for
- + * header processing. On failure return false; the caller frees
- + * the skb.
- + */
- + if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) {
- + u64_stats_update_begin(&rx_stats->syncp);
- + rx_stats->rx_nr_frag_headroom_err++;
- + u64_stats_update_end(&rx_stats->syncp);
- +
- + return false;
- + }
- +
- +send_to_stack:
- +
- + __skb_pull(skb, EDMA_RXDESC_DATA_OFFSET_GET(rxdesc_pri));
- +
- + /* Check Rx checksum offload status. */
- + if (likely(skb->dev->features & NETIF_F_RXCSUM))
- + edma_rx_checksum_verify(rxdesc_pri, skb);
- +
- + u64_stats_update_begin(&rx_stats->syncp);
- + rx_stats->rx_pkts++;
- + rx_stats->rx_bytes += pkt_length;
- + rx_stats->rx_nr_frag_pkts += (u64)page_mode;
- + u64_stats_update_end(&rx_stats->syncp);
- +
- + skb->protocol = eth_type_trans(skb, skb->dev);
- + if (skb->dev->features & NETIF_F_GRO)
- + napi_gro_receive(&rxdesc_ring->napi, skb);
- + else
- + netif_receive_skb(skb);
- +
- + netdev_dbg(skb->dev, "edma_context:%p, skb:%p pkt_length:%u\n",
- + edma_ctx, skb, skb->len);
- +
- + return true;
- +}
- +
- +static struct net_device *edma_rx_get_src_dev(struct edma_rxdesc_stats *rxdesc_stats,
- + struct edma_rxdesc_pri *rxdesc_pri,
- + struct sk_buff *skb)
- +{
- + u32 src_info = EDMA_RXDESC_SRC_INFO_GET(rxdesc_pri);
- + struct edma_hw_info *hw_info = edma_ctx->hw_info;
- + struct net_device *ndev = NULL;
- + u8 src_port_num;
- +
- + /* Check src_info. */
- + if (likely((src_info & EDMA_RXDESC_SRCINFO_TYPE_MASK)
- + == EDMA_RXDESC_SRCINFO_TYPE_PORTID)) {
- + src_port_num = src_info & EDMA_RXDESC_PORTNUM_BITS;
- + } else {
- + if (net_ratelimit()) {
- + pr_warn("Invalid src info_type:0x%x. Drop skb:%p\n",
- + (src_info & EDMA_RXDESC_SRCINFO_TYPE_MASK), skb);
- + }
- +
- + u64_stats_update_begin(&rxdesc_stats->syncp);
- + ++rxdesc_stats->src_port_inval_type;
- + u64_stats_update_end(&rxdesc_stats->syncp);
- +
- + return NULL;
- + }
- +
- + /* Packet with PP source. */
- + if (likely(src_port_num <= hw_info->max_ports)) {
- + if (unlikely(src_port_num < EDMA_START_IFNUM)) {
- + if (net_ratelimit())
- + pr_warn("Invalid port number:%d. Drop skb:%p\n",
- + src_port_num, skb);
- +
- + u64_stats_update_begin(&rxdesc_stats->syncp);
- + ++rxdesc_stats->src_port_inval;
- + u64_stats_update_end(&rxdesc_stats->syncp);
- +
- + return NULL;
- + }
- +
- + /* Get netdev for this port using the source port
- + * number as index into the netdev array. We need to
- + * subtract one since the indices start from '0' and
- + * port numbers start from '1'.
- + */
- + ndev = edma_ctx->netdev_arr[src_port_num - 1];
- + }
- +
- + if (likely(ndev))
- + return ndev;
- +
- + if (net_ratelimit())
- + pr_warn("Netdev Null src_info_type:0x%x src port num:%d Drop skb:%p\n",
- + (src_info & EDMA_RXDESC_SRCINFO_TYPE_MASK),
- + src_port_num, skb);
- +
- + u64_stats_update_begin(&rxdesc_stats->syncp);
- + ++rxdesc_stats->src_port_inval_netdev;
- + u64_stats_update_end(&rxdesc_stats->syncp);
- +
- + return NULL;
- +}
- +
- +static int edma_rx_reap(struct edma_rxdesc_ring *rxdesc_ring, int budget)
- +{
- + struct edma_rxdesc_stats *rxdesc_stats = &rxdesc_ring->rxdesc_stats;
- + u32 alloc_size = rxdesc_ring->rxfill->alloc_size;
- + bool page_mode = rxdesc_ring->rxfill->page_mode;
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct edma_rxdesc_pri *next_rxdesc_pri;
- + struct regmap *regmap = ppe_dev->regmap;
- + struct device *dev = ppe_dev->dev;
- + u32 prod_idx, cons_idx;
- + u32 work_to_do, work_done = 0;
- + struct sk_buff *next_skb;
- + u32 work_leftover, reg;
- +
- + /* Get Rx ring producer and consumer indices. */
- + cons_idx = rxdesc_ring->cons_idx;
- +
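- + /* Use the cached work count when enough pending work is already
- + * known; otherwise read the hardware producer index to refresh it.
- + */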
- + if (likely(rxdesc_ring->work_leftover > EDMA_RX_MAX_PROCESS)) {
- + work_to_do = rxdesc_ring->work_leftover;
- + } else {
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->ring_id);
- + regmap_read(regmap, reg, &prod_idx);
- + prod_idx = prod_idx & EDMA_RXDESC_PROD_IDX_MASK;
- + work_to_do = EDMA_DESC_AVAIL_COUNT(prod_idx,
- + cons_idx, EDMA_RX_RING_SIZE);
- + rxdesc_ring->work_leftover = work_to_do;
- + }
- +
- + if (work_to_do > budget)
- + work_to_do = budget;
- +
- + rxdesc_ring->work_leftover -= work_to_do;
- + next_rxdesc_pri = EDMA_RXDESC_PRI_DESC(rxdesc_ring, cons_idx);
- +
- + /* Get opaque from RXDESC. */
- + next_skb = (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(next_rxdesc_pri);
- +
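- + /* Descriptors left over after whole EDMA_RX_MAX_PROCESS batches;
- + * these are refilled after the loop below.
- + */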
- + work_leftover = work_to_do & (EDMA_RX_MAX_PROCESS - 1);
- + while (likely(work_to_do--)) {
- + struct edma_rxdesc_pri *rxdesc_pri;
- + struct net_device *ndev;
- + struct sk_buff *skb;
- + dma_addr_t dma_addr;
- +
- + skb = next_skb;
- + rxdesc_pri = next_rxdesc_pri;
- + dma_addr = EDMA_RXDESC_BUFFER_ADDR_GET(rxdesc_pri);
- +
- + if (!page_mode)
- + dma_unmap_single(dev, dma_addr, alloc_size,
- + DMA_FROM_DEVICE);
- + else
- + dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
- +
- + /* Update consumer index. */
- + cons_idx = (cons_idx + 1) & EDMA_RX_RING_SIZE_MASK;
- +
- + /* Get the next Rx descriptor. */
- + next_rxdesc_pri = EDMA_RXDESC_PRI_DESC(rxdesc_ring, cons_idx);
- +
- + /* Handle linear packets or initial segments first. */
- + if (likely(!(rxdesc_ring->head))) {
- + ndev = edma_rx_get_src_dev(rxdesc_stats, rxdesc_pri, skb);
- + if (unlikely(!ndev)) {
- + dev_kfree_skb_any(skb);
- + goto next_rx_desc;
- + }
- +
- + /* Update skb fields for head skb. */
- + skb->dev = ndev;
- + skb->skb_iif = ndev->ifindex;
- +
- + /* Handle linear packets. */
- + if (likely(!EDMA_RXDESC_MORE_BIT_GET(rxdesc_pri))) {
- + next_skb =
- + (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(next_rxdesc_pri);
- +
- + if (unlikely(!edma_rx_handle_linear_packets(rxdesc_ring,
- + rxdesc_pri, skb)))
- + dev_kfree_skb_any(skb);
- +
- + goto next_rx_desc;
- + }
- + }
- +
- + next_skb = (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(next_rxdesc_pri);
- +
- + /* Handle scatter frame processing for first/middle/last segments. */
- + if (page_mode)
- + edma_rx_handle_nr_frags(rxdesc_ring, rxdesc_pri, skb);
- + else
- + edma_rx_handle_frag_list(rxdesc_ring, rxdesc_pri, skb);
- +
- +next_rx_desc:
- + /* Update work done. */
- + work_done++;
- +
- + /* Check if we can refill EDMA_RX_MAX_PROCESS worth buffers,
- + * if yes, refill and update index before continuing.
- + */
- + if (unlikely(!(work_done & (EDMA_RX_MAX_PROCESS - 1)))) {
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, cons_idx);
- + rxdesc_ring->cons_idx = cons_idx;
- + edma_rx_alloc_buffer_list(rxdesc_ring->rxfill, EDMA_RX_MAX_PROCESS);
- + }
- + }
- +
- + /* Check if we need to refill and update
- + * index for any buffers before exit.
- + */
- + if (unlikely(work_leftover)) {
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, cons_idx);
- + rxdesc_ring->cons_idx = cons_idx;
- + edma_rx_alloc_buffer_list(rxdesc_ring->rxfill, work_leftover);
- + }
- +
- + return work_done;
- +}
- +
- +/**
- + * edma_rx_napi_poll - EDMA Rx napi poll.
- + * @napi: NAPI structure
- + * @budget: Rx NAPI budget
- + *
- + * NAPI poll handler for the EDMA Rx descriptor ring.
- + *
- + * Return the number of packets processed.
- + */
- +int edma_rx_napi_poll(struct napi_struct *napi, int budget)
- +{
- + struct edma_rxdesc_ring *rxdesc_ring = container_of(napi,
- + struct edma_rxdesc_ring, napi);
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + int work_done = 0;
- + u32 status, reg;
- +
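- + /* Reap until the budget is exhausted or no completions are
- + * pending, then complete NAPI and re-enable the ring interrupt.
- + */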
- + do {
- + work_done += edma_rx_reap(rxdesc_ring, budget - work_done);
- + if (likely(work_done >= budget))
- + return work_done;
- +
- + /* Check if there are more packets to process. */
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_STAT(rxdesc_ring->ring_id);
- + regmap_read(regmap, reg, &status);
- + status = status & EDMA_RXDESC_RING_INT_STATUS_MASK;
- + } while (likely(status));
- +
- + napi_complete(napi);
- +
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_rx);
- +
- + return work_done;
- +}
- +
- +/**
- + * edma_rx_handle_irq - EDMA Rx handle irq.
- + * @irq: Interrupt to handle
- + * @ctx: Context
- + *
- + * Process RX IRQ and schedule NAPI.
- + *
- + * Return IRQ_HANDLED.
- + */
- +irqreturn_t edma_rx_handle_irq(int irq, void *ctx)
- +{
- + struct edma_rxdesc_ring *rxdesc_ring = (struct edma_rxdesc_ring *)ctx;
- + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- + struct regmap *regmap = ppe_dev->regmap;
- + u32 reg;
- +
- + if (likely(napi_schedule_prep(&rxdesc_ring->napi))) {
- + /* Disable RxDesc interrupt. */
- + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
- + regmap_write(regmap, reg, EDMA_MASK_INT_DISABLE);
- + __napi_schedule(&rxdesc_ring->napi);
- + }
- +
- + return IRQ_HANDLED;
- +}
- --- /dev/null
- +++ b/drivers/net/ethernet/qualcomm/ppe/edma_rx.h
- @@ -0,0 +1,287 @@
- +/* SPDX-License-Identifier: GPL-2.0-only
- + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
- + */
- +
- +#ifndef __EDMA_RX__
- +#define __EDMA_RX__
- +
- +#include <linux/netdevice.h>
- +
- +#define EDMA_RXFILL_RING_PER_CORE_MAX 1
- +#define EDMA_RXDESC_RING_PER_CORE_MAX 1
- +
- +/* Max Rx processing without replenishing RxFill ring. */
- +#define EDMA_RX_MAX_PROCESS 32
- +
- +#define EDMA_RX_SKB_HEADROOM 128
- +#define EDMA_RX_QUEUE_START 0
- +#define EDMA_RX_BUFFER_SIZE 1984
- +#define EDMA_MAX_CORE 4
- +
- +#define EDMA_GET_DESC(R, i, type) (&(((type *)((R)->desc))[(i)]))
- +#define EDMA_GET_PDESC(R, i, type) (&(((type *)((R)->pdesc))[(i)]))
- +#define EDMA_GET_SDESC(R, i, type) (&(((type *)((R)->sdesc))[(i)]))
- +#define EDMA_RXFILL_DESC(R, i) EDMA_GET_DESC(R, i, \
- + struct edma_rxfill_desc)
- +#define EDMA_RXDESC_PRI_DESC(R, i) EDMA_GET_PDESC(R, i, \
- + struct edma_rxdesc_pri)
- +#define EDMA_RXDESC_SEC_DESC(R, i) EDMA_GET_SDESC(R, i, \
- + struct edma_rxdesc_sec)
- +
- +#define EDMA_RX_RING_SIZE 2048
- +
- +#define EDMA_RX_RING_SIZE_MASK (EDMA_RX_RING_SIZE - 1)
- +#define EDMA_RX_RING_ID_MASK 0x1F
- +
- +#define EDMA_MAX_PRI_PER_CORE 8
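- +
- +/* Protocol ID (PID) checks on the Rx descriptor: PID values 0-3 are
- + * treated as IPv4, while a set bit 2 is treated as IPv6.
- + */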
- +#define EDMA_RX_PID_IPV4_MAX 0x3
- +#define EDMA_RX_PID_IPV6 0x4
- +#define EDMA_RX_PID_IS_IPV4(pid) (!((pid) & (~EDMA_RX_PID_IPV4_MAX)))
- +#define EDMA_RX_PID_IS_IPV6(pid) (!(!((pid) & EDMA_RX_PID_IPV6)))
- +
- +#define EDMA_RXDESC_BUFFER_ADDR_GET(desc) \
- + ((u32)(le32_to_cpu((__force __le32)((desc)->word0))))
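- +/* The opaque words (word2/word3) return the skb pointer that the driver
- + * stored in the matching RxFill descriptor; see edma_rx_alloc_buffer_list().
- + */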
- +#define EDMA_RXDESC_OPAQUE_GET(_desc) ({ \
- + typeof(_desc) (desc) = (_desc); \
- + ((uintptr_t)((u64)((desc)->word2) | \
- + ((u64)((desc)->word3) << 0x20))); })
- +
- +#define EDMA_RXDESC_SRCINFO_TYPE_PORTID 0x2000
- +#define EDMA_RXDESC_SRCINFO_TYPE_MASK 0xF000
- +#define EDMA_RXDESC_L3CSUM_STATUS_MASK BIT(13)
- +#define EDMA_RXDESC_L4CSUM_STATUS_MASK BIT(12)
- +#define EDMA_RXDESC_PORTNUM_BITS 0x0FFF
- +
- +#define EDMA_RXDESC_PACKET_LEN_MASK 0x3FFFF
- +#define EDMA_RXDESC_PACKET_LEN_GET(_desc) ({ \
- + typeof(_desc) (desc) = (_desc); \
- + ((le32_to_cpu((__force __le32)((desc)->word5))) & \
- + EDMA_RXDESC_PACKET_LEN_MASK); })
- +
- +#define EDMA_RXDESC_MORE_BIT_MASK 0x40000000
- +#define EDMA_RXDESC_MORE_BIT_GET(desc) ((le32_to_cpu((__force __le32)((desc)->word1))) & \
- + EDMA_RXDESC_MORE_BIT_MASK)
- +#define EDMA_RXDESC_SRC_DST_INFO_GET(desc) \
- + ((u32)((le32_to_cpu((__force __le32)((desc)->word4)))))
- +
- +#define EDMA_RXDESC_L3_OFFSET_MASK GENMASK(23, 16)
- +#define EDMA_RXDESC_L3_OFFSET_GET(desc) FIELD_GET(EDMA_RXDESC_L3_OFFSET_MASK, \
- + le32_to_cpu((__force __le32)((desc)->word7)))
- +
- +#define EDMA_RXDESC_PID_MASK GENMASK(15, 12)
- +#define EDMA_RXDESC_PID_GET(desc) FIELD_GET(EDMA_RXDESC_PID_MASK, \
- + le32_to_cpu((__force __le32)((desc)->word7)))
- +
- +#define EDMA_RXDESC_DST_INFO_MASK GENMASK(31, 16)
- +#define EDMA_RXDESC_DST_INFO_GET(desc) FIELD_GET(EDMA_RXDESC_DST_INFO_MASK, \
- + le32_to_cpu((__force __le32)((desc)->word4)))
- +
- +#define EDMA_RXDESC_SRC_INFO_MASK GENMASK(15, 0)
- +#define EDMA_RXDESC_SRC_INFO_GET(desc) FIELD_GET(EDMA_RXDESC_SRC_INFO_MASK, \
- + le32_to_cpu((__force __le32)((desc)->word4)))
- +
- +#define EDMA_RXDESC_PORT_ID_MASK GENMASK(11, 0)
- +#define EDMA_RXDESC_PORT_ID_GET(x) FIELD_GET(EDMA_RXDESC_PORT_ID_MASK, x)
- +
- +#define EDMA_RXDESC_SRC_PORT_ID_GET(desc) (EDMA_RXDESC_PORT_ID_GET \
- + (EDMA_RXDESC_SRC_INFO_GET(desc)))
- +#define EDMA_RXDESC_DST_PORT_ID_GET(desc) (EDMA_RXDESC_PORT_ID_GET \
- + (EDMA_RXDESC_DST_INFO_GET(desc)))
- +
- +#define EDMA_RXDESC_DST_PORT (0x2 << EDMA_RXDESC_PID_SHIFT)
- +
- +#define EDMA_RXDESC_L3CSUM_STATUS_GET(desc) FIELD_GET(EDMA_RXDESC_L3CSUM_STATUS_MASK, \
- + le32_to_cpu((__force __le32)(desc)->word6))
- +#define EDMA_RXDESC_L4CSUM_STATUS_GET(desc) FIELD_GET(EDMA_RXDESC_L4CSUM_STATUS_MASK, \
- + le32_to_cpu((__force __le32)(desc)->word6))
- +
- +#define EDMA_RXDESC_DATA_OFFSET_MASK GENMASK(11, 0)
- +#define EDMA_RXDESC_DATA_OFFSET_GET(desc) FIELD_GET(EDMA_RXDESC_DATA_OFFSET_MASK, \
- + le32_to_cpu((__force __le32)(desc)->word6))
- +
- +#define EDMA_RXFILL_BUF_SIZE_MASK 0xFFFF
- +#define EDMA_RXFILL_BUF_SIZE_SHIFT 16
- +
- +/* Opaque values are not accessed by the EDMA HW,
- + * so endianness conversion is not needed.
- + */
- +
- +#define EDMA_RXFILL_OPAQUE_LO_SET(desc, ptr) (((desc)->word2) = \
- + (u32)(uintptr_t)(ptr))
- +#ifdef __LP64__
- +#define EDMA_RXFILL_OPAQUE_HI_SET(desc, ptr) (((desc)->word3) = \
- + (u32)((u64)(ptr) >> 0x20))
- +#endif
- +
- +#define EDMA_RXFILL_OPAQUE_GET(_desc) ({ \
- + typeof(_desc) (desc) = (_desc); \
- + ((uintptr_t)((u64)((desc)->word2) | \
- + ((u64)((desc)->word3) << 0x20))); })
- +
- +#define EDMA_RXFILL_PACKET_LEN_SET(desc, len) { \
- + (((desc)->word1) = (u32)((((u32)len) << EDMA_RXFILL_BUF_SIZE_SHIFT) & \
- + 0xFFFF0000)); \
- +}
- +
- +#define EDMA_RXFILL_BUFFER_ADDR_SET(desc, addr) (((desc)->word0) = (u32)(addr))
- +
- +/* Opaque values are set in word2 and word3, they are not accessed by the EDMA HW,
- + * so endianness conversion is not needed.
- + */
- +#define EDMA_RXFILL_ENDIAN_SET(_desc) ({ \
- + typeof(_desc) (desc) = (_desc); \
- + cpu_to_le32s(&((desc)->word0)); \
- + cpu_to_le32s(&((desc)->word1)); \
- +})
- +
- +/* RX DESC size shift to obtain index from descriptor pointer. */
- +#define EDMA_RXDESC_SIZE_SHIFT 5
- +
- +/**
- + * struct edma_rxdesc_stats - RX descriptor ring stats.
- + * @src_port_inval: Invalid source port number
- + * @src_port_inval_type: Source type is not PORT ID
- + * @src_port_inval_netdev: Invalid net device for the source port
- + * @syncp: Synchronization point for the 64-bit statistics
- + */
- +struct edma_rxdesc_stats {
- + u64 src_port_inval;
- + u64 src_port_inval_type;
- + u64 src_port_inval_netdev;
- + struct u64_stats_sync syncp;
- +};
- +
- +/**
- + * struct edma_rxfill_stats - Rx fill descriptor ring stats.
- + * @alloc_failed: Buffer allocation failure count
- + * @page_alloc_failed: Page allocation failure count for page mode
- + * @syncp: Synchronization point for the 64-bit statistics
- + */
- +struct edma_rxfill_stats {
- + u64 alloc_failed;
- + u64 page_alloc_failed;
- + struct u64_stats_sync syncp;
- +};
- +
- +/**
- + * struct edma_rxdesc_pri - Rx descriptor.
- + * @word0: Buffer address
- + * @word1: More bit, priority bit, service code
- + * @word2: Opaque low bits
- + * @word3: Opaque high bits
- + * @word4: Destination and source information
- + * @word5: WiFi QoS, data length
- + * @word6: Hash value, check sum status
- + * @word7: DSCP, packet offsets
- + */
- +struct edma_rxdesc_pri {
- + u32 word0;
- + u32 word1;
- + u32 word2;
- + u32 word3;
- + u32 word4;
- + u32 word5;
- + u32 word6;
- + u32 word7;
- +};
- +
- +/**
- + * struct edma_rxdesc_sec - Rx secondary descriptor.
- + * @word0: Timestamp
- + * @word1: Secondary checksum status
- + * @word2: QoS tag
- + * @word3: Flow index details
- + * @word4: Secondary packet offsets
- + * @word5: Multicast bit, checksum
- + * @word6: SVLAN, CVLAN
- + * @word7: Secondary SVLAN, CVLAN
- + */
- +struct edma_rxdesc_sec {
- + u32 word0;
- + u32 word1;
- + u32 word2;
- + u32 word3;
- + u32 word4;
- + u32 word5;
- + u32 word6;
- + u32 word7;
- +};
- +
- +/**
- + * struct edma_rxfill_desc - RxFill descriptor.
- + * @word0: Buffer address
- + * @word1: Buffer size
- + * @word2: Opaque low bits
- + * @word3: Opaque high bits
- + */
- +struct edma_rxfill_desc {
- + u32 word0;
- + u32 word1;
- + u32 word2;
- + u32 word3;
- +};
- +
- +/**
- + * struct edma_rxfill_ring - RxFill ring
- + * @ring_id: RxFill ring number
- + * @count: Number of descriptors in the ring
- + * @prod_idx: Ring producer index
- + * @alloc_size: Buffer size to allocate
- + * @desc: Descriptor ring virtual address
- + * @dma: Descriptor ring physical address
- + * @buf_len: Buffer length for rxfill descriptor
- + * @page_mode: Page mode for Rx processing
- + * @rxfill_stats: Rx fill ring statistics
- + */
- +struct edma_rxfill_ring {
- + u32 ring_id;
- + u32 count;
- + u32 prod_idx;
- + u32 alloc_size;
- + struct edma_rxfill_desc *desc;
- + dma_addr_t dma;
- + u32 buf_len;
- + bool page_mode;
- + struct edma_rxfill_stats rxfill_stats;
- +};
- +
- +/**
- + * struct edma_rxdesc_ring - RxDesc ring
- + * @napi: Pointer to napi
- + * @ring_id: Rxdesc ring number
- + * @count: Number of descriptors in the ring
- + * @work_leftover: Leftover descriptors to be processed
- + * @cons_idx: Ring consumer index
- + * @pdesc: Primary descriptor ring virtual address
- + * @pdesc_head: Primary descriptor head in case of scatter-gather frame
- + * @sdesc: Secondary descriptor ring virtual address
- + * @rxdesc_stats: Rx descriptor ring statistics
- + * @rxfill: RxFill ring used
- + * @napi_added: Flag to indicate NAPI add status
- + * @pdma: Primary descriptor ring physical address
- + * @sdma: Secondary descriptor ring physical address
- + * @head: Head of the skb list in case of scatter-gather frame
- + * @last: Last skb of the skb list in case of scatter-gather frame
- + */
- +struct edma_rxdesc_ring {
- + struct napi_struct napi;
- + u32 ring_id;
- + u32 count;
- + u32 work_leftover;
- + u32 cons_idx;
- + struct edma_rxdesc_pri *pdesc;
- + struct edma_rxdesc_pri *pdesc_head;
- + struct edma_rxdesc_sec *sdesc;
- + struct edma_rxdesc_stats rxdesc_stats;
- + struct edma_rxfill_ring *rxfill;
- + bool napi_added;
- + dma_addr_t pdma;
- + dma_addr_t sdma;
- + struct sk_buff *head;
- + struct sk_buff *last;
- +};
- +
- +irqreturn_t edma_rx_handle_irq(int irq, void *ctx);
- +int edma_rx_alloc_buffer(struct edma_rxfill_ring *rxfill_ring, int alloc_count);
- +int edma_rx_napi_poll(struct napi_struct *napi, int budget);
- +#endif