0329-net-ethernet-qualcomm-Initialize-PPE-queue-settings.patch 15 KB

  1. From 63874f7c2e46f192e43e6214d66236372e36396c Mon Sep 17 00:00:00 2001
  2. From: Luo Jie <[email protected]>
  3. Date: Sun, 9 Feb 2025 22:29:41 +0800
  4. Subject: [PATCH] net: ethernet: qualcomm: Initialize PPE queue settings
  5. Configure unicast and multicast hardware queues for the PPE
  6. ports to enable packet forwarding between the ports.
  7. Each PPE port is assigned with a range of queues. The queue ID
  8. selection for a packet is decided by the queue base and queue
  9. offset that is configured based on the internal priority and
  10. the RSS hash value of the packet.
  11. Signed-off-by: Luo Jie <[email protected]>
  12. ---
  13. .../net/ethernet/qualcomm/ppe/ppe_config.c | 356 +++++++++++++++++-
  14. .../net/ethernet/qualcomm/ppe/ppe_config.h | 63 ++++
  15. drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 21 ++
  16. 3 files changed, 439 insertions(+), 1 deletion(-)
  17. --- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
  18. +++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
  19. @@ -128,6 +128,34 @@ struct ppe_scheduler_port_config {
  20. unsigned int drr_node_id;
  21. };
  22. +/**
  23. + * struct ppe_port_schedule_resource - PPE port scheduler resource.
  24. + * @ucastq_start: Unicast queue start ID.
  25. + * @ucastq_end: Unicast queue end ID.
  26. + * @mcastq_start: Multicast queue start ID.
  27. + * @mcastq_end: Multicast queue end ID.
  28. + * @flow_id_start: Flow start ID.
  29. + * @flow_id_end: Flow end ID.
  30. + * @l0node_start: Scheduler node start ID for queue level.
  31. + * @l0node_end: Scheduler node end ID for queue level.
  32. + * @l1node_start: Scheduler node start ID for flow level.
  33. + * @l1node_end: Scheduler node end ID for flow level.
  34. + *
  35. + * PPE scheduler resource allocated among the PPE ports.
  36. + */
  37. +struct ppe_port_schedule_resource {
  38. + unsigned int ucastq_start;
  39. + unsigned int ucastq_end;
  40. + unsigned int mcastq_start;
  41. + unsigned int mcastq_end;
  42. + unsigned int flow_id_start;
  43. + unsigned int flow_id_end;
  44. + unsigned int l0node_start;
  45. + unsigned int l0node_end;
  46. + unsigned int l1node_start;
  47. + unsigned int l1node_end;
  48. +};
  49. +
  50. /* Assign the share buffer number 1550 to group 0 by default. */
  51. static const int ipq9574_ppe_bm_group_config = 1550;
  52. @@ -676,6 +704,111 @@ static const struct ppe_scheduler_port_c
  53. },
  54. };
  55. +/* The scheduler resource is applied to each PPE port. The resource
  56. + * includes the unicast & multicast queues, flow nodes and DRR nodes.
  57. + */
  58. +static const struct ppe_port_schedule_resource ppe_scheduler_res[] = {
  59. + { .ucastq_start = 0,
  60. + .ucastq_end = 63,
  61. + .mcastq_start = 256,
  62. + .mcastq_end = 271,
  63. + .flow_id_start = 0,
  64. + .flow_id_end = 0,
  65. + .l0node_start = 0,
  66. + .l0node_end = 7,
  67. + .l1node_start = 0,
  68. + .l1node_end = 0,
  69. + },
  70. + { .ucastq_start = 144,
  71. + .ucastq_end = 159,
  72. + .mcastq_start = 272,
  73. + .mcastq_end = 275,
  74. + .flow_id_start = 36,
  75. + .flow_id_end = 39,
  76. + .l0node_start = 48,
  77. + .l0node_end = 63,
  78. + .l1node_start = 8,
  79. + .l1node_end = 11,
  80. + },
  81. + { .ucastq_start = 160,
  82. + .ucastq_end = 175,
  83. + .mcastq_start = 276,
  84. + .mcastq_end = 279,
  85. + .flow_id_start = 40,
  86. + .flow_id_end = 43,
  87. + .l0node_start = 64,
  88. + .l0node_end = 79,
  89. + .l1node_start = 12,
  90. + .l1node_end = 15,
  91. + },
  92. + { .ucastq_start = 176,
  93. + .ucastq_end = 191,
  94. + .mcastq_start = 280,
  95. + .mcastq_end = 283,
  96. + .flow_id_start = 44,
  97. + .flow_id_end = 47,
  98. + .l0node_start = 80,
  99. + .l0node_end = 95,
  100. + .l1node_start = 16,
  101. + .l1node_end = 19,
  102. + },
  103. + { .ucastq_start = 192,
  104. + .ucastq_end = 207,
  105. + .mcastq_start = 284,
  106. + .mcastq_end = 287,
  107. + .flow_id_start = 48,
  108. + .flow_id_end = 51,
  109. + .l0node_start = 96,
  110. + .l0node_end = 111,
  111. + .l1node_start = 20,
  112. + .l1node_end = 23,
  113. + },
  114. + { .ucastq_start = 208,
  115. + .ucastq_end = 223,
  116. + .mcastq_start = 288,
  117. + .mcastq_end = 291,
  118. + .flow_id_start = 52,
  119. + .flow_id_end = 55,
  120. + .l0node_start = 112,
  121. + .l0node_end = 127,
  122. + .l1node_start = 24,
  123. + .l1node_end = 27,
  124. + },
  125. + { .ucastq_start = 224,
  126. + .ucastq_end = 239,
  127. + .mcastq_start = 292,
  128. + .mcastq_end = 295,
  129. + .flow_id_start = 56,
  130. + .flow_id_end = 59,
  131. + .l0node_start = 128,
  132. + .l0node_end = 143,
  133. + .l1node_start = 28,
  134. + .l1node_end = 31,
  135. + },
  136. + { .ucastq_start = 240,
  137. + .ucastq_end = 255,
  138. + .mcastq_start = 296,
  139. + .mcastq_end = 299,
  140. + .flow_id_start = 60,
  141. + .flow_id_end = 63,
  142. + .l0node_start = 144,
  143. + .l0node_end = 159,
  144. + .l1node_start = 32,
  145. + .l1node_end = 35,
  146. + },
  147. + { .ucastq_start = 64,
  148. + .ucastq_end = 143,
  149. + .mcastq_start = 0,
  150. + .mcastq_end = 0,
  151. + .flow_id_start = 1,
  152. + .flow_id_end = 35,
  153. + .l0node_start = 8,
  154. + .l0node_end = 47,
  155. + .l1node_start = 1,
  156. + .l1node_end = 7,
  157. + },
  158. +};
  159. +
  160. /* Set the PPE queue level scheduler configuration. */
  161. static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
  162. int node_id, int port,
  163. @@ -807,6 +940,149 @@ int ppe_queue_scheduler_set(struct ppe_d
  164. port, scheduler_cfg);
  165. }
  166. +/**
  167. + * ppe_queue_ucast_base_set - Set PPE unicast queue base ID and profile ID
  168. + * @ppe_dev: PPE device
  169. + * @queue_dst: PPE queue destination configuration
  170. + * @queue_base: PPE queue base ID
  171. + * @profile_id: Profile ID
  172. + *
  173. + * The PPE unicast queue base ID and profile ID are configured based on the
  174. + * destination port information that can be service code or CPU code or the
  175. + * destination port.
  176. + *
  177. + * Return: 0 on success, negative error code on failure.
  178. + */
  179. +int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
  180. + struct ppe_queue_ucast_dest queue_dst,
  181. + int queue_base, int profile_id)
  182. +{
  183. + int index, profile_size;
  184. + u32 val, reg;
  185. +
  186. + profile_size = queue_dst.src_profile << 8;
  187. + if (queue_dst.service_code_en)
  188. + index = PPE_QUEUE_BASE_SERVICE_CODE + profile_size +
  189. + queue_dst.service_code;
  190. + else if (queue_dst.cpu_code_en)
  191. + index = PPE_QUEUE_BASE_CPU_CODE + profile_size +
  192. + queue_dst.cpu_code;
  193. + else
  194. + index = profile_size + queue_dst.dest_port;
  195. +
  196. + val = FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID, profile_id);
  197. + val |= FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID, queue_base);
  198. + reg = PPE_UCAST_QUEUE_MAP_TBL_ADDR + index * PPE_UCAST_QUEUE_MAP_TBL_INC;
  199. +
  200. + return regmap_write(ppe_dev->regmap, reg, val);
  201. +}
  202. +
  203. +/**
  204. + * ppe_queue_ucast_offset_pri_set - Set PPE unicast queue offset based on priority
  205. + * @ppe_dev: PPE device
  206. + * @profile_id: Profile ID
  207. + * @priority: PPE internal priority to be used to set queue offset
  208. + * @queue_offset: Queue offset used for calculating the destination queue ID
  209. + *
  210. + * The PPE unicast queue offset is configured based on the PPE
  211. + * internal priority.
  212. + *
  213. + * Return: 0 on success, negative error code on failure.
  214. + */
  215. +int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
  216. + int profile_id,
  217. + int priority,
  218. + int queue_offset)
  219. +{
  220. + u32 val, reg;
  221. + int index;
  222. +
  223. + index = (profile_id << 4) + priority;
  224. + val = FIELD_PREP(PPE_UCAST_PRIORITY_MAP_TBL_CLASS, queue_offset);
  225. + reg = PPE_UCAST_PRIORITY_MAP_TBL_ADDR + index * PPE_UCAST_PRIORITY_MAP_TBL_INC;
  226. +
  227. + return regmap_write(ppe_dev->regmap, reg, val);
  228. +}
  229. +
  230. +/**
  231. + * ppe_queue_ucast_offset_hash_set - Set PPE unicast queue offset based on hash
  232. + * @ppe_dev: PPE device
  233. + * @profile_id: Profile ID
  234. + * @rss_hash: Packet hash value to be used to set queue offset
  235. + * @queue_offset: Queue offset used for calculating the destination queue ID
  236. + *
  237. + * The PPE unicast queue offset is configured based on the RSS hash value.
  238. + *
  239. + * Return: 0 on success, negative error code on failure.
  240. + */
  241. +int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
  242. + int profile_id,
  243. + int rss_hash,
  244. + int queue_offset)
  245. +{
  246. + u32 val, reg;
  247. + int index;
  248. +
  249. + index = (profile_id << 8) + rss_hash;
  250. + val = FIELD_PREP(PPE_UCAST_HASH_MAP_TBL_HASH, queue_offset);
  251. + reg = PPE_UCAST_HASH_MAP_TBL_ADDR + index * PPE_UCAST_HASH_MAP_TBL_INC;
  252. +
  253. + return regmap_write(ppe_dev->regmap, reg, val);
  254. +}
  255. +
  256. +/**
  257. + * ppe_port_resource_get - Get PPE resource per port
  258. + * @ppe_dev: PPE device
  259. + * @port: PPE port
  260. + * @type: Resource type
  261. + * @res_start: Resource start ID returned
  262. + * @res_end: Resource end ID returned
  263. + *
  264. + * PPE resource is assigned per PPE port, which is acquired for QoS scheduler.
  265. + *
  266. + * Return: 0 on success, negative error code on failure.
  267. + */
  268. +int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
  269. + enum ppe_resource_type type,
  270. + int *res_start, int *res_end)
  271. +{
  272. + struct ppe_port_schedule_resource res;
  273. +
  274. + /* The reserved resource with the maximum port ID of PPE is
  275. + * also allowed to be acquired.
  276. + */
  277. + if (port > ppe_dev->num_ports)
  278. + return -EINVAL;
  279. +
  280. + res = ppe_scheduler_res[port];
  281. + switch (type) {
  282. + case PPE_RES_UCAST:
  283. + *res_start = res.ucastq_start;
  284. + *res_end = res.ucastq_end;
  285. + break;
  286. + case PPE_RES_MCAST:
  287. + *res_start = res.mcastq_start;
  288. + *res_end = res.mcastq_end;
  289. + break;
  290. + case PPE_RES_FLOW_ID:
  291. + *res_start = res.flow_id_start;
  292. + *res_end = res.flow_id_end;
  293. + break;
  294. + case PPE_RES_L0_NODE:
  295. + *res_start = res.l0node_start;
  296. + *res_end = res.l0node_end;
  297. + break;
  298. + case PPE_RES_L1_NODE:
  299. + *res_start = res.l1node_start;
  300. + *res_end = res.l1node_end;
  301. + break;
  302. + default:
  303. + return -EINVAL;
  304. + }
  305. +
  306. + return 0;
  307. +}
  308. +
  309. static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
  310. const struct ppe_bm_port_config port_cfg)
  311. {
  312. @@ -1140,6 +1416,80 @@ sch_config_fail:
  313. return ret;
  314. };
  315. +/* Configure PPE queue destination of each PPE port. */
  316. +static int ppe_queue_dest_init(struct ppe_device *ppe_dev)
  317. +{
  318. + int ret, port_id, index, q_base, q_offset, res_start, res_end, pri_max;
  319. + struct ppe_queue_ucast_dest queue_dst;
  320. +
  321. + for (port_id = 0; port_id < ppe_dev->num_ports; port_id++) {
  322. + memset(&queue_dst, 0, sizeof(queue_dst));
  323. +
  324. + ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_UCAST,
  325. + &res_start, &res_end);
  326. + if (ret)
  327. + return ret;
  328. +
  329. + q_base = res_start;
  330. + queue_dst.dest_port = port_id;
  331. +
  332. + /* Configure queue base ID and profile ID that is same as
  333. + * physical port ID.
  334. + */
  335. + ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
  336. + q_base, port_id);
  337. + if (ret)
  338. + return ret;
  339. +
  340. + /* Queue priority range supported by each PPE port */
  341. + ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_L0_NODE,
  342. + &res_start, &res_end);
  343. + if (ret)
  344. + return ret;
  345. +
  346. + pri_max = res_end - res_start;
  347. +
  348. + /* Redirect ARP reply packet with the max priority on CPU port,
  349. + * which keeps the ARP reply directed to CPU (CPU code is 101)
  350. + * with highest priority queue of EDMA.
  351. + */
  352. + if (port_id == 0) {
  353. + memset(&queue_dst, 0, sizeof(queue_dst));
  354. +
  355. + queue_dst.cpu_code_en = true;
  356. + queue_dst.cpu_code = 101;
  357. + ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
  358. + q_base + pri_max,
  359. + 0);
  360. + if (ret)
  361. + return ret;
  362. + }
  363. +
  364. + /* Initialize the queue offset of internal priority. */
  365. + for (index = 0; index < PPE_QUEUE_INTER_PRI_NUM; index++) {
  366. + q_offset = index > pri_max ? pri_max : index;
  367. +
  368. + ret = ppe_queue_ucast_offset_pri_set(ppe_dev, port_id,
  369. + index, q_offset);
  370. + if (ret)
  371. + return ret;
  372. + }
  373. +
  374. + /* Initialize the queue offset of RSS hash as 0 to avoid the
  375. + * random hardware value that will lead to the unexpected
  376. + * destination queue generated.
  377. + */
  378. + for (index = 0; index < PPE_QUEUE_HASH_NUM; index++) {
  379. + ret = ppe_queue_ucast_offset_hash_set(ppe_dev, port_id,
  380. + index, 0);
  381. + if (ret)
  382. + return ret;
  383. + }
  384. + }
  385. +
  386. + return 0;
  387. +}
  388. +
  389. int ppe_hw_config(struct ppe_device *ppe_dev)
  390. {
  391. int ret;
  392. @@ -1152,5 +1502,9 @@ int ppe_hw_config(struct ppe_device *ppe
  393. if (ret)
  394. return ret;
  395. - return ppe_config_scheduler(ppe_dev);
  396. + ret = ppe_config_scheduler(ppe_dev);
  397. + if (ret)
  398. + return ret;
  399. +
  400. + return ppe_queue_dest_init(ppe_dev);
  401. }
  402. --- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
  403. +++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
  404. @@ -8,6 +8,16 @@
  405. #include "ppe.h"
  406. +/* There are different table index ranges for configuring queue base ID of
  407. + * the destination port, CPU code and service code.
  408. + */
  409. +#define PPE_QUEUE_BASE_DEST_PORT 0
  410. +#define PPE_QUEUE_BASE_CPU_CODE 1024
  411. +#define PPE_QUEUE_BASE_SERVICE_CODE 2048
  412. +
  413. +#define PPE_QUEUE_INTER_PRI_NUM 16
  414. +#define PPE_QUEUE_HASH_NUM 256
  415. +
  416. /**
  417. * enum ppe_scheduler_frame_mode - PPE scheduler frame mode.
  418. * @PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC: The scheduled frame includes IPG,
  419. @@ -42,8 +52,61 @@ struct ppe_scheduler_cfg {
  420. enum ppe_scheduler_frame_mode frame_mode;
  421. };
  422. +/**
  423. + * enum ppe_resource_type - PPE resource type.
  424. + * @PPE_RES_UCAST: Unicast queue resource.
  425. + * @PPE_RES_MCAST: Multicast queue resource.
  426. + * @PPE_RES_L0_NODE: Level 0 for queue based node resource.
  427. + * @PPE_RES_L1_NODE: Level 1 for flow based node resource.
  428. + * @PPE_RES_FLOW_ID: Flow based node resource.
  429. + */
  430. +enum ppe_resource_type {
  431. + PPE_RES_UCAST,
  432. + PPE_RES_MCAST,
  433. + PPE_RES_L0_NODE,
  434. + PPE_RES_L1_NODE,
  435. + PPE_RES_FLOW_ID,
  436. +};
  437. +
  438. +/**
  439. + * struct ppe_queue_ucast_dest - PPE unicast queue destination.
  440. + * @src_profile: Source profile.
  441. + * @service_code_en: Enable service code to map the queue base ID.
  442. + * @service_code: Service code.
  443. + * @cpu_code_en: Enable CPU code to map the queue base ID.
  444. + * @cpu_code: CPU code.
  445. + * @dest_port: Destination port.
  446. + *
  447. + * PPE egress queue ID is decided by the service code if enabled, otherwise
  448. + * by the CPU code if enabled, or by destination port if both service code
  449. + * and CPU code are disabled.
  450. + */
  451. +struct ppe_queue_ucast_dest {
  452. + int src_profile;
  453. + bool service_code_en;
  454. + int service_code;
  455. + bool cpu_code_en;
  456. + int cpu_code;
  457. + int dest_port;
  458. +};
  459. +
  460. int ppe_hw_config(struct ppe_device *ppe_dev);
  461. int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
  462. int node_id, bool flow_level, int port,
  463. struct ppe_scheduler_cfg scheduler_cfg);
  464. +int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
  465. + struct ppe_queue_ucast_dest queue_dst,
  466. + int queue_base,
  467. + int profile_id);
  468. +int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
  469. + int profile_id,
  470. + int priority,
  471. + int queue_offset);
  472. +int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
  473. + int profile_id,
  474. + int rss_hash,
  475. + int queue_offset);
  476. +int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
  477. + enum ppe_resource_type type,
  478. + int *res_start, int *res_end);
  479. #endif
  480. --- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
  481. +++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
  482. @@ -164,6 +164,27 @@
  483. #define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value) \
  484. u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_PRE_ALLOC)
  485. +/* The queue base configurations based on destination port,
  486. + * service code or CPU code.
  487. + */
  488. +#define PPE_UCAST_QUEUE_MAP_TBL_ADDR 0x810000
  489. +#define PPE_UCAST_QUEUE_MAP_TBL_ENTRIES 3072
  490. +#define PPE_UCAST_QUEUE_MAP_TBL_INC 0x10
  491. +#define PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID GENMASK(3, 0)
  492. +#define PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID GENMASK(11, 4)
  493. +
  494. +/* The queue offset configurations based on RSS hash value. */
  495. +#define PPE_UCAST_HASH_MAP_TBL_ADDR 0x830000
  496. +#define PPE_UCAST_HASH_MAP_TBL_ENTRIES 4096
  497. +#define PPE_UCAST_HASH_MAP_TBL_INC 0x10
  498. +#define PPE_UCAST_HASH_MAP_TBL_HASH GENMASK(7, 0)
  499. +
  500. +/* The queue offset configurations based on PPE internal priority. */
  501. +#define PPE_UCAST_PRIORITY_MAP_TBL_ADDR 0x842000
  502. +#define PPE_UCAST_PRIORITY_MAP_TBL_ENTRIES 256
  503. +#define PPE_UCAST_PRIORITY_MAP_TBL_INC 0x10
  504. +#define PPE_UCAST_PRIORITY_MAP_TBL_CLASS GENMASK(3, 0)
  505. +
  506. /* PPE unicast queue (0-255) configurations. */
  507. #define PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR 0x848000
  508. #define PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES 256