202-core-linux-support-layerscape.patch

From f339945a8e81fff22df95284e142b79c37fd2333 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <[email protected]>
Date: Thu, 5 Jul 2018 16:07:09 +0800
Subject: [PATCH 02/32] core-linux: support layerscape
This is an integrated patch for layerscape core-linux support.
Signed-off-by: Madalin Bucur <[email protected]>
Signed-off-by: Zhao Qiang <[email protected]>
Signed-off-by: Camelia Groza <[email protected]>
Signed-off-by: Madalin Bucur <[email protected]>
Signed-off-by: Zhang Ying-22455 <[email protected]>
Signed-off-by: Ramneek Mehresh <[email protected]>
Signed-off-by: Jarod Wilson <[email protected]>
Signed-off-by: Nikhil Badola <[email protected]>
Signed-off-by: stephen hemminger <[email protected]>
Signed-off-by: Arnd Bergmann <[email protected]>
Signed-off-by: Yangbo Lu <[email protected]>
---
drivers/base/devres.c | 66 ++++++
drivers/base/soc.c | 70 ++++++
.../net/ethernet/mellanox/mlxsw/spectrum.c | 2 +-
.../mellanox/mlxsw/spectrum_switchdev.c | 2 +-
drivers/net/ethernet/rocker/rocker_ofdpa.c | 4 +-
include/linux/device.h | 19 ++
include/linux/dma-mapping.h | 5 +
include/linux/fsl/svr.h | 97 ++++++++
include/linux/fsl_devices.h | 3 +
include/linux/irqdesc.h | 4 +
include/linux/irqdomain.h | 13 +-
include/linux/netdev_features.h | 2 +
include/linux/netdevice.h | 10 +-
include/linux/skbuff.h | 2 +
include/linux/sys_soc.h | 3 +
include/net/switchdev.h | 8 +-
include/uapi/linux/if_ether.h | 1 +
kernel/irq/Kconfig | 11 +
kernel/irq/Makefile | 1 +
kernel/irq/debugfs.c | 215 ++++++++++++++++++
kernel/irq/internals.h | 22 ++
kernel/irq/irqdesc.c | 1 +
kernel/irq/irqdomain.c | 171 ++++++++++----
kernel/irq/manage.c | 1 +
kernel/irq/msi.c | 2 +-
net/bridge/br.c | 4 +-
net/bridge/br_fdb.c | 2 +
net/bridge/br_private.h | 7 +
net/bridge/br_switchdev.c | 33 +++
net/core/dev.c | 30 ++-
net/core/net-sysfs.c | 20 +-
net/core/rtnetlink.c | 4 +-
net/core/skbuff.c | 29 ++-
net/sched/sch_generic.c | 7 +
34 files changed, 809 insertions(+), 62 deletions(-)
create mode 100644 include/linux/fsl/svr.h
create mode 100644 kernel/irq/debugfs.c
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/percpu.h>
#include "base.h"
@@ -985,3 +986,68 @@ void devm_free_pages(struct device *dev,
&devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);
+
+static void devm_percpu_release(struct device *dev, void *pdata)
+{
+ void __percpu *p;
+
+ p = *(void __percpu **)pdata;
+ free_percpu(p);
+}
+
+static int devm_percpu_match(struct device *dev, void *data, void *p)
+{
+ struct devres *devr = container_of(data, struct devres, data);
+
+ return *(void **)devr->data == p;
+}
+
+/**
+ * __devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @size: Size of per-cpu memory to allocate
+ * @align: Alignment of per-cpu memory to allocate
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+ size_t align)
+{
+ void *p;
+ void __percpu *pcpu;
+
+ pcpu = __alloc_percpu(size, align);
+ if (!pcpu)
+ return NULL;
+
+ p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
+ if (!p) {
+ free_percpu(pcpu);
+ return NULL;
+ }
+
+ *(void __percpu **)p = pcpu;
+
+ devres_add(dev, p);
+
+ return pcpu;
+}
+EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
+
+/**
+ * devm_free_percpu - Resource-managed free_percpu
+ * @dev: Device this memory belongs to
+ * @pdata: Per-cpu memory to free
+ *
+ * Free memory allocated with devm_alloc_percpu().
+ */
+void devm_free_percpu(struct device *dev, void __percpu *pdata)
+{
+ WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
+ (void *)pdata));
+}
+EXPORT_SYMBOL_GPL(devm_free_percpu);
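
The helpers above mirror the other devm_* allocators: the per-cpu pointer is stored in a devres record and released automatically on detach. A minimal usage sketch, with a hypothetical driver and stats structure that are not part of this patch:

/* Sketch: per-cpu counters in a hypothetical driver probe. */
struct foo_stats {
	u64 rx_packets;
	u64 tx_packets;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_stats __percpu *stats;

	/* Freed automatically on driver detach; no explicit free_percpu(). */
	stats = devm_alloc_percpu(&pdev->dev, struct foo_stats);
	if (!stats)
		return -ENOMEM;

	this_cpu_inc(stats->rx_packets);	/* illustrative update */
	return 0;
}
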
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/err.h>
+#include <linux/glob.h>
static DEFINE_IDA(soc_ida);
@@ -159,3 +160,72 @@ static int __init soc_bus_register(void)
return bus_register(&soc_bus_type);
}
core_initcall(soc_bus_register);
+
+static int soc_device_match_one(struct device *dev, void *arg)
+{
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+ const struct soc_device_attribute *match = arg;
+
+ if (match->machine &&
+ (!soc_dev->attr->machine ||
+ !glob_match(match->machine, soc_dev->attr->machine)))
+ return 0;
+
+ if (match->family &&
+ (!soc_dev->attr->family ||
+ !glob_match(match->family, soc_dev->attr->family)))
+ return 0;
+
+ if (match->revision &&
+ (!soc_dev->attr->revision ||
+ !glob_match(match->revision, soc_dev->attr->revision)))
+ return 0;
+
+ if (match->soc_id &&
+ (!soc_dev->attr->soc_id ||
+ !glob_match(match->soc_id, soc_dev->attr->soc_id)))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * soc_device_match - identify the SoC in the machine
+ * @matches: zero-terminated array of possible matches
+ *
+ * returns the first matching entry of the argument array, or NULL
+ * if none of them match.
+ *
+ * This function is meant as a helper in place of of_match_node()
+ * in cases where either no device tree is available or the information
+ * in a device node is insufficient to identify a particular variant
+ * by its compatible strings or other properties. For new devices,
+ * the DT binding should always provide unique compatible strings
+ * that allow the use of of_match_node() instead.
+ *
+ * The calling function can use the .data entry of the
+ * soc_device_attribute to pass a structure or function pointer for
+ * each entry.
+ */
+const struct soc_device_attribute *soc_device_match(
+ const struct soc_device_attribute *matches)
+{
+ int ret = 0;
+
+ if (!matches)
+ return NULL;
+
+ while (!ret) {
+ if (!(matches->machine || matches->family ||
+ matches->revision || matches->soc_id))
+ break;
+ ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
+ soc_device_match_one);
+ if (!ret)
+ matches++;
+ else
+ return matches;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(soc_device_match);
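
soc_device_match() walks the soc bus and glob-matches each non-NULL field, so a caller can key quirks on SoC identity. A minimal sketch, assuming a hypothetical quirk table and fixup helper:

/* Sketch: pick up a revision-specific fixup via the new .data member. */
static const struct soc_device_attribute foo_soc_quirks[] = {
	{ .soc_id = "T104*", .revision = "1.*", .data = &foo_rev1_fixup },
	{ /* sentinel */ }
};

static void foo_apply_soc_quirks(void)
{
	const struct soc_device_attribute *match;

	match = soc_device_match(foo_soc_quirks);
	if (match)
		foo_install_fixup(match->data);	/* hypothetical helper */
}
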
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -859,7 +859,7 @@ mlxsw_sp_port_get_sw_stats64(const struc
return 0;
}
-static bool mlxsw_sp_port_has_offload_stats(int attr_id)
+static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
switch (attr_id) {
case IFLA_OFFLOAD_XSTATS_CPU_HIT:
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1405,7 +1405,7 @@ static void mlxsw_sp_fdb_call_notifiers(
if (learning_sync) {
info.addr = mac;
info.vid = vid;
- notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
+ notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
call_switchdev_notifiers(notifier_type, dev, &info.info);
}
}
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1939,10 +1939,10 @@ static void ofdpa_port_fdb_learn_work(st
rtnl_lock();
if (learned && removing)
- call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
lw->ofdpa_port->dev, &info.info);
else if (learned && !removing)
- call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
lw->ofdpa_port->dev, &info.info);
rtnl_unlock();
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -688,6 +688,25 @@ void __iomem *devm_ioremap_resource(stru
int devm_add_action(struct device *dev, void (*action)(void *), void *data);
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
+/**
+ * devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @type: Type to allocate per-cpu memory for
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+#define devm_alloc_percpu(dev, type) \
+ ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
+ __alignof__(type)))
+
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+ size_t align);
+void devm_free_percpu(struct device *dev, void __percpu *pdata);
+
static inline int devm_add_action_or_reset(struct device *dev,
void (*action)(void *), void *data)
{
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -164,6 +164,11 @@ int dma_mmap_from_coherent(struct device
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
+static inline void set_dma_ops(struct device *dev,
+ struct dma_map_ops *dma_ops)
+{
+ dev->archdata.dma_ops = dma_ops;
+}
#else
/*
* Define the dma api to allow compilation but not linking of
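
The new set_dma_ops() helper only stores the ops pointer in dev->archdata, which lets bus code install custom DMA operations on child devices. A sketch with an illustrative ops table:

/* Sketch: a bus driver installing its own dma_map_ops on a child device. */
extern struct dma_map_ops foo_bus_dma_ops;	/* hypothetical ops table */

static void foo_bus_setup_child(struct device *child)
{
	set_dma_ops(child, &foo_bus_dma_ops);
}
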
--- /dev/null
+++ b/include/linux/fsl/svr.h
@@ -0,0 +1,97 @@
+/*
+ * MPC85xx cpu type detection
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef FSL_SVR_H
+#define FSL_SVR_H
+
+#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design revision */
+#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/
+#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/
+
+/* Some parts define SVR[0:23] as the SOC version */
+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */
+
+#define SVR_8533 0x803400
+#define SVR_8535 0x803701
+#define SVR_8536 0x803700
+#define SVR_8540 0x803000
+#define SVR_8541 0x807200
+#define SVR_8543 0x803200
+#define SVR_8544 0x803401
+#define SVR_8545 0x803102
+#define SVR_8547 0x803101
+#define SVR_8548 0x803100
+#define SVR_8555 0x807100
+#define SVR_8560 0x807000
+#define SVR_8567 0x807501
+#define SVR_8568 0x807500
+#define SVR_8569 0x808000
+#define SVR_8572 0x80E000
+#define SVR_P1010 0x80F100
+#define SVR_P1011 0x80E500
+#define SVR_P1012 0x80E501
+#define SVR_P1013 0x80E700
+#define SVR_P1014 0x80F101
+#define SVR_P1017 0x80F700
+#define SVR_P1020 0x80E400
+#define SVR_P1021 0x80E401
+#define SVR_P1022 0x80E600
+#define SVR_P1023 0x80F600
+#define SVR_P1024 0x80E402
+#define SVR_P1025 0x80E403
+#define SVR_P2010 0x80E300
+#define SVR_P2020 0x80E200
+#define SVR_P2040 0x821000
+#define SVR_P2041 0x821001
+#define SVR_P3041 0x821103
+#define SVR_P4040 0x820100
+#define SVR_P4080 0x820000
+#define SVR_P5010 0x822100
+#define SVR_P5020 0x822000
+#define SVR_P5021 0X820500
+#define SVR_P5040 0x820400
+#define SVR_T4240 0x824000
+#define SVR_T4120 0x824001
+#define SVR_T4160 0x824100
+#define SVR_T4080 0x824102
+#define SVR_C291 0x850000
+#define SVR_C292 0x850020
+#define SVR_C293 0x850030
+#define SVR_B4860 0X868000
+#define SVR_G4860 0x868001
+#define SVR_G4060 0x868003
+#define SVR_B4440 0x868100
+#define SVR_G4440 0x868101
+#define SVR_B4420 0x868102
+#define SVR_B4220 0x868103
+#define SVR_T1040 0x852000
+#define SVR_T1041 0x852001
+#define SVR_T1042 0x852002
+#define SVR_T1020 0x852100
+#define SVR_T1021 0x852101
+#define SVR_T1022 0x852102
+#define SVR_T1023 0x854100
+#define SVR_T1024 0x854000
+#define SVR_T2080 0x853000
+#define SVR_T2081 0x853100
+
+#define SVR_8610 0x80A000
+#define SVR_8641 0x809000
+#define SVR_8641D 0x809001
+
+#define SVR_9130 0x860001
+#define SVR_9131 0x860000
+#define SVR_9132 0x861000
+#define SVR_9232 0x861400
+
+#define SVR_Unknown 0xFFFFFF
+
+#endif
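
For reference, these macros decode a raw SVR (System Version Register) value; the 0xFFF7FF mask in SVR_SOC_VER() drops the per-part 'E' (security engine) bit so both flavours of a part compare equal. A short sketch with a made-up SVR value:

/* Sketch: identify the SoC from a raw SVR value, e.g. 0x85200011. */
#include <linux/fsl/svr.h>

static void foo_report_soc(u32 svr)
{
	if (SVR_SOC_VER(svr) == SVR_T1040)
		pr_info("T1040 rev %d.%d\n", SVR_MAJ(svr), SVR_MIN(svr));
}
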
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -99,7 +99,10 @@ struct fsl_usb2_platform_data {
unsigned suspended:1;
unsigned already_suspended:1;
unsigned has_fsl_erratum_a007792:1;
+ unsigned has_fsl_erratum_14:1;
unsigned has_fsl_erratum_a005275:1;
+ unsigned has_fsl_erratum_a006918:1;
+ unsigned has_fsl_erratum_a005697:1;
unsigned check_phy_clk_valid:1;
/* register save area for suspend/resume */
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -46,6 +46,7 @@ struct pt_regs;
* @rcu: rcu head for delayed free
* @kobj: kobject used to represent this struct in sysfs
* @dir: /proc/irq/ procfs entry
+ * @debugfs_file: dentry for the debugfs file
* @name: flow handler name for /proc/interrupts output
*/
struct irq_desc {
@@ -88,6 +89,9 @@ struct irq_desc {
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dir;
#endif
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+ struct dentry *debugfs_file;
+#endif
#ifdef CONFIG_SPARSE_IRQ
struct rcu_head rcu;
struct kobject kobj;
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -138,6 +138,7 @@ struct irq_domain_chip_generic;
* setting up one or more generic chips for interrupt controllers
* drivers using the generic chip library which uses this pointer.
* @parent: Pointer to parent irq_domain to support hierarchy irq_domains
+ * @debugfs_file: dentry for the domain debugfs file
*
* Revmap data, used internally by irq_domain
* @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
@@ -160,6 +161,9 @@ struct irq_domain {
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
struct irq_domain *parent;
#endif
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+ struct dentry *debugfs_file;
+#endif
/* reverse map data. The linear map gets appended to the irq_domain */
irq_hw_number_t hwirq_max;
@@ -174,8 +178,8 @@ enum {
/* Irq domain is hierarchical */
IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
- /* Core calls alloc/free recursive through the domain hierarchy. */
- IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
+ /* Irq domain name was allocated in __irq_domain_add() */
+ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
/* Irq domain is an IPI domain with virq per cpu */
IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
@@ -231,6 +235,9 @@ static inline bool is_fwnode_irqchip(str
return fwnode && fwnode->type == FWNODE_IRQCHIP;
}
+extern void irq_domain_update_bus_token(struct irq_domain *domain,
+ enum irq_domain_bus_token bus_token);
+
static inline
struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
enum irq_domain_bus_token bus_token)
@@ -403,7 +410,7 @@ static inline int irq_domain_alloc_irqs(
NULL);
}
-extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
+extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs, void *arg);
extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -74,6 +74,7 @@ enum {
NETIF_F_BUSY_POLL_BIT, /* Busy poll */
NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
+ NETIF_F_HW_ACCEL_MQ_BIT, /* Hardware-accelerated multiqueue */
/*
* Add your fresh new feature above and remember to update
@@ -136,6 +137,7 @@ enum {
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
#define NETIF_F_HW_TC __NETIF_F(HW_TC)
+#define NETIF_F_HW_ACCEL_MQ __NETIF_F(HW_ACCEL_MQ)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -930,7 +930,7 @@ struct netdev_xdp {
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
- * bool (*ndo_has_offload_stats)(int attr_id)
+ * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
* Return true if this device supports offload stats of this attr_id.
*
* int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
@@ -1167,7 +1167,7 @@ struct net_device_ops {
struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
- bool (*ndo_has_offload_stats)(int attr_id);
+ bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
int (*ndo_get_offload_stats)(int attr_id,
const struct net_device *dev,
void *attr_data);
@@ -1509,6 +1509,8 @@ enum netdev_priv_flags {
* @if_port: Selectable AUI, TP, ...
* @dma: DMA channel
* @mtu: Interface MTU value
+ * @min_mtu: Interface Minimum MTU value
+ * @max_mtu: Interface Maximum MTU value
* @type: Interface hardware type
* @hard_header_len: Maximum hardware header length.
* @min_header_len: Minimum hardware header length
@@ -1735,6 +1737,8 @@ struct net_device {
unsigned char dma;
unsigned int mtu;
+ unsigned int min_mtu;
+ unsigned int max_mtu;
unsigned short type;
unsigned short hard_header_len;
unsigned short min_header_len;
@@ -1938,6 +1942,8 @@ int netdev_set_prio_tc_map(struct net_de
return 0;
}
+int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
+
static inline
void netdev_reset_tc(struct net_device *dev)
{
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -903,6 +903,7 @@ void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
+void skb_recycle(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
@@ -3059,6 +3060,7 @@ static inline void skb_free_datagram_loc
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+void copy_skb_header(struct sk_buff *new, const struct sk_buff *old);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int len, __wsum csum);
--- a/include/linux/sys_soc.h
+++ b/include/linux/sys_soc.h
@@ -13,6 +13,7 @@ struct soc_device_attribute {
const char *family;
const char *revision;
const char *soc_id;
+ const void *data;
};
/**
@@ -34,4 +35,6 @@ void soc_device_unregister(struct soc_de
*/
struct device *soc_device_to_device(struct soc_device *soc);
+const struct soc_device_attribute *soc_device_match(
+ const struct soc_device_attribute *matches);
#endif /* __SOC_BUS_H */
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -46,6 +46,7 @@ enum switchdev_attr_id {
SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
SWITCHDEV_ATTR_ID_PORT_STP_STATE,
SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
+ SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT,
SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
};
@@ -60,6 +61,7 @@ struct switchdev_attr {
struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
u8 stp_state; /* PORT_STP_STATE */
unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
+ unsigned long brport_flags_support; /* PORT_BRIDGE_FLAGS_SUPPORT */
clock_t ageing_time; /* BRIDGE_AGEING_TIME */
bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
} u;
@@ -149,8 +151,10 @@ struct switchdev_ops {
};
enum switchdev_notifier_type {
- SWITCHDEV_FDB_ADD = 1,
- SWITCHDEV_FDB_DEL,
+ SWITCHDEV_FDB_ADD_TO_BRIDGE = 1,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ SWITCHDEV_FDB_ADD_TO_DEVICE,
+ SWITCHDEV_FDB_DEL_TO_DEVICE,
};
struct switchdev_notifier_info {
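
With the split notifier types, the bridge consumes the *_TO_BRIDGE events (see net/bridge/br.c below) while switch drivers listen for *_TO_DEVICE events to offload entries the bridge learned in software. A sketch of a driver-side handler; the foo_* names are illustrative:

/* Sketch: hypothetical switch driver offloading bridge-learned FDB entries. */
static int foo_switchdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info = ptr;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		foo_fdb_offload(dev, fdb_info->addr, fdb_info->vid);	/* hypothetical */
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		foo_fdb_unoffload(dev, fdb_info->addr, fdb_info->vid);	/* hypothetical */
		break;
	}
	return NOTIFY_DONE;
}

/* Registered once at probe time with register_switchdev_notifier(). */
static struct notifier_block foo_switchdev_nb = {
	.notifier_call = foo_switchdev_event,
};
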
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -36,6 +36,7 @@
#define ETH_DATA_LEN 1500 /* Max. octets in payload */
#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
#define ETH_FCS_LEN 4 /* Octets in the FCS */
+#define ETH_MIN_MTU 68 /* Min IPv4 MTU per RFC791 */
/*
* These are the defined Ethernet Protocol ID's.
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -108,4 +108,15 @@ config SPARSE_IRQ
If you don't know what to do here, say N.
+config GENERIC_IRQ_DEBUGFS
+ bool "Expose irq internals in debugfs"
+ depends on DEBUG_FS
+ default n
+ ---help---
+
+ Exposes internal state information through debugfs. Mostly for
+ developers and debugging of hard to diagnose interrupt problems.
+
+ If you don't know what to do here, say N.
+
endmenu
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_PM_SLEEP) += pm.o
obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
obj-$(CONFIG_SMP) += affinity.o
+obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
--- /dev/null
+++ b/kernel/irq/debugfs.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2017 Thomas Gleixner <[email protected]>
+ *
+ * This file is licensed under the GPL V2.
+ */
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+
+#include "internals.h"
+
+static struct dentry *irq_dir;
+
+struct irq_bit_descr {
+ unsigned int mask;
+ char *name;
+};
+#define BIT_MASK_DESCR(m) { .mask = m, .name = #m }
+
+static void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
+ const struct irq_bit_descr *sd, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++, sd++) {
+ if (state & sd->mask)
+ seq_printf(m, "%*s%s\n", ind + 12, "", sd->name);
+ }
+}
+
+#ifdef CONFIG_SMP
+static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
+{
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct cpumask *msk;
+
+ msk = irq_data_get_affinity_mask(data);
+ seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(msk));
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ msk = desc->pending_mask;
+ seq_printf(m, "pending: %*pbl\n", cpumask_pr_args(msk));
+#endif
+}
+#else
+static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc) { }
+#endif
+
+static const struct irq_bit_descr irqchip_flags[] = {
+ BIT_MASK_DESCR(IRQCHIP_SET_TYPE_MASKED),
+ BIT_MASK_DESCR(IRQCHIP_EOI_IF_HANDLED),
+ BIT_MASK_DESCR(IRQCHIP_MASK_ON_SUSPEND),
+ BIT_MASK_DESCR(IRQCHIP_ONOFFLINE_ENABLED),
+ BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
+ BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
+ BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+};
+
+static void
+irq_debug_show_chip(struct seq_file *m, struct irq_data *data, int ind)
+{
+ struct irq_chip *chip = data->chip;
+
+ if (!chip) {
+ seq_printf(m, "chip: None\n");
+ return;
+ }
+ seq_printf(m, "%*schip: %s\n", ind, "", chip->name);
+ seq_printf(m, "%*sflags: 0x%lx\n", ind + 1, "", chip->flags);
+ irq_debug_show_bits(m, ind, chip->flags, irqchip_flags,
+ ARRAY_SIZE(irqchip_flags));
+}
+
+static void
+irq_debug_show_data(struct seq_file *m, struct irq_data *data, int ind)
+{
+ seq_printf(m, "%*sdomain: %s\n", ind, "",
+ data->domain ? data->domain->name : "");
+ seq_printf(m, "%*shwirq: 0x%lx\n", ind + 1, "", data->hwirq);
+ irq_debug_show_chip(m, data, ind + 1);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ if (!data->parent_data)
+ return;
+ seq_printf(m, "%*sparent:\n", ind + 1, "");
+ irq_debug_show_data(m, data->parent_data, ind + 4);
+#endif
+}
+
+static const struct irq_bit_descr irqdata_states[] = {
+ BIT_MASK_DESCR(IRQ_TYPE_EDGE_RISING),
+ BIT_MASK_DESCR(IRQ_TYPE_EDGE_FALLING),
+ BIT_MASK_DESCR(IRQ_TYPE_LEVEL_HIGH),
+ BIT_MASK_DESCR(IRQ_TYPE_LEVEL_LOW),
+ BIT_MASK_DESCR(IRQD_LEVEL),
+
+ BIT_MASK_DESCR(IRQD_ACTIVATED),
+ BIT_MASK_DESCR(IRQD_IRQ_STARTED),
+ BIT_MASK_DESCR(IRQD_IRQ_DISABLED),
+ BIT_MASK_DESCR(IRQD_IRQ_MASKED),
+ BIT_MASK_DESCR(IRQD_IRQ_INPROGRESS),
+
+ BIT_MASK_DESCR(IRQD_PER_CPU),
+ BIT_MASK_DESCR(IRQD_NO_BALANCING),
+
+ BIT_MASK_DESCR(IRQD_MOVE_PCNTXT),
+ BIT_MASK_DESCR(IRQD_AFFINITY_SET),
+ BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
+ BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
+ BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+
+ BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
+
+ BIT_MASK_DESCR(IRQD_WAKEUP_STATE),
+ BIT_MASK_DESCR(IRQD_WAKEUP_ARMED),
+};
+
+static const struct irq_bit_descr irqdesc_states[] = {
+ BIT_MASK_DESCR(_IRQ_NOPROBE),
+ BIT_MASK_DESCR(_IRQ_NOREQUEST),
+ BIT_MASK_DESCR(_IRQ_NOTHREAD),
+ BIT_MASK_DESCR(_IRQ_NOAUTOEN),
+ BIT_MASK_DESCR(_IRQ_NESTED_THREAD),
+ BIT_MASK_DESCR(_IRQ_PER_CPU_DEVID),
+ BIT_MASK_DESCR(_IRQ_IS_POLLED),
+ BIT_MASK_DESCR(_IRQ_DISABLE_UNLAZY),
+};
+
+static const struct irq_bit_descr irqdesc_istates[] = {
+ BIT_MASK_DESCR(IRQS_AUTODETECT),
+ BIT_MASK_DESCR(IRQS_SPURIOUS_DISABLED),
+ BIT_MASK_DESCR(IRQS_POLL_INPROGRESS),
+ BIT_MASK_DESCR(IRQS_ONESHOT),
+ BIT_MASK_DESCR(IRQS_REPLAY),
+ BIT_MASK_DESCR(IRQS_WAITING),
+ BIT_MASK_DESCR(IRQS_PENDING),
+ BIT_MASK_DESCR(IRQS_SUSPENDED),
+};
+
+
+static int irq_debug_show(struct seq_file *m, void *p)
+{
+ struct irq_desc *desc = m->private;
+ struct irq_data *data;
+
+ raw_spin_lock_irq(&desc->lock);
+ data = irq_desc_get_irq_data(desc);
+ seq_printf(m, "handler: %pf\n", desc->handle_irq);
+ seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors);
+ irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states,
+ ARRAY_SIZE(irqdesc_states));
+ seq_printf(m, "istate: 0x%08x\n", desc->istate);
+ irq_debug_show_bits(m, 0, desc->istate, irqdesc_istates,
+ ARRAY_SIZE(irqdesc_istates));
+ seq_printf(m, "ddepth: %u\n", desc->depth);
+ seq_printf(m, "wdepth: %u\n", desc->wake_depth);
+ seq_printf(m, "dstate: 0x%08x\n", irqd_get(data));
+ irq_debug_show_bits(m, 0, irqd_get(data), irqdata_states,
+ ARRAY_SIZE(irqdata_states));
+ seq_printf(m, "node: %d\n", irq_data_get_node(data));
+ irq_debug_show_masks(m, desc);
+ irq_debug_show_data(m, data, 0);
+ raw_spin_unlock_irq(&desc->lock);
+ return 0;
+}
+
+static int irq_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, irq_debug_show, inode->i_private);
+}
+
+static const struct file_operations dfs_irq_ops = {
+ .open = irq_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
+{
+ char name [10];
+
+ if (!irq_dir || !desc || desc->debugfs_file)
+ return;
+
+ sprintf(name, "%d", irq);
+ desc->debugfs_file = debugfs_create_file(name, 0444, irq_dir, desc,
+ &dfs_irq_ops);
+}
+
+void irq_remove_debugfs_entry(struct irq_desc *desc)
+{
+ if (desc->debugfs_file)
+ debugfs_remove(desc->debugfs_file);
+}
+
+static int __init irq_debugfs_init(void)
+{
+ struct dentry *root_dir;
+ int irq;
+
+ root_dir = debugfs_create_dir("irq", NULL);
+ if (!root_dir)
+ return -ENOMEM;
+
+ irq_domain_debugfs_init(root_dir);
+
+ irq_dir = debugfs_create_dir("irqs", root_dir);
+
+ irq_lock_sparse();
+ for_each_active_irq(irq)
+ irq_add_debugfs_entry(irq, irq_to_desc(irq));
+ irq_unlock_sparse();
+
+ return 0;
+}
+__initcall(irq_debugfs_init);
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -169,6 +169,11 @@ irq_put_desc_unlock(struct irq_desc *des
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+static inline unsigned int irqd_get(struct irq_data *d)
+{
+ return __irqd_to_state(d);
+}
+
/*
* Manipulation functions for irq_data.state
*/
@@ -226,3 +231,20 @@ irq_pm_install_action(struct irq_desc *d
static inline void
irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
#endif
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
+void irq_remove_debugfs_entry(struct irq_desc *desc);
+# ifdef CONFIG_IRQ_DOMAIN
+void irq_domain_debugfs_init(struct dentry *root);
+# else
+static inline void irq_domain_debugfs_init(struct dentry *root) { }
+# endif
+#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
+static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
+{
+}
+static inline void irq_remove_debugfs_entry(struct irq_desc *d)
+{
+}
+#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -394,6 +394,7 @@ static void free_desc(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
+ irq_remove_debugfs_entry(desc);
unregister_irq_proc(irq, desc);
/*
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -31,6 +31,14 @@ struct irqchip_fwid {
void *data;
};
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+static void debugfs_add_domain_dir(struct irq_domain *d);
+static void debugfs_remove_domain_dir(struct irq_domain *d);
+#else
+static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
+static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
+#endif
+
/**
* irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
* identifying an irq domain
@@ -117,6 +125,7 @@ struct irq_domain *__irq_domain_add(stru
irq_domain_check_hierarchy(domain);
mutex_lock(&irq_domain_mutex);
+ debugfs_add_domain_dir(domain);
list_add(&domain->link, &irq_domain_list);
mutex_unlock(&irq_domain_mutex);
@@ -136,6 +145,7 @@ EXPORT_SYMBOL_GPL(__irq_domain_add);
void irq_domain_remove(struct irq_domain *domain)
{
mutex_lock(&irq_domain_mutex);
+ debugfs_remove_domain_dir(domain);
WARN_ON(!radix_tree_empty(&domain->revmap_tree));
@@ -156,6 +166,37 @@ void irq_domain_remove(struct irq_domain
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
+void irq_domain_update_bus_token(struct irq_domain *domain,
+ enum irq_domain_bus_token bus_token)
+{
+ char *name;
+
+ if (domain->bus_token == bus_token)
+ return;
+
+ mutex_lock(&irq_domain_mutex);
+
+ domain->bus_token = bus_token;
+
+ name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
+ if (!name) {
+ mutex_unlock(&irq_domain_mutex);
+ return;
+ }
+
+ debugfs_remove_domain_dir(domain);
+
+ if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
+ kfree(domain->name);
+ else
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+
+ domain->name = name;
+ debugfs_add_domain_dir(domain);
+
+ mutex_unlock(&irq_domain_mutex);
+}
+
/**
* irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
* @of_node: pointer to interrupt controller's device tree node.
@@ -1164,43 +1205,18 @@ void irq_domain_free_irqs_top(struct irq
irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
-static bool irq_domain_is_auto_recursive(struct irq_domain *domain)
-{
- return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE;
-}
-
-static void irq_domain_free_irqs_recursive(struct irq_domain *domain,
+static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs)
{
domain->ops->free(domain, irq_base, nr_irqs);
- if (irq_domain_is_auto_recursive(domain)) {
- BUG_ON(!domain->parent);
- irq_domain_free_irqs_recursive(domain->parent, irq_base,
- nr_irqs);
- }
}
-int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
+int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs, void *arg)
{
- int ret = 0;
- struct irq_domain *parent = domain->parent;
- bool recursive = irq_domain_is_auto_recursive(domain);
-
- BUG_ON(recursive && !parent);
- if (recursive)
- ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
- nr_irqs, arg);
- if (ret < 0)
- return ret;
-
- ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
- if (ret < 0 && recursive)
- irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);
-
- return ret;
+ return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
}
/**
@@ -1261,7 +1277,7 @@ int __irq_domain_alloc_irqs(struct irq_d
}
mutex_lock(&irq_domain_mutex);
- ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg);
+ ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
if (ret < 0) {
mutex_unlock(&irq_domain_mutex);
goto out_free_irq_data;
@@ -1296,7 +1312,7 @@ void irq_domain_free_irqs(unsigned int v
mutex_lock(&irq_domain_mutex);
for (i = 0; i < nr_irqs; i++)
irq_domain_remove_irq(virq + i);
- irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs);
+ irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
mutex_unlock(&irq_domain_mutex);
irq_domain_free_irq_data(virq, nr_irqs);
@@ -1316,15 +1332,11 @@ int irq_domain_alloc_irqs_parent(struct
unsigned int irq_base, unsigned int nr_irqs,
void *arg)
{
- /* irq_domain_alloc_irqs_recursive() has called parent's alloc() */
- if (irq_domain_is_auto_recursive(domain))
- return 0;
+ if (!domain->parent)
+ return -ENOSYS;
- domain = domain->parent;
- if (domain)
- return irq_domain_alloc_irqs_recursive(domain, irq_base,
- nr_irqs, arg);
- return -ENOSYS;
+ return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
+ nr_irqs, arg);
}
EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
@@ -1339,10 +1351,10 @@ EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_
void irq_domain_free_irqs_parent(struct irq_domain *domain,
unsigned int irq_base, unsigned int nr_irqs)
{
- /* irq_domain_free_irqs_recursive() will call parent's free */
- if (!irq_domain_is_auto_recursive(domain) && domain->parent)
- irq_domain_free_irqs_recursive(domain->parent, irq_base,
- nr_irqs);
+ if (!domain->parent)
+ return;
+
+ irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
}
EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
@@ -1448,3 +1460,78 @@ static void irq_domain_check_hierarchy(s
{
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+static struct dentry *domain_dir;
+
+static void
+irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
+{
+ seq_printf(m, "%*sname: %s\n", ind, "", d->name);
+ seq_printf(m, "%*ssize: %u\n", ind + 1, "",
+ d->revmap_size + d->revmap_direct_max_irq);
+ seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
+ seq_printf(m, "%*sflags: 0x%08x\n", ind + 1, "", d->flags);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ if (!d->parent)
+ return;
+ seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
+ irq_domain_debug_show_one(m, d->parent, ind + 4);
+#endif
+}
+
+static int irq_domain_debug_show(struct seq_file *m, void *p)
+{
+ struct irq_domain *d = m->private;
+
+ /* Default domain? Might be NULL */
+ if (!d) {
+ if (!irq_default_domain)
+ return 0;
+ d = irq_default_domain;
+ }
+ irq_domain_debug_show_one(m, d, 0);
+ return 0;
+}
+
+static int irq_domain_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, irq_domain_debug_show, inode->i_private);
+}
+
+static const struct file_operations dfs_domain_ops = {
+ .open = irq_domain_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void debugfs_add_domain_dir(struct irq_domain *d)
+{
+ if (!d->name || !domain_dir || d->debugfs_file)
+ return;
+ d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
+ &dfs_domain_ops);
+}
+
+static void debugfs_remove_domain_dir(struct irq_domain *d)
+{
+ if (d->debugfs_file)
+ debugfs_remove(d->debugfs_file);
+}
+
+void __init irq_domain_debugfs_init(struct dentry *root)
+{
+ struct irq_domain *d;
+
+ domain_dir = debugfs_create_dir("domains", root);
+ if (!domain_dir)
+ return;
+
+ debugfs_create_file("default", 0444, domain_dir, NULL, &dfs_domain_ops);
+ mutex_lock(&irq_domain_mutex);
+ list_for_each_entry(d, &irq_domain_list, link)
+ debugfs_add_domain_dir(d);
+ mutex_unlock(&irq_domain_mutex);
+}
+#endif
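
Note the behavioural change in the hunks above: with IRQ_DOMAIN_FLAG_AUTO_RECURSIVE gone, the core no longer walks the parent chain itself; a stacked domain's .alloc callback calls up explicitly, typically via irq_domain_alloc_irqs_parent(), which now just forwards to irq_domain_alloc_irqs_hierarchy() on domain->parent. A sketch, with a hypothetical domain and init helper:

/* Sketch: .alloc of a hypothetical stacked irq domain. */
static int foo_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	int ret;

	/* Allocate in the parent domain first... */
	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	/* ...then set up this level (hypothetical helper). */
	return foo_domain_init_level(domain, virq, nr_irqs);
}
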
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1387,6 +1387,7 @@ __setup_irq(unsigned int irq, struct irq
wake_up_process(new->secondary->thread);
register_irq_proc(irq, desc);
+ irq_add_debugfs_entry(irq, desc);
new->dir = NULL;
register_handler_proc(irq, new);
free_cpumask_var(mask);
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -310,7 +310,7 @@ int msi_domain_populate_irqs(struct irq_
ops->set_desc(arg, desc);
/* Assumes the domain mutex is held! */
- ret = irq_domain_alloc_irqs_recursive(domain, virq, 1, arg);
+ ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
if (ret)
break;
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -138,14 +138,14 @@ static int br_switchdev_event(struct not
br = p->br;
switch (event) {
- case SWITCHDEV_FDB_ADD:
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE:
fdb_info = ptr;
err = br_fdb_external_learn_add(br, p, fdb_info->addr,
fdb_info->vid);
if (err)
err = notifier_from_errno(err);
break;
- case SWITCHDEV_FDB_DEL:
+ case SWITCHDEV_FDB_DEL_TO_BRIDGE:
fdb_info = ptr;
err = br_fdb_external_learn_del(br, p, fdb_info->addr,
fdb_info->vid);
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -688,6 +688,8 @@ static void fdb_notify(struct net_bridge
struct sk_buff *skb;
int err = -ENOBUFS;
+ br_switchdev_fdb_notify(fdb, type);
+
skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -1060,6 +1060,8 @@ void nbp_switchdev_frame_mark(const stru
struct sk_buff *skb);
bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
const struct sk_buff *skb);
+void br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb,
+ int type);
#else
static inline int nbp_switchdev_mark_set(struct net_bridge_port *p)
{
@@ -1076,6 +1078,11 @@ static inline bool nbp_switchdev_allowed
{
return true;
}
+
+static inline void
+br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+{
+}
#endif /* CONFIG_NET_SWITCHDEV */
#endif
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -55,3 +55,36 @@ bool nbp_switchdev_allowed_egress(const
return !skb->offload_fwd_mark ||
BR_INPUT_SKB_CB(skb)->offload_fwd_mark != p->offload_fwd_mark;
}
+
+static void
+br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
+ u16 vid, struct net_device *dev)
+{
+ struct switchdev_notifier_fdb_info info;
+ unsigned long notifier_type;
+
+ info.addr = mac;
+ info.vid = vid;
+ notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE;
+ call_switchdev_notifiers(notifier_type, dev, &info.info);
+}
+
+void
+br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+{
+ if (!fdb->added_by_user)
+ return;
+
+ switch (type) {
+ case RTM_DELNEIGH:
+ br_switchdev_fdb_call_notifiers(false, fdb->addr.addr,
+ fdb->vlan_id,
+ fdb->dst->dev);
+ break;
+ case RTM_NEWNEIGH:
+ br_switchdev_fdb_call_notifiers(true, fdb->addr.addr,
+ fdb->vlan_id,
+ fdb->dst->dev);
+ break;
+ }
+}
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1968,6 +1968,23 @@ static void netif_setup_tc(struct net_de
}
}
+int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
+{
+ if (dev->num_tc) {
+ struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
+ int i;
+
+ for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
+ if ((txq - tc->offset) < tc->count)
+ return i;
+ }
+
+ return -1;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P) \
@@ -6630,9 +6647,18 @@ int dev_set_mtu(struct net_device *dev,
if (new_mtu == dev->mtu)
return 0;
- /* MTU must be positive. */
- if (new_mtu < 0)
+ /* MTU must be positive, and in range */
+ if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+ net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
+ dev->name, new_mtu, dev->min_mtu);
return -EINVAL;
+ }
+
+ if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+ net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
+ dev->name, new_mtu, dev->max_mtu);
+ return -EINVAL;
+ }
if (!netif_device_present(dev))
return -ENODEV;
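
Drivers publish their supported range once and dev_set_mtu() then rejects out-of-range requests centrally; a max_mtu of 0 means no upper bound is enforced. A sketch with illustrative values:

/* Sketch: a hypothetical ethernet driver advertising its MTU range. */
static void foo_netdev_setup(struct net_device *dev)
{
	dev->mtu = ETH_DATA_LEN;	/* 1500 */
	dev->min_mtu = ETH_MIN_MTU;	/* 68, added in the if_ether.h hunk above */
	dev->max_mtu = 9600;		/* hardware-specific jumbo limit */
}
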
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1021,7 +1021,6 @@ static ssize_t show_trans_timeout(struct
return sprintf(buf, "%lu", trans_timeout);
}
-#ifdef CONFIG_XPS
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
struct net_device *dev = queue->dev;
@@ -1033,6 +1032,21 @@ static unsigned int get_netdev_queue_ind
return i;
}
+static ssize_t show_traffic_class(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attribute,
+ char *buf)
+{
+ struct net_device *dev = queue->dev;
+ int index = get_netdev_queue_index(queue);
+ int tc = netdev_txq_to_tc(dev, index);
+
+ if (tc < 0)
+ return -EINVAL;
+
+ return sprintf(buf, "%u\n", tc);
+}
+
+#ifdef CONFIG_XPS
static ssize_t show_tx_maxrate(struct netdev_queue *queue,
struct netdev_queue_attribute *attribute,
char *buf)
@@ -1075,6 +1089,9 @@ static struct netdev_queue_attribute que
static struct netdev_queue_attribute queue_trans_timeout =
__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
+static struct netdev_queue_attribute queue_traffic_class =
+ __ATTR(traffic_class, S_IRUGO, show_traffic_class, NULL);
+
#ifdef CONFIG_BQL
/*
* Byte queue limits sysfs structures and functions.
@@ -1260,6 +1277,7 @@ static struct netdev_queue_attribute xps
static struct attribute *netdev_queue_default_attrs[] = {
&queue_trans_timeout.attr,
+ &queue_traffic_class.attr,
#ifdef CONFIG_XPS
&xps_cpus_attribute.attr,
&queue_tx_maxrate.attr,
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3690,7 +3690,7 @@ static int rtnl_get_offload_stats(struct
if (!size)
continue;
- if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
+ if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
continue;
attr = nla_reserve_64bit(skb, attr_id, size,
@@ -3731,7 +3731,7 @@ static int rtnl_get_offload_stats_size(c
for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
- if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
+ if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
continue;
size = rtnl_get_offload_stats_attr_size(attr_id);
nla_size += nla_total_size_64bit(size);
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -842,6 +842,32 @@ void napi_consume_skb(struct sk_buff *sk
}
EXPORT_SYMBOL(napi_consume_skb);
+/**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo;
+ u8 head_frag = skb->head_frag;
+
+ skb_release_head_state(skb);
+
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->data = skb->head + NET_SKB_PAD;
+ skb->head_frag = head_frag;
+ skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
@@ -1075,7 +1101,7 @@ static void skb_headers_offset_update(st
skb->inner_mac_header += off;
}
-static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
__copy_skb_header(new, old);
@@ -1083,6 +1109,7 @@ static void copy_skb_header(struct sk_bu
skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
+EXPORT_SYMBOL(copy_skb_header);
static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
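
skb_recycle() resets an skb as if freshly allocated so a driver can reuse it as a receive buffer instead of freeing it. Callers must ensure the buffer is neither shared nor cloned and is large enough; a sketch of a Tx-confirmation path, with hypothetical helpers and size check:

/* Sketch: hypothetical driver recycling a transmitted skb into its Rx ring. */
static void foo_tx_conf(struct foo_priv *priv, struct sk_buff *skb)
{
	if (!skb_shared(skb) && !skb_cloned(skb) &&
	    skb_end_offset(skb) >= priv->rx_buf_size) {
		skb_recycle(skb);		/* back to __alloc_skb() state */
		foo_rx_refill(priv, skb);	/* hypothetical refill helper */
	} else {
		dev_kfree_skb(skb);
	}
}
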
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -309,6 +309,13 @@ static void dev_watchdog(unsigned long a
txq->trans_timeout++;
break;
}
+
+ /* Devices with HW_ACCEL_MQ have multiple txqs
+ * but update only the first one's transmission
+ * timestamp so avoid checking the rest.
+ */
+ if (dev->features & NETIF_F_HW_ACCEL_MQ)
+ break;
}
if (some_queue_timedout) {