806-v5.16-0001-nvmem-core-rework-nvmem-cell-instance-creation.patch

From 7ae6478b304bc004c3139b422665b0e23b57f05c Mon Sep 17 00:00:00 2001
From: Srinivas Kandagatla <[email protected]>
Date: Wed, 13 Oct 2021 14:19:55 +0100
Subject: [PATCH] nvmem: core: rework nvmem cell instance creation

In the existing design, we do not create an instance per nvmem cell
consumer; instead, consumers refer directly to the cells on the nvmem
cell list that is attached to the provider.

However, this design has limitations when consumers want to assign a
name or connection id to the nvmem cell instance, e.g. via
"nvmem-cell-names" or the id passed to nvmem_cell_get(id).

Having a name associated with the nvmem cell consumer instance will
help provider drivers perform post-processing of nvmem cell data, if
required, before the data is seen by consumers. This is fairly common
with some vendors storing nvmem cells such as mac-address in
vendor-specific data layouts that are not directly usable by consumer
drivers.

With this patch, an nvmem cell instance is created dynamically in
nvmem_cell_get and destroyed in nvmem_cell_put, allowing consumers to
associate a name with the nvmem cell consumer instance (a consumer-side
usage sketch follows the diffstat below).

With this patch, a new struct nvmem_cell_entry replaces struct
nvmem_cell for storing nvmem cell information within the core.

This patch does not change the nvmem-consumer interface based on
nvmem_cell.

Tested-by: Joakim Zhang <[email protected]>
Signed-off-by: Srinivas Kandagatla <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
 drivers/nvmem/core.c | 165 +++++++++++++++++++++++++++----------------
 1 file changed, 105 insertions(+), 60 deletions(-)
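
To make the consumer-side contract concrete, here is a minimal usage sketch
(hypothetical driver code, not part of this patch; the "mac-address" cell name
and the example_read_mac() helper are illustrative assumptions). The id string
passed to nvmem_cell_get() is what ends up stored in the per-consumer cell
instance created by this patch, and nvmem_cell_put() is what frees that
instance again:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static int example_read_mac(struct device *dev, u8 *mac, size_t mac_len)
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	/* Creates the per-consumer cell instance; "mac-address" is kept as cell->id. */
	cell = nvmem_cell_get(dev, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	/* Reads through the provider-owned nvmem_cell_entry behind the handle. */
	buf = nvmem_cell_read(cell, &len);

	/* Frees the instance created by nvmem_cell_get() above. */
	nvmem_cell_put(cell);

	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len < mac_len) {
		kfree(buf);
		return -EINVAL;
	}

	memcpy(mac, buf, mac_len);
	kfree(buf);

	return 0;
}

The provider-owned cell description itself is untouched by get/put; only the
thin per-consumer wrapper is allocated and freed.
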
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -45,8 +45,7 @@ struct nvmem_device {
 #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
 #define FLAG_COMPAT BIT(0)
-
-struct nvmem_cell {
+struct nvmem_cell_entry {
 	const char *name;
 	int offset;
 	int bytes;
@@ -57,6 +56,11 @@ struct nvmem_cell {
 	struct list_head node;
 };
+struct nvmem_cell {
+	struct nvmem_cell_entry *entry;
+	const char *id;
+};
+
 static DEFINE_MUTEX(nvmem_mutex);
 static DEFINE_IDA(nvmem_ida);
@@ -424,7 +428,7 @@ static struct bus_type nvmem_bus_type =
 	.name = "nvmem",
 };
-static void nvmem_cell_drop(struct nvmem_cell *cell)
+static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
 {
 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
 	mutex_lock(&nvmem_mutex);
@@ -437,13 +441,13 @@ static void nvmem_cell_drop(struct nvmem
 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
 {
-	struct nvmem_cell *cell, *p;
+	struct nvmem_cell_entry *cell, *p;
 	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
-		nvmem_cell_drop(cell);
+		nvmem_cell_entry_drop(cell);
 }
-static void nvmem_cell_add(struct nvmem_cell *cell)
+static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
 {
 	mutex_lock(&nvmem_mutex);
 	list_add_tail(&cell->node, &cell->nvmem->cells);
@@ -451,9 +455,9 @@ static void nvmem_cell_add(struct nvmem_
 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
 }
-static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
-					       const struct nvmem_cell_info *info,
-					       struct nvmem_cell *cell)
+static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
+						     const struct nvmem_cell_info *info,
+						     struct nvmem_cell_entry *cell)
 {
 	cell->nvmem = nvmem;
 	cell->offset = info->offset;
@@ -477,13 +481,13 @@ static int nvmem_cell_info_to_nvmem_cell
 	return 0;
 }
-static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
-					 const struct nvmem_cell_info *info,
-					 struct nvmem_cell *cell)
+static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
+					       const struct nvmem_cell_info *info,
+					       struct nvmem_cell_entry *cell)
 {
 	int err;
-	err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
+	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
 	if (err)
 		return err;
@@ -507,7 +511,7 @@ static int nvmem_add_cells(struct nvmem_
 		    const struct nvmem_cell_info *info,
 		    int ncells)
 {
-	struct nvmem_cell **cells;
+	struct nvmem_cell_entry **cells;
 	int i, rval;
 	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
@@ -521,13 +525,13 @@ static int nvmem_add_cells(struct nvmem_
 			goto err;
 		}
-		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
+		rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, &info[i], cells[i]);
 		if (rval) {
 			kfree(cells[i]);
 			goto err;
 		}
-		nvmem_cell_add(cells[i]);
+		nvmem_cell_entry_add(cells[i]);
 	}
 	/* remove tmp array */
@@ -536,7 +540,7 @@ static int nvmem_add_cells(struct nvmem_
 	return 0;
 err:
 	while (i--)
-		nvmem_cell_drop(cells[i]);
+		nvmem_cell_entry_drop(cells[i]);
 	kfree(cells);
@@ -573,7 +577,7 @@ static int nvmem_add_cells_from_table(st
 {
 	const struct nvmem_cell_info *info;
 	struct nvmem_cell_table *table;
-	struct nvmem_cell *cell;
+	struct nvmem_cell_entry *cell;
 	int rval = 0, i;
 	mutex_lock(&nvmem_cell_mutex);
@@ -588,15 +592,13 @@ static int nvmem_add_cells_from_table(st
 					goto out;
 				}
-				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
-								     info,
-								     cell);
+				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
 				if (rval) {
 					kfree(cell);
 					goto out;
 				}
-				nvmem_cell_add(cell);
+				nvmem_cell_entry_add(cell);
 			}
 		}
 	}
@@ -606,10 +608,10 @@ out:
 	return rval;
 }
-static struct nvmem_cell *
-nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
+static struct nvmem_cell_entry *
+nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
 {
-	struct nvmem_cell *iter, *cell = NULL;
+	struct nvmem_cell_entry *iter, *cell = NULL;
 	mutex_lock(&nvmem_mutex);
 	list_for_each_entry(iter, &nvmem->cells, node) {
@@ -680,7 +682,7 @@ static int nvmem_add_cells_from_of(struc
 {
 	struct device_node *parent, *child;
 	struct device *dev = &nvmem->dev;
-	struct nvmem_cell *cell;
+	struct nvmem_cell_entry *cell;
 	const __be32 *addr;
 	int len;
@@ -729,7 +731,7 @@ static int nvmem_add_cells_from_of(struc
 		}
 		cell->np = of_node_get(child);
-		nvmem_cell_add(cell);
+		nvmem_cell_entry_add(cell);
 	}
 	return 0;
@@ -1144,9 +1146,33 @@ struct nvmem_device *devm_nvmem_device_g
 }
 EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
+static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, const char *id)
+{
+	struct nvmem_cell *cell;
+	const char *name = NULL;
+
+	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+	if (!cell)
+		return ERR_PTR(-ENOMEM);
+
+	if (id) {
+		name = kstrdup_const(id, GFP_KERNEL);
+		if (!name) {
+			kfree(cell);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	cell->id = name;
+	cell->entry = entry;
+
+	return cell;
+}
+
 static struct nvmem_cell *
 nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
 {
+	struct nvmem_cell_entry *cell_entry;
 	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
 	struct nvmem_cell_lookup *lookup;
 	struct nvmem_device *nvmem;
@@ -1171,11 +1197,15 @@ nvmem_cell_get_from_lookup(struct device
 				break;
 			}
-			cell = nvmem_find_cell_by_name(nvmem,
-						       lookup->cell_name);
-			if (!cell) {
+			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
+								   lookup->cell_name);
+			if (!cell_entry) {
 				__nvmem_device_put(nvmem);
 				cell = ERR_PTR(-ENOENT);
+			} else {
+				cell = nvmem_create_cell(cell_entry, con_id);
+				if (IS_ERR(cell))
+					__nvmem_device_put(nvmem);
 			}
 			break;
 		}
@@ -1186,10 +1216,10 @@ nvmem_cell_get_from_lookup(struct device
 }
 #if IS_ENABLED(CONFIG_OF)
-static struct nvmem_cell *
-nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
+static struct nvmem_cell_entry *
+nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
 {
-	struct nvmem_cell *iter, *cell = NULL;
+	struct nvmem_cell_entry *iter, *cell = NULL;
 	mutex_lock(&nvmem_mutex);
 	list_for_each_entry(iter, &nvmem->cells, node) {
@@ -1219,6 +1249,7 @@ struct nvmem_cell *of_nvmem_cell_get(str
 {
 	struct device_node *cell_np, *nvmem_np;
 	struct nvmem_device *nvmem;
+	struct nvmem_cell_entry *cell_entry;
 	struct nvmem_cell *cell;
 	int index = 0;
@@ -1239,12 +1270,16 @@ struct nvmem_cell *of_nvmem_cell_get(str
 	if (IS_ERR(nvmem))
 		return ERR_CAST(nvmem);
-	cell = nvmem_find_cell_by_node(nvmem, cell_np);
-	if (!cell) {
+	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
+	if (!cell_entry) {
 		__nvmem_device_put(nvmem);
 		return ERR_PTR(-ENOENT);
 	}
+	cell = nvmem_create_cell(cell_entry, id);
+	if (IS_ERR(cell))
+		__nvmem_device_put(nvmem);
+
 	return cell;
 }
 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
@@ -1350,13 +1385,17 @@ EXPORT_SYMBOL(devm_nvmem_cell_put);
  */
 void nvmem_cell_put(struct nvmem_cell *cell)
 {
-	struct nvmem_device *nvmem = cell->nvmem;
+	struct nvmem_device *nvmem = cell->entry->nvmem;
+
+	if (cell->id)
+		kfree_const(cell->id);
+	kfree(cell);
 	__nvmem_device_put(nvmem);
 }
 EXPORT_SYMBOL_GPL(nvmem_cell_put);
-static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
+static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
 {
 	u8 *p, *b;
 	int i, extra, bit_offset = cell->bit_offset;
@@ -1390,8 +1429,8 @@ static void nvmem_shift_read_buffer_in_p
 }
 static int __nvmem_cell_read(struct nvmem_device *nvmem,
-			     struct nvmem_cell *cell,
-			     void *buf, size_t *len)
+			     struct nvmem_cell_entry *cell,
+			     void *buf, size_t *len, const char *id)
 {
 	int rc;
@@ -1422,18 +1461,18 @@ static int __nvmem_cell_read(struct nvme
  */
 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
 {
-	struct nvmem_device *nvmem = cell->nvmem;
+	struct nvmem_device *nvmem = cell->entry->nvmem;
 	u8 *buf;
 	int rc;
 	if (!nvmem)
 		return ERR_PTR(-EINVAL);
-	buf = kzalloc(cell->bytes, GFP_KERNEL);
+	buf = kzalloc(cell->entry->bytes, GFP_KERNEL);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
-	rc = __nvmem_cell_read(nvmem, cell, buf, len);
+	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id);
 	if (rc) {
 		kfree(buf);
 		return ERR_PTR(rc);
@@ -1443,7 +1482,7 @@ void *nvmem_cell_read(struct nvmem_cell
 }
 EXPORT_SYMBOL_GPL(nvmem_cell_read);
-static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
+static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
 					     u8 *_buf, int len)
 {
 	struct nvmem_device *nvmem = cell->nvmem;
@@ -1496,16 +1535,7 @@ err:
 	return ERR_PTR(rc);
 }
-/**
- * nvmem_cell_write() - Write to a given nvmem cell
- *
- * @cell: nvmem cell to be written.
- * @buf: Buffer to be written.
- * @len: length of buffer to be written to nvmem cell.
- *
- * Return: length of bytes written or negative on failure.
- */
-int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
+static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
 {
 	struct nvmem_device *nvmem = cell->nvmem;
 	int rc;
@@ -1531,6 +1561,21 @@ int nvmem_cell_write(struct nvmem_cell *
 	return len;
 }
+
+/**
+ * nvmem_cell_write() - Write to a given nvmem cell
+ *
+ * @cell: nvmem cell to be written.
+ * @buf: Buffer to be written.
+ * @len: length of buffer to be written to nvmem cell.
+ *
+ * Return: length of bytes written or negative on failure.
+ */
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
+{
+	return __nvmem_cell_entry_write(cell->entry, buf, len);
+}
+
 EXPORT_SYMBOL_GPL(nvmem_cell_write);
 static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
@@ -1633,7 +1678,7 @@ static const void *nvmem_cell_read_varia
 	if (IS_ERR(cell))
 		return cell;
-	nbits = cell->nbits;
+	nbits = cell->entry->nbits;
 	buf = nvmem_cell_read(cell, len);
 	nvmem_cell_put(cell);
 	if (IS_ERR(buf))
@@ -1729,18 +1774,18 @@ EXPORT_SYMBOL_GPL(nvmem_cell_read_variab
 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
 			       struct nvmem_cell_info *info, void *buf)
 {
-	struct nvmem_cell cell;
+	struct nvmem_cell_entry cell;
 	int rc;
 	ssize_t len;
 	if (!nvmem)
 		return -EINVAL;
-	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
+	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
 	if (rc)
 		return rc;
-	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
+	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL);
 	if (rc)
 		return rc;
@@ -1760,17 +1805,17 @@ EXPORT_SYMBOL_GPL(nvmem_device_cell_read
 int nvmem_device_cell_write(struct nvmem_device *nvmem,
 			    struct nvmem_cell_info *info, void *buf)
 {
-	struct nvmem_cell cell;
+	struct nvmem_cell_entry cell;
 	int rc;
 	if (!nvmem)
 		return -EINVAL;
-	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
+	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
 	if (rc)
 		return rc;
-	return nvmem_cell_write(&cell, buf, cell.bytes);
+	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
 }
 EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
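
Note that this patch only threads the consumer id down to __nvmem_cell_read();
the provider-side hook that would consume it is not part of this change. Purely
as an illustration of the motivation stated in the commit message (mac-address
stored in a vendor-specific layout), an id-keyed fixup could look like the
following hypothetical helper (the function name, the reversed-byte layout, and
the 6-byte MAC assumption are all illustrative):

#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Hypothetical helper: fix up a byte-reversed 6-byte MAC when the consumer
 * asked for the "mac-address" cell; any other id is passed through untouched.
 */
static void example_post_process(const char *id, void *buf, size_t bytes)
{
	u8 *p = buf;
	size_t i;

	if (!id || strcmp(id, "mac-address") || bytes != ETH_ALEN)
		return;

	for (i = 0; i < bytes / 2; i++) {
		u8 tmp = p[i];

		p[i] = p[bytes - 1 - i];
		p[bytes - 1 - i] = tmp;
	}
}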