0018-driver-e24-add-e24-driver.patch 57 KB

  1. From b37b26232ebb6c0a61b530f11ccd6eefdf782c04 Mon Sep 17 00:00:00 2001
  2. From: "shanlong.li" <[email protected]>
  3. Date: Fri, 16 Jun 2023 03:02:14 -0700
  4. Subject: [PATCH 18/55] driver:e24: add e24 driver
  5. add e24 driver
  6. Signed-off-by: shanlong.li <[email protected]>
  7. ---
  8. drivers/Kconfig | 1 +
  9. drivers/Makefile | 1 +
  10. drivers/e24/Kconfig | 5 +
  11. drivers/e24/Makefile | 12 +
  12. drivers/e24/e24_alloc.c | 241 ++++++
  13. drivers/e24/e24_alloc.h | 59 ++
  14. drivers/e24/starfive_e24.c | 1524 +++++++++++++++++++++++++++++++++
  15. drivers/e24/starfive_e24.h | 159 ++++
  16. drivers/e24/starfive_e24_hw.c | 134 +++
  17. drivers/e24/starfive_e24_hw.h | 94 ++
  18. 10 files changed, 2230 insertions(+)
  19. create mode 100644 drivers/e24/Kconfig
  20. create mode 100644 drivers/e24/Makefile
  21. create mode 100644 drivers/e24/e24_alloc.c
  22. create mode 100644 drivers/e24/e24_alloc.h
  23. create mode 100644 drivers/e24/starfive_e24.c
  24. create mode 100644 drivers/e24/starfive_e24.h
  25. create mode 100644 drivers/e24/starfive_e24_hw.c
  26. create mode 100644 drivers/e24/starfive_e24_hw.h
  27. --- a/drivers/Kconfig
  28. +++ b/drivers/Kconfig
  29. @@ -245,4 +245,5 @@ source "drivers/cdx/Kconfig"
  30. source "drivers/dpll/Kconfig"
  31. +source "drivers/e24/Kconfig"
  32. endmenu
  33. --- a/drivers/Makefile
  34. +++ b/drivers/Makefile
  35. @@ -195,3 +195,4 @@ obj-$(CONFIG_CDX_BUS) += cdx/
  36. obj-$(CONFIG_DPLL) += dpll/
  37. obj-$(CONFIG_S390) += s390/
  38. +obj-$(CONFIG_E24) += e24/
  39. --- /dev/null
  40. +++ b/drivers/e24/Kconfig
  41. @@ -0,0 +1,5 @@
  42. +config E24
  43. + tristate "E24 support"
  44. + default m
  45. + help
  46. + This module provides support for the StarFive E24 device.
  47. --- /dev/null
  48. +++ b/drivers/e24/Makefile
  49. @@ -0,0 +1,12 @@
  50. +# SPDX-License-Identifier: GPL-2.0
  51. +# Copyright (c) 2021 StarFive Technology Co., Ltd.
  52. +#
  53. +# Makefile for the E24 driver
  54. +#
  55. +ccflags-y += -I$(srctree)/drivers/e24
  56. +#ccflags-y += -DDEBUG
  57. +ccflags-y += -Wunused-variable -Wno-error=missing-prototypes
  58. +
  59. +obj-$(CONFIG_E24) += e24.o
  60. +
  61. +e24-y := starfive_e24.o starfive_e24_hw.o e24_alloc.o
  62. --- /dev/null
  63. +++ b/drivers/e24/e24_alloc.c
  64. @@ -0,0 +1,241 @@
  65. +// SPDX-License-Identifier: GPL-2.0
  66. +#include <linux/atomic.h>
  67. +#include <linux/kernel.h>
  68. +#include <linux/mutex.h>
  69. +#include <linux/printk.h>
  70. +#include <linux/slab.h>
  71. +#include "e24_alloc.h"
  72. +
  73. +struct e24_private_pool {
  74. + struct e24_allocation_pool pool;
  75. + struct mutex free_list_lock;
  76. + phys_addr_t start;
  77. + u32 size;
  78. + struct e24_allocation *free_list;
  79. +};
  80. +
  81. +static void e24_private_free(struct e24_allocation *e24_allocation)
  82. +{
  83. + struct e24_private_pool *pool = container_of(e24_allocation->pool,
  84. + struct e24_private_pool,
  85. + pool);
  86. + struct e24_allocation **pcur;
  87. +
  88. + pr_debug("%s: %pap x %d\n", __func__,
  89. + &e24_allocation->start, e24_allocation->size);
  90. +
  91. + mutex_lock(&pool->free_list_lock);
  92. +
  93. + for (pcur = &pool->free_list; ; pcur = &(*pcur)->next) {
  94. + struct e24_allocation *cur = *pcur;
  95. +
  96. + if (cur && cur->start + cur->size == e24_allocation->start) {
  97. + struct e24_allocation *next = cur->next;
  98. +
  99. + pr_debug("merging block tail: %pap x 0x%x ->\n",
  100. + &cur->start, cur->size);
  101. + cur->size += e24_allocation->size;
  102. + pr_debug("... -> %pap x 0x%x\n",
  103. + &cur->start, cur->size);
  104. + kfree(e24_allocation);
  105. +
  106. + if (next && cur->start + cur->size == next->start) {
  107. + pr_debug("merging with next block: %pap x 0x%x ->\n",
  108. + &cur->start, cur->size);
  109. + cur->size += next->size;
  110. + cur->next = next->next;
  111. + pr_debug("... -> %pap x 0x%x\n",
  112. + &cur->start, cur->size);
  113. + kfree(next);
  114. + }
  115. + break;
  116. + }
  117. +
  118. + if (!cur || e24_allocation->start < cur->start) {
  119. + if (cur && e24_allocation->start + e24_allocation->size ==
  120. + cur->start) {
  121. + pr_debug("merging block head: %pap x 0x%x ->\n",
  122. + &cur->start, cur->size);
  123. + cur->size += e24_allocation->size;
  124. + cur->start = e24_allocation->start;
  125. + pr_debug("... -> %pap x 0x%x\n",
  126. + &cur->start, cur->size);
  127. + kfree(e24_allocation);
  128. + } else {
  129. + pr_debug("inserting new free block\n");
  130. + e24_allocation->next = cur;
  131. + *pcur = e24_allocation;
  132. + }
  133. + break;
  134. + }
  135. + }
  136. +
  137. + mutex_unlock(&pool->free_list_lock);
  138. +}
  139. +
  140. +static long e24_private_alloc(struct e24_allocation_pool *pool,
  141. + u32 size, u32 align,
  142. + struct e24_allocation **alloc)
  143. +{
  144. + struct e24_private_pool *ppool = container_of(pool,
  145. + struct e24_private_pool,
  146. + pool);
  147. + struct e24_allocation **pcur;
  148. + struct e24_allocation *cur = NULL;
  149. + struct e24_allocation *new;
  150. + phys_addr_t aligned_start = 0;
  151. + bool found = false;
  152. +
  153. + if (!size || (align & (align - 1)))
  154. + return -EINVAL;
  155. + if (!align)
  156. + align = 1;
  157. +
  158. + new = kzalloc(sizeof(struct e24_allocation), GFP_KERNEL);
  159. + if (!new)
  160. + return -ENOMEM;
  161. +
  162. + align = ALIGN(align, PAGE_SIZE);
  163. + size = ALIGN(size, PAGE_SIZE);
  164. +
  165. + mutex_lock(&ppool->free_list_lock);
  166. +
  167. + /* on exit free list is fixed */
  168. + for (pcur = &ppool->free_list; *pcur; pcur = &(*pcur)->next) {
  169. + cur = *pcur;
  170. + aligned_start = ALIGN(cur->start, align);
  171. +
  172. + if (aligned_start >= cur->start &&
  173. + aligned_start - cur->start + size <= cur->size) {
  174. + if (aligned_start == cur->start) {
  175. + if (aligned_start + size == cur->start + cur->size) {
  176. + pr_debug("reusing complete block: %pap x %x\n",
  177. + &cur->start, cur->size);
  178. + *pcur = cur->next;
  179. + } else {
  180. + pr_debug("cutting block head: %pap x %x ->\n",
  181. + &cur->start, cur->size);
  182. + cur->size -= aligned_start + size - cur->start;
  183. + cur->start = aligned_start + size;
  184. + pr_debug("... -> %pap x %x\n",
  185. + &cur->start, cur->size);
  186. + cur = NULL;
  187. + }
  188. + } else {
  189. + if (aligned_start + size == cur->start + cur->size) {
  190. + pr_debug("cutting block tail: %pap x %x ->\n",
  191. + &cur->start, cur->size);
  192. + cur->size = aligned_start - cur->start;
  193. + pr_debug("... -> %pap x %x\n",
  194. + &cur->start, cur->size);
  195. + cur = NULL;
  196. + } else {
  197. + pr_debug("splitting block into two: %pap x %x ->\n",
  198. + &cur->start, cur->size);
  199. + new->start = aligned_start + size;
  200. + new->size = cur->start +
  201. + cur->size - new->start;
  202. +
  203. + cur->size = aligned_start - cur->start;
  204. +
  205. + new->next = cur->next;
  206. + cur->next = new;
  207. + pr_debug("... -> %pap x %x + %pap x %x\n",
  208. + &cur->start, cur->size,
  209. + &new->start, new->size);
  210. +
  211. + cur = NULL;
  212. + new = NULL;
  213. + }
  214. + }
  215. + found = true;
  216. + break;
  217. + } else {
  218. + cur = NULL;
  219. + }
  220. + }
  221. +
  222. + mutex_unlock(&ppool->free_list_lock);
  223. +
  224. + if (!found) {
  225. + kfree(cur);
  226. + kfree(new);
  227. + return -ENOMEM;
  228. + }
  229. +
  230. + if (!cur) {
  231. + cur = new;
  232. + new = NULL;
  233. + }
  234. + if (!cur) {
  235. + cur = kzalloc(sizeof(struct e24_allocation), GFP_KERNEL);
  236. + if (!cur)
  237. + return -ENOMEM;
  238. + }
  239. +
  240. + kfree(new);
  241. + pr_debug("returning: %pap x %x\n", &aligned_start, size);
  242. + cur->start = aligned_start;
  243. + cur->size = size;
  244. + cur->pool = pool;
  245. + atomic_set(&cur->ref, 0);
  246. + atomic_inc(&cur->ref);
  247. + *alloc = cur;
  248. +
  249. + return 0;
  250. +}
  251. +
  252. +static void e24_private_free_pool(struct e24_allocation_pool *pool)
  253. +{
  254. + struct e24_private_pool *ppool = container_of(pool,
  255. + struct e24_private_pool,
  256. + pool);
  257. + kfree(ppool->free_list);
  258. + kfree(ppool);
  259. +}
  260. +
  261. +static phys_addr_t e24_private_offset(const struct e24_allocation *allocation)
  262. +{
  263. + struct e24_private_pool *ppool = container_of(allocation->pool,
  264. + struct e24_private_pool,
  265. + pool);
  266. + return allocation->start - ppool->start;
  267. +}
  268. +
  269. +static const struct e24_allocation_ops e24_private_pool_ops = {
  270. + .alloc = e24_private_alloc,
  271. + .free = e24_private_free,
  272. + .free_pool = e24_private_free_pool,
  273. + .offset = e24_private_offset,
  274. +};
  275. +
  276. +long e24_init_private_pool(struct e24_allocation_pool **ppool,
  277. + phys_addr_t start, u32 size)
  278. +{
  279. + struct e24_private_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
  280. + struct e24_allocation *allocation = kmalloc(sizeof(*allocation),
  281. + GFP_KERNEL);
  282. +
  283. + if (!pool || !allocation) {
  284. + kfree(pool);
  285. + kfree(allocation);
  286. + return -ENOMEM;
  287. + }
  288. +
  289. + *allocation = (struct e24_allocation){
  290. + .pool = &pool->pool,
  291. + .start = start,
  292. + .size = size,
  293. + };
  294. + *pool = (struct e24_private_pool){
  295. + .pool = {
  296. + .ops = &e24_private_pool_ops,
  297. + },
  298. + .start = start,
  299. + .size = size,
  300. + .free_list = allocation,
  301. + };
  302. + mutex_init(&pool->free_list_lock);
  303. + *ppool = &pool->pool;
  304. + return 0;
  305. +}
  306. --- /dev/null
  307. +++ b/drivers/e24/e24_alloc.h
  308. @@ -0,0 +1,59 @@
  309. +/* SPDX-License-Identifier: GPL-2.0 */
  310. +#ifndef E24_ALLOC_H
  311. +#define E24_ALLOC_H
  312. +
  313. +struct e24_allocation_pool;
  314. +struct e24_allocation;
  315. +
  316. +struct e24_allocation_ops {
  317. + long (*alloc)(struct e24_allocation_pool *allocation_pool,
  318. + u32 size, u32 align, struct e24_allocation **alloc);
  319. + void (*free)(struct e24_allocation *allocation);
  320. + void (*free_pool)(struct e24_allocation_pool *allocation_pool);
  321. + phys_addr_t (*offset)(const struct e24_allocation *allocation);
  322. +};
  323. +
  324. +struct e24_allocation_pool {
  325. + const struct e24_allocation_ops *ops;
  326. +};
  327. +
  328. +struct e24_allocation {
  329. + struct e24_allocation_pool *pool;
  330. + struct e24_allocation *next;
  331. + phys_addr_t start;
  332. + u32 size;
  333. + atomic_t ref;
  334. +};
  335. +
  336. +static inline void e24_free_pool(struct e24_allocation_pool *allocation_pool)
  337. +{
  338. + allocation_pool->ops->free_pool(allocation_pool);
  339. +}
  340. +
  341. +static inline void e24_free(struct e24_allocation *allocation)
  342. +{
  343. + return allocation->pool->ops->free(allocation);
  344. +}
  345. +
  346. +static inline long e24_allocate(struct e24_allocation_pool *allocation_pool,
  347. + u32 size, u32 align,
  348. + struct e24_allocation **alloc)
  349. +{
  350. + return allocation_pool->ops->alloc(allocation_pool,
  351. + size, align, alloc);
  352. +}
  353. +
  354. +static inline void e24_allocation_put(struct e24_allocation *e24_allocation)
  355. +{
  356. + if (atomic_dec_and_test(&e24_allocation->ref))
  357. + e24_allocation->pool->ops->free(e24_allocation);
  358. +}
  359. +
  360. +static inline phys_addr_t e24_allocation_offset(const struct e24_allocation *allocation)
  361. +{
  362. + return allocation->pool->ops->offset(allocation);
  363. +}
  364. +
  365. +long e24_init_private_pool(struct e24_allocation_pool **ppool, phys_addr_t start, u32 size);
  366. +
  367. +#endif
  368. --- /dev/null
  369. +++ b/drivers/e24/starfive_e24.c
  370. @@ -0,0 +1,1524 @@
  371. +// SPDX-License-Identifier: GPL-2.0
  372. +/*
  373. + * e24 driver for StarFive JH7110 SoC
  374. + *
  375. + * Copyright (c) 2021 StarFive Technology Co., Ltd.
  376. + * Author: Shanlong Li <[email protected]>
  377. + */
  378. +#include <linux/version.h>
  379. +#include <linux/atomic.h>
  380. +#include <linux/acpi.h>
  381. +#include <linux/completion.h>
  382. +#include <linux/delay.h>
  383. +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
  384. +#include <linux/dma-mapping.h>
  385. +#else
  386. +#include <linux/dma-direct.h>
  387. +#endif
  388. +#include <linux/firmware.h>
  389. +#include <linux/fs.h>
  390. +#include <linux/hashtable.h>
  391. +#include <linux/highmem.h>
  392. +#include <linux/idr.h>
  393. +#include <linux/interrupt.h>
  394. +#include <linux/io.h>
  395. +#include <linux/kernel.h>
  396. +#include <linux/module.h>
  397. +#include <linux/of.h>
  398. +#include <linux/of_address.h>
  399. +#include <linux/of_device.h>
  400. +#include <linux/of_reserved_mem.h>
  401. +#include <linux/platform_device.h>
  402. +#include <linux/pm_runtime.h>
  403. +#include <linux/property.h>
  404. +#include <linux/sched.h>
  405. +#include <linux/slab.h>
  406. +#include <linux/sort.h>
  407. +#include <linux/mman.h>
  408. +#include <linux/uaccess.h>
  409. +#include <linux/mailbox_controller.h>
  410. +#include <linux/mailbox_client.h>
  411. +
  412. +#include <linux/bsearch.h>
  413. +
  414. +#include "e24_alloc.h"
  415. +#include "starfive_e24.h"
  416. +#include "starfive_e24_hw.h"
  417. +
  418. +#define EMBOX_MAX_MSG_LEN 4
  419. +
  420. +static DEFINE_IDA(e24_nodeid);
  421. +
  422. +struct e24_dsp_cmd {
  423. + __u32 flags;
  424. + __u32 in_data_size;
  425. + __u32 out_data_size;
  426. + union {
  427. + __u32 in_data_addr;
  428. + __u8 in_data[E24_DSP_CMD_INLINE_DATA_SIZE];
  429. + };
  430. + union {
  431. + __u32 out_data_addr;
  432. + __u8 out_data[E24_DSP_CMD_INLINE_DATA_SIZE];
  433. + };
  434. +};
  435. +
  436. +struct e24_ioctl_user {
  437. + u32 flags;
  438. + u32 in_data_size;
  439. + u32 out_data_size;
  440. + u64 in_data_addr;
  441. + u64 out_data_addr;
  442. +};
  443. +
  444. +struct e24_ioctl_request {
  445. + struct e24_ioctl_user ioctl_data;
  446. + phys_addr_t in_data_phys;
  447. + phys_addr_t out_data_phys;
  448. + struct e24_mapping *buffer_mapping;
  449. +
  450. + union {
  451. + struct e24_mapping in_data_mapping;
  452. + u8 in_data[E24_DSP_CMD_INLINE_DATA_SIZE];
  453. + };
  454. + union {
  455. + struct e24_mapping out_data_mapping;
  456. + u8 out_data[E24_DSP_CMD_INLINE_DATA_SIZE];
  457. + };
  458. +};
  459. +
  460. +static int firmware_command_timeout = 10;
  461. +
  462. +static inline void e24_comm_read(volatile void __iomem *addr, void *p,
  463. + size_t sz)
  464. +{
  465. + size_t sz32 = sz & ~3;
  466. + u32 v;
  467. +
  468. + while (sz32) {
  469. + v = __raw_readl(addr);
  470. + memcpy(p, &v, sizeof(v));
  471. + p += 4;
  472. + addr += 4;
  473. + sz32 -= 4;
  474. + }
  475. + sz &= 3;
  476. + if (sz) {
  477. + v = __raw_readl(addr);
  478. + memcpy(p, &v, sz);
  479. + }
  480. +}
  481. +
  482. +static inline void e24_comm_write(volatile void __iomem *addr, const void *p,
  483. + size_t sz)
  484. +{
  485. + size_t sz32 = sz & ~3;
  486. + u32 v;
  487. +
  488. + while (sz32) {
  489. + memcpy(&v, p, sizeof(v));
  490. + __raw_writel(v, addr);
  491. + p += 4;
  492. + addr += 4;
  493. + sz32 -= 4;
  494. + }
  495. + sz &= 3;
  496. + if (sz) {
  497. + v = 0;
  498. + memcpy(&v, p, sz);
  499. + __raw_writel(v, addr);
  500. + }
  501. +}
  502. +
  503. +static bool e24_cacheable(struct e24_device *e24_dat, unsigned long pfn,
  504. + unsigned long n_pages)
  505. +{
  506. + if (e24_dat->hw_ops->cacheable) {
  507. + return e24_dat->hw_ops->cacheable(e24_dat->hw_arg, pfn, n_pages);
  508. + } else {
  509. + unsigned long i;
  510. +
  511. + for (i = 0; i < n_pages; ++i)
  512. + if (!pfn_valid(pfn + i))
  513. + return false;
  514. + return true;
  515. + }
  516. +}
  517. +
  518. +static int e24_compare_address_sort(const void *a, const void *b)
  519. +{
  520. + const struct e24_address_map_entry *pa = a;
  521. + const struct e24_address_map_entry *pb = b;
  522. +
  523. + if (pa->src_addr < pb->src_addr &&
  524. + pb->src_addr - pa->src_addr >= pa->size)
  525. + return -1;
  526. + if (pa->src_addr > pb->src_addr &&
  527. + pa->src_addr - pb->src_addr >= pb->size)
  528. + return 1;
  529. +
  530. + return 0;
  531. +}
  532. +
  533. +static int e24_compare_address_search(const void *a, const void *b)
  534. +{
  535. + const phys_addr_t *pa = a;
  536. +
  537. + return e24_compare_address(*pa, b);
  538. +}
  539. +
  540. +struct e24_address_map_entry *
  541. +e24_get_address_mapping(const struct e24_address_map *map, phys_addr_t addr)
  542. +{
  543. + return bsearch(&addr, map->entry, map->n, sizeof(*map->entry),
  544. + e24_compare_address_search);
  545. +}
  546. +
  547. +u32 e24_translate_to_dsp(const struct e24_address_map *map, phys_addr_t addr)
  548. +{
  549. +#ifdef E24_MEM_MAP
  550. + return addr;
  551. +#else
  552. + struct e24_address_map_entry *entry = e24_get_address_mapping(map, addr);
  553. +
  554. + if (!entry)
  555. + return E24_NO_TRANSLATION;
  556. + return entry->dst_addr + addr - entry->src_addr;
  557. +#endif
  558. +}
  559. +
  560. +static int e24_dma_direction(unsigned int flags)
  561. +{
  562. + static const enum dma_data_direction e24_dma_direction[] = {
  563. + [0] = DMA_NONE,
  564. + [E24_FLAG_READ] = DMA_TO_DEVICE,
  565. + [E24_FLAG_WRITE] = DMA_FROM_DEVICE,
  566. + [E24_FLAG_READ_WRITE] = DMA_BIDIRECTIONAL,
  567. + };
  568. + return e24_dma_direction[flags & E24_FLAG_READ_WRITE];
  569. +}
  570. +
  571. +static void e24_dma_sync_for_cpu(struct e24_device *e24_dat,
  572. + unsigned long virt,
  573. + phys_addr_t phys,
  574. + unsigned long size,
  575. + unsigned long flags)
  576. +{
  577. + if (e24_dat->hw_ops->dma_sync_for_cpu)
  578. + e24_dat->hw_ops->dma_sync_for_cpu(e24_dat->hw_arg,
  579. + (void *)virt, phys, size,
  580. + flags);
  581. + else
  582. + dma_sync_single_for_cpu(e24_dat->dev, phys_to_dma(e24_dat->dev, phys), size,
  583. + e24_dma_direction(flags));
  584. +}
  585. +
  586. +static void starfive_mbox_receive_message(struct mbox_client *client, void *message)
  587. +{
  588. + struct e24_device *e24_dat = dev_get_drvdata(client->dev);
  589. +
  590. + complete(&e24_dat->tx_channel->tx_complete);
  591. +}
  592. +
  593. +static struct mbox_chan *
  594. +starfive_mbox_request_channel(struct device *dev, const char *name)
  595. +{
  596. + struct mbox_client *client;
  597. + struct mbox_chan *channel;
  598. +
  599. + client = devm_kzalloc(dev, sizeof(*client), GFP_KERNEL);
  600. + if (!client)
  601. + return ERR_PTR(-ENOMEM);
  602. +
  603. + client->dev = dev;
  604. + client->rx_callback = starfive_mbox_receive_message;
  605. + client->tx_prepare = NULL;
  606. + client->tx_done = NULL;
  607. + client->tx_block = true;
  608. + client->knows_txdone = false;
  609. + client->tx_tout = 3000;
  610. +
  611. + channel = mbox_request_channel_byname(client, name);
  612. + if (IS_ERR(channel)) {
  613. + dev_warn(dev, "Failed to request %s channel\n", name);
  614. + return NULL;
  615. + }
  616. +
  617. + return channel;
  618. +}
  619. +
  620. +static void e24_vm_open(struct vm_area_struct *vma)
  621. +{
  622. + struct e24_allocation *cur = vma->vm_private_data;
  623. +
  624. + atomic_inc(&cur->ref);
  625. +}
  626. +
  627. +static void e24_vm_close(struct vm_area_struct *vma)
  628. +{
  629. + e24_allocation_put(vma->vm_private_data);
  630. +}
  631. +
  632. +static const struct vm_operations_struct e24_vm_ops = {
  633. + .open = e24_vm_open,
  634. + .close = e24_vm_close,
  635. +};
  636. +
  637. +static long e24_synchronize(struct e24_device *dev)
  638. +{
  639. +
  640. + struct e24_comm *queue = dev->queue;
  641. + struct e24_dsp_cmd __iomem *cmd = queue->comm;
  642. + u32 flags;
  643. + unsigned long deadline = jiffies + 10 * HZ;
  644. +
  645. + do {
  646. + flags = __raw_readl(&cmd->flags);
  647. + /* memory barrier */
  648. + rmb();
  649. + if (flags == 0x104)
  650. + return 0;
  651. +
  652. + schedule();
  653. + } while (time_before(jiffies, deadline));
  654. +
  655. + return -1;
  656. +}
  657. +
  658. +static int e24_open(struct inode *inode, struct file *filp)
  659. +{
  660. + struct e24_device *e24_dev = container_of(filp->private_data,
  661. + struct e24_device, miscdev);
  662. + int rc = 0;
  663. +
  664. + rc = pm_runtime_get_sync(e24_dev->dev);
  665. + if (rc < 0)
  666. + return rc;
  667. +
  668. + spin_lock_init(&e24_dev->busy_list_lock);
  669. + filp->private_data = e24_dev;
  670. + mdelay(1);
  671. +
  672. + return 0;
  673. +}
  674. +
  675. +int e24_release(struct inode *inode, struct file *filp)
  676. +{
  677. + struct e24_device *e24_dev = (struct e24_device *)filp->private_data;
  678. + int rc = 0;
  679. +
  680. + rc = pm_runtime_put_sync(e24_dev->dev);
  681. + if (rc < 0)
  682. + return rc;
  683. +
  684. + return 0;
  685. +}
  686. +
  687. +static ssize_t mbox_e24_message_write(struct file *filp,
  688. + const char __user *userbuf,
  689. + size_t count, loff_t *ppos)
  690. +{
  691. + struct e24_device *edev = filp->private_data;
  692. + void *data;
  693. + int ret;
  694. +
  695. + if (!edev->tx_channel) {
  696. + dev_err(edev->dev, "Channel cannot do Tx\n");
  697. + return -EINVAL;
  698. + }
  699. +
  700. + if (count > EMBOX_MAX_MSG_LEN) {
  701. + dev_err(edev->dev,
  702. + "Message length %zd greater than max allowed %d\n",
  703. + count, EMBOX_MAX_MSG_LEN);
  704. + return -EINVAL;
  705. + }
  706. +
  707. + edev->message = kzalloc(EMBOX_MAX_MSG_LEN, GFP_KERNEL);
  708. + if (!edev->message)
  709. + return -ENOMEM;
  710. +
  711. + ret = copy_from_user(edev->message, userbuf, count);
  712. + if (ret) {
  713. + ret = -EFAULT;
  714. + goto out;
  715. + }
  716. +
  717. + print_hex_dump_bytes("Client: Sending: Message: ", DUMP_PREFIX_ADDRESS,
  718. + edev->message, EMBOX_MAX_MSG_LEN);
  719. + data = edev->message;
  720. + pr_debug("%s:%d, %d\n", __func__, __LINE__, *((int *)data));
  721. + ret = mbox_send_message(edev->tx_channel, data);
  722. +
  723. + if (ret < 0 || !edev->tx_channel->active_req)
  724. + dev_err(edev->dev, "Failed to send message via mailbox:%d\n", ret);
  725. +
  726. +out:
  727. + kfree(edev->message);
  728. + edev->tx_channel->active_req = NULL;
  729. +
  730. + return ret < 0 ? ret : count;
  731. +}
  732. +
  733. +static long _e24_copy_user_phys(struct e24_device *edev,
  734. + unsigned long vaddr, unsigned long size,
  735. + phys_addr_t paddr, unsigned long flags,
  736. + bool to_phys)
  737. +{
  738. + void __iomem *p = ioremap(paddr, size);
  739. + unsigned long rc;
  740. +
  741. + if (!p) {
  742. + dev_err(edev->dev,
  743. + "couldn't ioremap %pap x 0x%08x\n",
  744. + &paddr, (u32)size);
  745. + return -EINVAL;
  746. + }
  747. + if (to_phys)
  748. + rc = raw_copy_from_user(__io_virt(p),
  749. + (void __user *)vaddr, size);
  750. + else
  751. + rc = copy_to_user((void __user *)vaddr,
  752. + __io_virt(p), size);
  753. + iounmap(p);
  754. + if (rc)
  755. + return -EFAULT;
  756. + return 0;
  757. +}
  758. +
  759. +static long e24_copy_user_to_phys(struct e24_device *edev,
  760. + unsigned long vaddr, unsigned long size,
  761. + phys_addr_t paddr, unsigned long flags)
  762. +{
  763. + return _e24_copy_user_phys(edev, vaddr, size, paddr, flags, true);
  764. +}
  765. +
  766. +static long e24_copy_user_from_phys(struct e24_device *edev,
  767. + unsigned long vaddr, unsigned long size,
  768. + phys_addr_t paddr, unsigned long flags)
  769. +{
  770. + return _e24_copy_user_phys(edev, vaddr, size, paddr, flags, false);
  771. +}
  772. +
/*
 * Make a user buffer visible to the device by copying it into a freshly
 * allocated region of the shared pool (an ALIEN_COPY mapping).
 *
 * On success *paddr holds the pool physical address whose low bits match
 * the user vaddr's alignment offset, and *mapping describes the copy so
 * it can be written back / released later.  Returns 0 or a negative errno.
 */
static long e24_copy_virt_to_phys(struct e24_device *edev,
				  unsigned long flags,
				  unsigned long vaddr, unsigned long size,
				  phys_addr_t *paddr,
				  struct e24_alien_mapping *mapping)
{
	phys_addr_t phys;
	/*
	 * vaddr & -vaddr isolates the lowest set bit, i.e. the largest
	 * power-of-two alignment vaddr naturally has; clamp to [16, PAGE_SIZE].
	 */
	unsigned long align = clamp(vaddr & -vaddr, 16ul, PAGE_SIZE);
	unsigned long offset = vaddr & (align - 1);
	struct e24_allocation *allocation;
	long rc;

	/* Over-allocate by one alignment unit so the offset can be applied. */
	rc = e24_allocate(edev->pool,
			  size + align, align, &allocation);
	if (rc < 0)
		return rc;

	/*
	 * Give phys the same sub-alignment offset as vaddr; if rounding
	 * down dropped below the allocation start, move up one unit.
	 */
	phys = (allocation->start & -align) | offset;
	if (phys < allocation->start)
		phys += align;

	/* Only pre-fill the pool copy when the device will read it. */
	if (flags & E24_FLAG_READ) {
		if (e24_copy_user_to_phys(edev, vaddr,
					  size, phys, flags)) {
			e24_allocation_put(allocation);
			return -EFAULT;
		}
	}

	*paddr = phys;
	*mapping = (struct e24_alien_mapping){
		.vaddr = vaddr,
		.size = size,
		.paddr = *paddr,
		.allocation = allocation,
		.type = ALIEN_COPY,
	};
	pr_debug("%s: copying to pa: %pap\n", __func__, paddr);

	return 0;
}
  814. +
/*
 * Propagate device-written data of an alien mapping back to user space.
 *
 * ALIEN_GUP:  sync caches for the CPU and mark the backing pages dirty.
 * ALIEN_COPY: copy the bounce buffer back into the user buffer.
 * Other types need no write-back.  Returns 0 or -EINVAL on copy failure.
 */
static long e24_writeback_alien_mapping(struct e24_device *edev,
		struct e24_alien_mapping *alien_mapping,
		unsigned long flags)
{
	struct page *page;
	size_t nr_pages;
	size_t i;
	long ret = 0;

	switch (alien_mapping->type) {
	case ALIEN_GUP:
		e24_dma_sync_for_cpu(edev,
				     alien_mapping->vaddr,
				     alien_mapping->paddr,
				     alien_mapping->size,
				     flags);
		pr_debug("%s: dirtying alien GUP @va = %p, pa = %pap\n",
			 __func__, (void __user *)alien_mapping->vaddr,
			 &alien_mapping->paddr);
		page = pfn_to_page(__phys_to_pfn(alien_mapping->paddr));
		/* Every page the [vaddr, vaddr+size) range touches. */
		nr_pages = PFN_UP(alien_mapping->vaddr + alien_mapping->size) -
			PFN_DOWN(alien_mapping->vaddr);
		for (i = 0; i < nr_pages; ++i)
			SetPageDirty(page + i);
		break;

	case ALIEN_COPY:
		pr_debug("%s: synchronizing alien copy @pa = %pap back to %p\n",
			 __func__, &alien_mapping->paddr,
			 (void __user *)alien_mapping->vaddr);
		if (e24_copy_user_from_phys(edev,
					    alien_mapping->vaddr,
					    alien_mapping->size,
					    alien_mapping->paddr,
					    flags))
			ret = -EINVAL;
		break;

	default:
		break;
	}
	return ret;
}
  858. +
  859. +static bool vma_needs_cache_ops(struct vm_area_struct *vma)
  860. +{
  861. + pgprot_t prot = vma->vm_page_prot;
  862. +
  863. + return pgprot_val(prot) != pgprot_val(pgprot_noncached(prot)) &&
  864. + pgprot_val(prot) != pgprot_val(pgprot_writecombine(prot));
  865. +}
  866. +
  867. +static void e24_alien_mapping_destroy(struct e24_alien_mapping *alien_mapping)
  868. +{
  869. + switch (alien_mapping->type) {
  870. + case ALIEN_COPY:
  871. + e24_allocation_put(alien_mapping->allocation);
  872. + break;
  873. + default:
  874. + break;
  875. + }
  876. +}
  877. +
/*
 * Undo a mapping created by __e24_share_block().
 *
 * If the device wrote into the block (E24_FLAG_WRITE) the data is first
 * synchronized / written back to user space, then the underlying
 * allocation or alien mapping is released.  The mapping is reset to
 * E24_MAPPING_NONE in all cases.  Returns 0 or a write-back error.
 */
static long __e24_unshare_block(struct file *filp, struct e24_mapping *mapping,
				unsigned long flags)
{
	long ret = 0;
	struct e24_device *edev = filp->private_data;

	/* The KERNEL bit is an attribute; dispatch on the base type. */
	switch (mapping->type & ~E24_MAPPING_KERNEL) {
	case E24_MAPPING_NATIVE:
		if (flags & E24_FLAG_WRITE) {
			e24_dma_sync_for_cpu(edev,
					     mapping->native.vaddr,
					     mapping->native.m_allocation->start,
					     mapping->native.m_allocation->size,
					     flags);
		}
		e24_allocation_put(mapping->native.m_allocation);
		break;

	case E24_MAPPING_ALIEN:
		if (flags & E24_FLAG_WRITE) {
			ret = e24_writeback_alien_mapping(edev,
							  &mapping->alien_mapping,
							  flags);
		}
		e24_alien_mapping_destroy(&mapping->alien_mapping);
		break;

	/*
	 * NOTE(review): because the KERNEL bit is masked off above, this
	 * case looks unreachable unless E24_MAPPING_KERNEL aliases a base
	 * type value — confirm against the enum definitions.
	 */
	case E24_MAPPING_KERNEL:
		break;

	default:
		break;
	}

	mapping->type = E24_MAPPING_NONE;

	return ret;
}
  916. +
/*
 * Resolve a user buffer [virt, virt+size) to a device-visible physical
 * address, recording how it was shared in @mapping.
 *
 * Two cases:
 *  - the buffer lives in a VMA backed by this driver's own mmap (a
 *    "native" pool allocation): compute its physical address directly
 *    and take a reference on the allocation;
 *  - anything else ("alien"): bounce it through a pool copy via
 *    e24_copy_virt_to_phys().
 *
 * Caller must hold mmap_read_lock(current->mm) (see e24_map_request()).
 * Returns 0 with *paddr set, or a negative errno.
 */
static long __e24_share_block(struct file *filp,
			      unsigned long virt, unsigned long size,
			      unsigned long flags, phys_addr_t *paddr,
			      struct e24_mapping *mapping)
{
	phys_addr_t phys = ~0ul;
	struct e24_device *edev = filp->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, virt);
	bool do_cache = true;
	long rc = -EINVAL;

	if (!vma) {
		pr_debug("%s: no vma for vaddr/size = 0x%08lx/0x%08lx\n",
			 __func__, virt, size);
		return -EINVAL;
	}

	/* Reject wrap-around and buffers starting before the VMA. */
	if (virt + size < virt || vma->vm_start > virt)
		return -EINVAL;

	if (vma && (vma->vm_file == filp)) {
		struct e24_device *vm_file = vma->vm_file->private_data;
		/*
		 * NOTE(review): vm_private_data is assumed to be the
		 * allocation installed by e24_mmap(); if it were NULL the
		 * atomic_inc below would oops — confirm e24_mmap always
		 * sets it for driver-backed VMAs.
		 */
		struct e24_allocation *e24_user_allocation = vma->vm_private_data;

		phys = vm_file->shared_mem + (vma->vm_pgoff << PAGE_SHIFT) +
			virt - vma->vm_start;
		pr_debug("%s: E24 allocation at 0x%08lx, paddr: %pap\n",
			 __func__, virt, &phys);

		rc = 0;
		mapping->type = E24_MAPPING_NATIVE;
		mapping->native.m_allocation = e24_user_allocation;
		mapping->native.vaddr = virt;
		atomic_inc(&e24_user_allocation->ref);
		do_cache = vma_needs_cache_ops(vma);
	}
	if (rc < 0) {
		struct e24_alien_mapping *alien_mapping =
			&mapping->alien_mapping;

		/* Otherwise this is alien allocation. */
		pr_debug("%s: non-E24 allocation at 0x%08lx\n",
			 __func__, virt);

		rc = e24_copy_virt_to_phys(edev, flags,
					   virt, size, &phys,
					   alien_mapping);

		if (rc < 0) {
			pr_debug("%s: couldn't map virt to phys\n",
				 __func__);
			return -EINVAL;
		}

		/* Point inside the bounce buffer at the same offset. */
		phys = alien_mapping->paddr +
			virt - alien_mapping->vaddr;

		mapping->type = E24_MAPPING_ALIEN;
	}

	*paddr = phys;
	pr_debug("%s: mapping = %p, mapping->type = %d\n",
		 __func__, mapping, mapping->type);

	return 0;
}
  984. +
  985. +
  986. +static void e24_unmap_request_nowb(struct file *filp, struct e24_ioctl_request *rq)
  987. +{
  988. + if (rq->ioctl_data.in_data_size > E24_DSP_CMD_INLINE_DATA_SIZE)
  989. + __e24_unshare_block(filp, &rq->in_data_mapping, 0);
  990. + if (rq->ioctl_data.out_data_size > E24_DSP_CMD_INLINE_DATA_SIZE)
  991. + __e24_unshare_block(filp, &rq->out_data_mapping, 0);
  992. +}
  993. +
  994. +static long e24_unmap_request(struct file *filp, struct e24_ioctl_request *rq)
  995. +{
  996. + long ret = 0;
  997. +
  998. + if (rq->ioctl_data.in_data_size > E24_DSP_CMD_INLINE_DATA_SIZE)
  999. + __e24_unshare_block(filp, &rq->in_data_mapping, E24_FLAG_READ);
  1000. +
  1001. + if (rq->ioctl_data.out_data_size > E24_DSP_CMD_INLINE_DATA_SIZE) {
  1002. + ret = __e24_unshare_block(filp, &rq->out_data_mapping,
  1003. + E24_FLAG_WRITE);
  1004. + if (ret < 0)
  1005. + pr_debug("%s: out_data could not be unshared\n", __func__);
  1006. +
  1007. + } else {
  1008. + if (copy_to_user((void __user *)(unsigned long)rq->ioctl_data.out_data_addr,
  1009. + rq->out_data,
  1010. + rq->ioctl_data.out_data_size)) {
  1011. + pr_debug("%s: out_data could not be copied\n", __func__);
  1012. + ret = -EFAULT;
  1013. + }
  1014. + }
  1015. +
  1016. + return ret;
  1017. +}
  1018. +
  1019. +static bool e24_cmd_complete(struct e24_comm *share_com)
  1020. +{
  1021. + struct e24_dsp_cmd __iomem *cmd = share_com->comm;
  1022. + u32 flags = __raw_readl(&cmd->flags);
  1023. +
  1024. + rmb();
  1025. + return (flags & (E24_CMD_FLAG_REQUEST_VALID |
  1026. + E24_CMD_FLAG_RESPONSE_VALID)) ==
  1027. + (E24_CMD_FLAG_REQUEST_VALID |
  1028. + E24_CMD_FLAG_RESPONSE_VALID);
  1029. +}
  1030. +
  1031. +static long e24_complete_poll(struct e24_device *edev, struct e24_comm *comm,
  1032. + bool (*cmd_complete)(struct e24_comm *p), struct e24_ioctl_request *rq)
  1033. +{
  1034. + unsigned long deadline = jiffies + firmware_command_timeout * HZ;
  1035. +
  1036. + do {
  1037. + if (cmd_complete(comm)) {
  1038. + pr_debug("%s: poll complete.\n", __func__);
  1039. + return 0;
  1040. + }
  1041. + schedule();
  1042. + } while (time_before(jiffies, deadline));
  1043. +
  1044. + pr_debug("%s: poll complete cmd timeout.\n", __func__);
  1045. +
  1046. + return -EBUSY;
  1047. +}
  1048. +
/*
 * Prepare a request's data for the device under mmap_read_lock(mm):
 * buffers larger than the inline command area are shared via
 * __e24_share_block(); small ones are copied inline into the request.
 * On any failure all blocks shared so far are unshared (no write-back).
 * Returns 0 or a negative errno.
 */
static long e24_map_request(struct file *filp, struct e24_ioctl_request *rq, struct mm_struct *mm)
{
	long ret = 0;

	mmap_read_lock(mm);
	if (rq->ioctl_data.in_data_size > E24_DSP_CMD_INLINE_DATA_SIZE) {
		ret = __e24_share_block(filp, rq->ioctl_data.in_data_addr,
					rq->ioctl_data.in_data_size,
					E24_FLAG_READ, &rq->in_data_phys,
					&rq->in_data_mapping);
		if (ret < 0) {
			pr_debug("%s: in_data could not be shared\n", __func__);
			goto share_err;
		}
	} else {
		/* Small input: copy straight into the inline buffer. */
		if (copy_from_user(rq->in_data,
				   (void __user *)(unsigned long)rq->ioctl_data.in_data_addr,
				   rq->ioctl_data.in_data_size)) {
			pr_debug("%s: in_data could not be copied\n",
				 __func__);
			ret = -EFAULT;
			goto share_err;
		}
	}

	if (rq->ioctl_data.out_data_size > E24_DSP_CMD_INLINE_DATA_SIZE) {
		ret = __e24_share_block(filp, rq->ioctl_data.out_data_addr,
					rq->ioctl_data.out_data_size,
					E24_FLAG_WRITE, &rq->out_data_phys,
					&rq->out_data_mapping);
		if (ret < 0) {
			pr_debug("%s: out_data could not be shared\n",
				 __func__);
			goto share_err;
		}
	}
share_err:
	mmap_read_unlock(mm);
	if (ret < 0)
		e24_unmap_request_nowb(filp, rq);
	return ret;

}
  1092. +
/*
 * Populate the shared command block for the device: sizes, either the
 * DSP-translated physical addresses (large buffers) or inline data
 * (small buffers), then — after a write barrier — the flags word that
 * tells the firmware the request is ready.
 */
static void e24_fill_hw_request(struct e24_dsp_cmd __iomem *cmd,
				struct e24_ioctl_request *rq,
				const struct e24_address_map *map)
{
	__raw_writel(rq->ioctl_data.in_data_size, &cmd->in_data_size);
	__raw_writel(rq->ioctl_data.out_data_size, &cmd->out_data_size);

	if (rq->ioctl_data.in_data_size > E24_DSP_CMD_INLINE_DATA_SIZE)
		__raw_writel(e24_translate_to_dsp(map, rq->in_data_phys),
			     &cmd->in_data_addr);
	else
		e24_comm_write(&cmd->in_data, rq->in_data,
			       rq->ioctl_data.in_data_size);

	if (rq->ioctl_data.out_data_size > E24_DSP_CMD_INLINE_DATA_SIZE)
		__raw_writel(e24_translate_to_dsp(map, rq->out_data_phys),
			     &cmd->out_data_addr);

	/* Ensure all payload writes land before the flags become visible. */
	wmb();
	/* update flags */
	__raw_writel(rq->ioctl_data.flags, &cmd->flags);
}
  1115. +
/*
 * Harvest a finished command: read inline output data back into the
 * request (large buffers come back via the shared mapping instead) and
 * clear the flags word to hand the slot back.
 *
 * NOTE(review): returning -ENXIO when any E24_QUEUE_VALID_FLAGS bit is
 * still set appears to treat leftover request flags as a firmware
 * error — confirm the intended flag protocol.
 */
static long e24_complete_hw_request(struct e24_dsp_cmd __iomem *cmd,
				    struct e24_ioctl_request *rq)
{
	u32 flags = __raw_readl(&cmd->flags);

	if (rq->ioctl_data.out_data_size <= E24_DSP_CMD_INLINE_DATA_SIZE)
		e24_comm_read(&cmd->out_data, rq->out_data,
			      rq->ioctl_data.out_data_size);

	__raw_writel(0, &cmd->flags);

	return (flags & E24_QUEUE_VALID_FLAGS) ? -ENXIO : 0;
}
  1129. +
  1130. +static long e24_ioctl_submit_task(struct file *filp,
  1131. + struct e24_ioctl_user __user *msg)
  1132. +{
  1133. + struct e24_device *edev = filp->private_data;
  1134. + struct e24_comm *queue = edev->queue;
  1135. + struct e24_ioctl_request rq_data, *vrq = &rq_data;
  1136. + void *data = &edev->mbox_data;
  1137. + int ret = -ENOMEM;
  1138. + int irq_mode = edev->irq_mode;
  1139. +
  1140. + if (copy_from_user(&vrq->ioctl_data, msg, sizeof(*msg)))
  1141. + return -EFAULT;
  1142. +
  1143. + if (vrq->ioctl_data.flags & ~E24_QUEUE_VALID_FLAGS) {
  1144. + dev_dbg(edev->dev, "%s: invalid flags 0x%08x\n",
  1145. + __func__, vrq->ioctl_data.flags);
  1146. + return -EINVAL;
  1147. + }
  1148. +
  1149. + ret = e24_map_request(filp, vrq, current->mm);
  1150. + if (ret < 0)
  1151. + return ret;
  1152. +
  1153. + mutex_lock(&queue->lock);
  1154. + e24_fill_hw_request(queue->comm, vrq, &edev->address_map);
  1155. +
  1156. + if (irq_mode) {
  1157. + ret = mbox_send_message(edev->tx_channel, data);
  1158. + mbox_chan_txdone(edev->tx_channel, ret);
  1159. + } else {
  1160. + ret = e24_complete_poll(edev, queue, e24_cmd_complete, vrq);
  1161. + }
  1162. +
  1163. + ret = e24_complete_hw_request(queue->comm, vrq);
  1164. + mutex_unlock(&queue->lock);
  1165. +
  1166. + if (ret == 0)
  1167. + ret = e24_unmap_request(filp, vrq);
  1168. +
  1169. + return ret;
  1170. +}
  1171. +
  1172. +static long e24_ioctl_get_channel(struct file *filp,
  1173. + void __user *msg)
  1174. +{
  1175. + struct e24_device *edev = filp->private_data;
  1176. +
  1177. + if (edev->tx_channel == NULL)
  1178. + edev->tx_channel = starfive_mbox_request_channel(edev->dev, "tx");
  1179. + if (edev->rx_channel == NULL)
  1180. + edev->rx_channel = starfive_mbox_request_channel(edev->dev, "rx");
  1181. +
  1182. + return 0;
  1183. +}
  1184. +
  1185. +static long e24_ioctl_free_channel(struct file *filp,
  1186. + void __user *msg)
  1187. +{
  1188. + struct e24_device *edev = filp->private_data;
  1189. +
  1190. + if (edev->rx_channel)
  1191. + mbox_free_channel(edev->rx_channel);
  1192. + if (edev->tx_channel)
  1193. + mbox_free_channel(edev->tx_channel);
  1194. +
  1195. + edev->rx_channel = NULL;
  1196. + edev->tx_channel = NULL;
  1197. + return 0;
  1198. +}
  1199. +
  1200. +static void e24_allocation_queue(struct e24_device *edev,
  1201. + struct e24_allocation *e24_pool_allocation)
  1202. +{
  1203. + spin_lock(&edev->busy_list_lock);
  1204. +
  1205. + e24_pool_allocation->next = edev->busy_list;
  1206. + edev->busy_list = e24_pool_allocation;
  1207. +
  1208. + spin_unlock(&edev->busy_list_lock);
  1209. +}
  1210. +
  1211. +static struct e24_allocation *e24_allocation_dequeue(struct e24_device *edev,
  1212. + phys_addr_t paddr, u32 size)
  1213. +{
  1214. + struct e24_allocation **pcur;
  1215. + struct e24_allocation *cur;
  1216. +
  1217. + spin_lock(&edev->busy_list_lock);
  1218. +
  1219. + for (pcur = &edev->busy_list; (cur = *pcur); pcur = &((*pcur)->next)) {
  1220. + pr_debug("%s: %pap / %pap x %d\n", __func__, &paddr, &cur->start, cur->size);
  1221. + if (paddr >= cur->start && paddr + size - cur->start <= cur->size) {
  1222. + *pcur = cur->next;
  1223. + break;
  1224. + }
  1225. + }
  1226. +
  1227. + spin_unlock(&edev->busy_list_lock);
  1228. + return cur;
  1229. +}
  1230. +
  1231. +static int e24_mmap(struct file *filp, struct vm_area_struct *vma)
  1232. +{
  1233. + int err;
  1234. + struct e24_device *edev = filp->private_data;
  1235. + unsigned long pfn = vma->vm_pgoff + PFN_DOWN(edev->shared_mem);
  1236. + struct e24_allocation *e24_user_allocation;
  1237. +
  1238. + e24_user_allocation = e24_allocation_dequeue(filp->private_data,
  1239. + pfn << PAGE_SHIFT,
  1240. + vma->vm_end - vma->vm_start);
  1241. + if (e24_user_allocation) {
  1242. + pgprot_t prot = vma->vm_page_prot;
  1243. +
  1244. + if (!e24_cacheable(edev, pfn,
  1245. + PFN_DOWN(vma->vm_end - vma->vm_start))) {
  1246. + prot = pgprot_writecombine(prot);
  1247. + vma->vm_page_prot = prot;
  1248. + }
  1249. +
  1250. + err = remap_pfn_range(vma, vma->vm_start, pfn,
  1251. + vma->vm_end - vma->vm_start,
  1252. + prot);
  1253. +
  1254. + vma->vm_private_data = e24_user_allocation;
  1255. + vma->vm_ops = &e24_vm_ops;
  1256. + } else {
  1257. + err = -EINVAL;
  1258. + }
  1259. +
  1260. + return err;
  1261. +}
  1262. +
  1263. +static long e24_ioctl_free(struct file *filp,
  1264. + struct e24_ioctl_alloc __user *p)
  1265. +{
  1266. + struct mm_struct *mm = current->mm;
  1267. + struct e24_ioctl_alloc user_ioctl_alloc;
  1268. + struct vm_area_struct *vma;
  1269. + unsigned long start;
  1270. +
  1271. + if (copy_from_user(&user_ioctl_alloc, p, sizeof(*p)))
  1272. + return -EFAULT;
  1273. +
  1274. + start = user_ioctl_alloc.addr;
  1275. + pr_debug("%s: virt_addr = 0x%08lx\n", __func__, start);
  1276. +
  1277. + mmap_read_lock(mm);
  1278. + vma = find_vma(mm, start);
  1279. +
  1280. + if (vma && vma->vm_file == filp &&
  1281. + vma->vm_start <= start && start < vma->vm_end) {
  1282. + size_t size;
  1283. +
  1284. + start = vma->vm_start;
  1285. + size = vma->vm_end - vma->vm_start;
  1286. + mmap_read_unlock(mm);
  1287. + pr_debug("%s: 0x%lx x %zu\n", __func__, start, size);
  1288. + return vm_munmap(start, size);
  1289. + }
  1290. + pr_debug("%s: no vma/bad vma for vaddr = 0x%08lx\n", __func__, start);
  1291. + mmap_read_unlock(mm);
  1292. +
  1293. + return -EINVAL;
  1294. +}
  1295. +
  1296. +static long e24_ioctl_alloc(struct file *filp,
  1297. + struct e24_ioctl_alloc __user *p)
  1298. +{
  1299. + struct e24_device *edev = filp->private_data;
  1300. + struct e24_allocation *e24_pool_allocation;
  1301. + unsigned long vaddr;
  1302. + struct e24_ioctl_alloc user_ioctl_alloc;
  1303. + long err;
  1304. +
  1305. + if (copy_from_user(&user_ioctl_alloc, p, sizeof(*p)))
  1306. + return -EFAULT;
  1307. +
  1308. + pr_debug("%s: size = %d, align = %x\n", __func__,
  1309. + user_ioctl_alloc.size, user_ioctl_alloc.align);
  1310. +
  1311. + err = e24_allocate(edev->pool,
  1312. + user_ioctl_alloc.size,
  1313. + user_ioctl_alloc.align,
  1314. + &e24_pool_allocation);
  1315. + if (err)
  1316. + return err;
  1317. +
  1318. + e24_allocation_queue(edev, e24_pool_allocation);
  1319. +
  1320. + vaddr = vm_mmap(filp, 0, e24_pool_allocation->size,
  1321. + PROT_READ | PROT_WRITE, MAP_SHARED,
  1322. + e24_allocation_offset(e24_pool_allocation));
  1323. +
  1324. + user_ioctl_alloc.addr = vaddr;
  1325. +
  1326. + if (copy_to_user(p, &user_ioctl_alloc, sizeof(*p))) {
  1327. + vm_munmap(vaddr, user_ioctl_alloc.size);
  1328. + return -EFAULT;
  1329. + }
  1330. + return 0;
  1331. +}
  1332. +
  1333. +static long e24_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  1334. +{
  1335. + long retval;
  1336. +
  1337. + switch (cmd) {
  1338. + case E24_IOCTL_SEND:
  1339. + retval = e24_ioctl_submit_task(filp, (struct e24_ioctl_user *)arg);
  1340. + break;
  1341. + case E24_IOCTL_GET_CHANNEL:
  1342. + retval = e24_ioctl_get_channel(filp, NULL);
  1343. + break;
  1344. + case E24_IOCTL_FREE_CHANNEL:
  1345. + retval = e24_ioctl_free_channel(filp, NULL);
  1346. + break;
  1347. + case E24_IOCTL_ALLOC:
  1348. + retval = e24_ioctl_alloc(filp, (struct e24_ioctl_alloc __user *)arg);
  1349. + break;
  1350. + case E24_IOCTL_FREE:
  1351. + retval = e24_ioctl_free(filp, (struct e24_ioctl_alloc __user *)arg);
  1352. + break;
  1353. + default:
  1354. + retval = -EINVAL;
  1355. + break;
  1356. + }
  1357. + return retval;
  1358. +}
  1359. +
/* Character-device operations exposed to user space. */
static const struct file_operations e24_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = e24_ioctl,
#ifdef CONFIG_COMPAT
	/*
	 * NOTE(review): the native handler is reused for 32-bit callers
	 * without compat_ptr() translation — confirm the ioctl payloads
	 * are layout-identical on compat ABIs (consider compat_ptr_ioctl).
	 */
	.compat_ioctl = e24_ioctl,
#endif
	.open = e24_open,
	.release = e24_release,
	.write = mbox_e24_message_write,
	.mmap = e24_mmap,
};
  1371. +
  1372. +void mailbox_task(struct platform_device *pdev)
  1373. +{
  1374. + struct e24_device *e24_dev = platform_get_drvdata(pdev);
  1375. +
  1376. + e24_dev->tx_channel = starfive_mbox_request_channel(e24_dev->dev, "tx");
  1377. + e24_dev->rx_channel = starfive_mbox_request_channel(e24_dev->dev, "rx");
  1378. + pr_debug("%s:%d.%#llx\n", __func__, __LINE__, (u64)e24_dev->rx_channel);
  1379. +}
  1380. +
  1381. +static long e24_init_mem_pool(struct platform_device *pdev, struct e24_device *devs)
  1382. +{
  1383. + struct resource *mem;
  1384. +
  1385. + mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecmd");
  1386. + if (!mem)
  1387. + return -ENODEV;
  1388. +
  1389. + devs->comm_phys = mem->start;
  1390. + devs->comm = devm_ioremap(&pdev->dev, mem->start, mem->end - mem->start);
  1391. + mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "espace");
  1392. + if (!mem)
  1393. + return -ENODEV;
  1394. +
  1395. + devs->shared_mem = mem->start;
  1396. + devs->shared_size = resource_size(mem);
  1397. + pr_debug("%s:%d.%llx,%llx\n", __func__, __LINE__, devs->comm_phys, devs->shared_mem);
  1398. + return e24_init_private_pool(&devs->pool, devs->shared_mem, devs->shared_size);
  1399. +}
  1400. +
  1401. +static int e24_init_address_map(struct device *dev,
  1402. + struct e24_address_map *map)
  1403. +{
  1404. +#if IS_ENABLED(CONFIG_OF)
  1405. + struct device_node *pnode = dev->of_node;
  1406. + struct device_node *node;
  1407. + int rlen, off;
  1408. + const __be32 *ranges = of_get_property(pnode, "ranges", &rlen);
  1409. + int na, pna, ns;
  1410. + int i;
  1411. +
  1412. + if (!ranges) {
  1413. + dev_dbg(dev, "%s: no 'ranges' property in the device tree, no translation at that level\n",
  1414. + __func__);
  1415. + goto empty;
  1416. + }
  1417. +
  1418. + node = of_get_next_child(pnode, NULL);
  1419. + if (!node) {
  1420. + dev_warn(dev, "%s: no child node found in the device tree, no translation at that level\n",
  1421. + __func__);
  1422. + goto empty;
  1423. + }
  1424. +
  1425. + na = of_n_addr_cells(node);
  1426. + ns = of_n_size_cells(node);
  1427. + pna = of_n_addr_cells(pnode);
  1428. +
  1429. + rlen /= 4;
  1430. + map->n = rlen / (na + pna + ns);
  1431. + map->entry = kmalloc_array(map->n, sizeof(*map->entry), GFP_KERNEL);
  1432. + if (!map->entry)
  1433. + return -ENOMEM;
  1434. +
  1435. + dev_dbg(dev,
  1436. + "%s: na = %d, pna = %d, ns = %d, rlen = %d cells, n = %d\n",
  1437. + __func__, na, pna, ns, rlen, map->n);
  1438. +
  1439. + for (off = 0, i = 0; off < rlen; off += na + pna + ns, ++i) {
  1440. + map->entry[i].src_addr = of_translate_address(node,
  1441. + ranges + off);
  1442. + map->entry[i].dst_addr = of_read_number(ranges + off, na);
  1443. + map->entry[i].size = of_read_number(ranges + off + na + pna, ns);
  1444. + dev_dbg(dev,
  1445. + " src_addr = 0x%llx, dst_addr = 0x%lx, size = 0x%lx\n",
  1446. + (unsigned long long)map->entry[i].src_addr,
  1447. + (unsigned long)map->entry[i].dst_addr,
  1448. + (unsigned long)map->entry[i].size);
  1449. + }
  1450. + sort(map->entry, map->n, sizeof(*map->entry), e24_compare_address_sort, NULL);
  1451. +
  1452. + of_node_put(node);
  1453. + return 0;
  1454. +
  1455. +empty:
  1456. +#endif
  1457. + map->n = 1;
  1458. + map->entry = kmalloc(sizeof(*map->entry), GFP_KERNEL);
  1459. + map->entry->src_addr = 0;
  1460. + map->entry->dst_addr = 0;
  1461. + map->entry->size = ~0u;
  1462. + return -ENOMEM;
  1463. +}
  1464. +
  1465. +typedef long e24_init_function(struct platform_device *pdev);
  1466. +
  1467. +static inline void e24_init(struct e24_device *e24_hw)
  1468. +{
  1469. + if (e24_hw->hw_ops->init)
  1470. + e24_hw->hw_ops->init(e24_hw->hw_arg);
  1471. +}
  1472. +
  1473. +static inline void e24_release_e24(struct e24_device *e24_hw)
  1474. +{
  1475. + if (e24_hw->hw_ops->reset)
  1476. + e24_hw->hw_ops->release(e24_hw->hw_arg);
  1477. +}
  1478. +
  1479. +static inline void e24_halt_e24(struct e24_device *e24_hw)
  1480. +{
  1481. + if (e24_hw->hw_ops->halt)
  1482. + e24_hw->hw_ops->halt(e24_hw->hw_arg);
  1483. +}
  1484. +
  1485. +static inline int e24_enable_e24(struct e24_device *e24_hw)
  1486. +{
  1487. + if (e24_hw->hw_ops->enable)
  1488. + return e24_hw->hw_ops->enable(e24_hw->hw_arg);
  1489. + else
  1490. + return -EINVAL;
  1491. +}
  1492. +
  1493. +static inline void e24_reset_e24(struct e24_device *e24_hw)
  1494. +{
  1495. + if (e24_hw->hw_ops->reset)
  1496. + e24_hw->hw_ops->reset(e24_hw->hw_arg);
  1497. +}
  1498. +
  1499. +static inline void e24_disable_e24(struct e24_device *e24_hw)
  1500. +{
  1501. + if (e24_hw->hw_ops->disable)
  1502. + e24_hw->hw_ops->disable(e24_hw->hw_arg);
  1503. +}
  1504. +
  1505. +static inline void e24_sendirq_e24(struct e24_device *e24_hw)
  1506. +{
  1507. + if (e24_hw->hw_ops->send_irq)
  1508. + e24_hw->hw_ops->send_irq(e24_hw->hw_arg);
  1509. +}
  1510. +
  1511. +irqreturn_t e24_irq_handler(int irq, struct e24_device *e24_hw)
  1512. +{
  1513. + dev_dbg(e24_hw->dev, "%s\n", __func__);
  1514. + complete(&e24_hw->completion);
  1515. +
  1516. + return IRQ_HANDLED;
  1517. +}
  1518. +EXPORT_SYMBOL(e24_irq_handler);
  1519. +
  1520. +static phys_addr_t e24_translate_to_cpu(struct e24_device *mail, Elf32_Phdr *phdr)
  1521. +{
  1522. + phys_addr_t res;
  1523. + __be32 addr = cpu_to_be32((u32)phdr->p_paddr);
  1524. + struct device_node *node =
  1525. + of_get_next_child(mail->dev->of_node, NULL);
  1526. +
  1527. + if (!node)
  1528. + node = mail->dev->of_node;
  1529. +
  1530. + res = of_translate_address(node, &addr);
  1531. +
  1532. + if (node != mail->dev->of_node)
  1533. + of_node_put(node);
  1534. + return res;
  1535. +}
  1536. +
/*
 * Load one ELF segment into system RAM, page by page: file-backed bytes
 * are memcpy'd from the firmware image, the remaining BSS portion is
 * zeroed, then the whole range is synced to the device.
 */
static int e24_load_segment_to_sysmem(struct e24_device *e24, Elf32_Phdr *phdr)
{
	phys_addr_t pa = e24_translate_to_cpu(e24, phdr);
	struct page *page = pfn_to_page(__phys_to_pfn(pa));
	size_t page_offs = pa & ~PAGE_MASK;	/* offset within first page */
	size_t offs;

	/* Walk pages; offs advances by however much was written per page. */
	for (offs = 0; offs < phdr->p_memsz; ++page) {
		void *p = kmap(page);
		size_t sz;

		if (!p)
			return -ENOMEM;

		/* Start of a fresh page except for the very first one. */
		page_offs &= ~PAGE_MASK;
		sz = PAGE_SIZE - page_offs;

		if (offs < phdr->p_filesz) {
			size_t copy_sz = sz;

			if (phdr->p_filesz - offs < copy_sz)
				copy_sz = phdr->p_filesz - offs;

			/*
			 * NOTE(review): rounding the copy up to 4 bytes may
			 * step past p_filesz and, at a page boundary, past
			 * the mapped page — confirm segment sizes are
			 * 4-byte aligned in practice.
			 */
			copy_sz = ALIGN(copy_sz, 4);
			memcpy(p + page_offs,
			       (void *)e24->firmware->data +
			       phdr->p_offset + offs,
			       copy_sz);
			page_offs += copy_sz;
			offs += copy_sz;
			sz -= copy_sz;
		}

		/* Zero-fill the BSS part that falls on this page. */
		if (offs < phdr->p_memsz && sz) {
			if (phdr->p_memsz - offs < sz)
				sz = phdr->p_memsz - offs;

			sz = ALIGN(sz, 4);
			memset(p + page_offs, 0, sz);
			page_offs += sz;
			offs += sz;
		}
		kunmap(page);
	}
	/* Make the freshly written segment visible to the device. */
	dma_sync_single_for_device(e24->dev, pa, phdr->p_memsz, DMA_TO_DEVICE);
	return 0;
}
  1584. +
/*
 * Load one ELF segment into device memory through a temporary ioremap:
 * copy p_filesz bytes from the image, zero the remaining BSS, using the
 * hw_ops copy/zero hooks when provided (some devices need word access).
 */
static int e24_load_segment_to_iomem(struct e24_device *e24, Elf32_Phdr *phdr)
{
	phys_addr_t pa = e24_translate_to_cpu(e24, phdr);
	void __iomem *p = ioremap(pa, phdr->p_memsz);

	if (!p) {
		dev_err(e24->dev, "couldn't ioremap %pap x 0x%08x\n",
			&pa, (u32)phdr->p_memsz);
		return -EINVAL;
	}
	if (e24->hw_ops->memcpy_tohw)
		e24->hw_ops->memcpy_tohw(p, (void *)e24->firmware->data +
					 phdr->p_offset, phdr->p_filesz);
	else
		/* Align to 4 so only whole words hit the bus. */
		memcpy_toio(p, (void *)e24->firmware->data + phdr->p_offset,
			    ALIGN(phdr->p_filesz, 4));

	if (e24->hw_ops->memset_hw)
		e24->hw_ops->memset_hw(p + phdr->p_filesz, 0,
				       phdr->p_memsz - phdr->p_filesz);
	else
		memset_io(p + ALIGN(phdr->p_filesz, 4), 0,
			  ALIGN(phdr->p_memsz - ALIGN(phdr->p_filesz, 4), 4));

	iounmap(p);
	return 0;
}
  1612. +
  1613. +static int e24_load_firmware(struct e24_device *e24_dev)
  1614. +{
  1615. + Elf32_Ehdr *ehdr = (Elf32_Ehdr *)e24_dev->firmware->data;
  1616. + u32 *dai = (u32 *)e24_dev->firmware->data;
  1617. + int i;
  1618. +
  1619. + pr_debug("elf size:%ld,%x,%x\n", e24_dev->firmware->size, dai[0], dai[1]);
  1620. + if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
  1621. + dev_err(e24_dev->dev, "bad firmware ELF magic\n");
  1622. + return -EINVAL;
  1623. + }
  1624. +
  1625. + if (ehdr->e_type != ET_EXEC) {
  1626. + dev_err(e24_dev->dev, "bad firmware ELF type\n");
  1627. + return -EINVAL;
  1628. + }
  1629. +
  1630. + if (ehdr->e_machine != EM_RISCV) {
  1631. + dev_err(e24_dev->dev, "bad firmware ELF machine\n");
  1632. + return -EINVAL;
  1633. + }
  1634. +
  1635. + if (ehdr->e_phoff >= e24_dev->firmware->size ||
  1636. + ehdr->e_phoff +
  1637. + ehdr->e_phentsize * ehdr->e_phnum > e24_dev->firmware->size) {
  1638. + dev_err(e24_dev->dev, "bad firmware ELF PHDR information\n");
  1639. + return -EINVAL;
  1640. + }
  1641. +
  1642. + for (i = ehdr->e_phnum; i >= 0 ; i--) {
  1643. + Elf32_Phdr *phdr = (void *)e24_dev->firmware->data +
  1644. + ehdr->e_phoff + i * ehdr->e_phentsize;
  1645. + phys_addr_t pa;
  1646. + int rc;
  1647. +
  1648. + /* Only load non-empty loadable segments, R/W/X */
  1649. + if (!(phdr->p_type == PT_LOAD &&
  1650. + (phdr->p_flags & (PF_X | PF_R | PF_W)) &&
  1651. + phdr->p_memsz > 0))
  1652. + continue;
  1653. +
  1654. + if (phdr->p_offset >= e24_dev->firmware->size ||
  1655. + phdr->p_offset + phdr->p_filesz > e24_dev->firmware->size) {
  1656. + dev_err(e24_dev->dev, "bad firmware ELF program header entry %d\n", i);
  1657. + return -EINVAL;
  1658. + }
  1659. +
  1660. + pa = e24_translate_to_cpu(e24_dev, phdr);
  1661. + if (pa == (phys_addr_t)OF_BAD_ADDR) {
  1662. + dev_err(e24_dev->dev,
  1663. + "device address 0x%08x could not be mapped to host physical address",
  1664. + (u32)phdr->p_paddr);
  1665. + return -EINVAL;
  1666. + }
  1667. + dev_dbg(e24_dev->dev, "loading segment %d (device 0x%08x) to physical %pap\n",
  1668. + i, (u32)phdr->p_paddr, &pa);
  1669. +
  1670. + if (pfn_valid(__phys_to_pfn(pa)))
  1671. + rc = e24_load_segment_to_sysmem(e24_dev, phdr);
  1672. + else
  1673. + rc = e24_load_segment_to_iomem(e24_dev, phdr);
  1674. +
  1675. + if (rc < 0)
  1676. + return rc;
  1677. + }
  1678. + return 0;
  1679. +}
  1680. +
  1681. +static int e24_boot_firmware(struct device *dev)
  1682. +{
  1683. + int ret;
  1684. + struct e24_device *e24_dev = dev_get_drvdata(dev);
  1685. +
  1686. + if (e24_dev->firmware_name) {
  1687. + ret = request_firmware(&e24_dev->firmware, e24_dev->firmware_name, e24_dev->dev);
  1688. +
  1689. + if (ret < 0)
  1690. + return ret;
  1691. +
  1692. + ret = e24_load_firmware(e24_dev);
  1693. + if (ret < 0) {
  1694. + release_firmware(e24_dev->firmware);
  1695. + return ret;
  1696. + }
  1697. +
  1698. + }
  1699. +
  1700. + release_firmware(e24_dev->firmware);
  1701. + ret = e24_enable_e24(e24_dev);
  1702. + if (ret < 0)
  1703. + return ret;
  1704. +
  1705. + e24_reset_e24(e24_dev);
  1706. + e24_release_e24(e24_dev);
  1707. + e24_synchronize(e24_dev);
  1708. +
  1709. + return ret;
  1710. +}
  1711. +
  1712. +int e24_runtime_suspend(struct device *dev)
  1713. +{
  1714. + struct e24_device *e24_dev = dev_get_drvdata(dev);
  1715. +
  1716. + e24_halt_e24(e24_dev);
  1717. + e24_disable_e24(e24_dev);
  1718. +
  1719. + return 0;
  1720. +}
  1721. +
  1722. +long e24_init_v0(struct platform_device *pdev)
  1723. +{
  1724. + long ret = -EINVAL;
  1725. + int nodeid, i;
  1726. + struct e24_hw_arg *hw_arg;
  1727. + struct e24_device *e24_dev;
  1728. + char nodename[sizeof("eboot") + 2 * sizeof(int)];
  1729. +
  1730. + e24_dev = devm_kzalloc(&pdev->dev, sizeof(*e24_dev), GFP_KERNEL);
  1731. + if (e24_dev == NULL) {
  1732. + ret = -ENOMEM;
  1733. + return ret;
  1734. + }
  1735. +
  1736. + hw_arg = devm_kzalloc(&pdev->dev, sizeof(*hw_arg), GFP_KERNEL);
  1737. + if (hw_arg == NULL)
  1738. + return -ENOMEM;
  1739. +
  1740. + platform_set_drvdata(pdev, e24_dev);
  1741. + e24_dev->dev = &pdev->dev;
  1742. + e24_dev->hw_arg = hw_arg;
  1743. + e24_dev->hw_ops = e24_get_hw_ops();
  1744. + e24_dev->nodeid = -1;
  1745. + hw_arg->e24 = e24_dev;
  1746. +
  1747. + ret = e24_init_mem_pool(pdev, e24_dev);
  1748. + if (ret < 0)
  1749. + goto err;
  1750. +
  1751. + ret = e24_init_address_map(e24_dev->dev, &e24_dev->address_map);
  1752. + if (ret < 0)
  1753. + goto err_free_pool;
  1754. +
  1755. + e24_dev->n_queues = 1;
  1756. + e24_dev->queue = devm_kmalloc(&pdev->dev, e24_dev->n_queues * sizeof(*e24_dev->queue), GFP_KERNEL);
  1757. + if (e24_dev->queue == NULL) {
  1758. + ret = -ENOMEM;
  1759. + goto err_free_map;
  1760. + }
  1761. +
  1762. + for (i = 0; i < e24_dev->n_queues; i++) {
  1763. + mutex_init(&e24_dev->queue[i].lock);
  1764. + e24_dev->queue[i].comm = e24_dev->comm + E24_CMD_STRIDE * i;
  1765. + }
  1766. +
  1767. + ret = of_property_read_u32(pdev->dev.of_node, "irq-mode",
  1768. + &hw_arg->irq_mode);
  1769. + e24_dev->irq_mode = hw_arg->irq_mode;
  1770. + if (hw_arg->irq_mode == 0)
  1771. + dev_info(&pdev->dev, "using polling mode on the device side\n");
  1772. +
  1773. + e24_dev->mbox_data = e24_translate_to_dsp(&e24_dev->address_map, e24_dev->comm_phys);
  1774. + ret = device_property_read_string(e24_dev->dev, "firmware-name",
  1775. + &e24_dev->firmware_name);
  1776. + if (ret == -EINVAL || ret == -ENODATA) {
  1777. + dev_dbg(e24_dev->dev,
  1778. + "no firmware-name property, not loading firmware");
  1779. + } else if (ret < 0) {
  1780. + dev_err(e24_dev->dev, "invalid firmware name (%ld)", ret);
  1781. + goto err_free_map;
  1782. + }
  1783. +
  1784. + e24_init(e24_dev);
  1785. + pm_runtime_set_active(e24_dev->dev);
  1786. + pm_runtime_enable(e24_dev->dev);
  1787. + if (!pm_runtime_enabled(e24_dev->dev)) {
  1788. + ret = e24_boot_firmware(e24_dev->dev);
  1789. + if (ret)
  1790. + goto err_pm_disable;
  1791. + }
  1792. + nodeid = ida_simple_get(&e24_nodeid, 0, 0, GFP_KERNEL);
  1793. + if (nodeid < 0) {
  1794. + ret = nodeid;
  1795. + goto err_pm_disable;
  1796. + }
  1797. +
  1798. + e24_dev->nodeid = nodeid;
  1799. + snprintf(nodename, sizeof(nodename), "eboot%u", nodeid);
  1800. +
  1801. + e24_dev->miscdev = (struct miscdevice){
  1802. + .minor = MISC_DYNAMIC_MINOR,
  1803. + .name = devm_kstrdup(&pdev->dev, nodename, GFP_KERNEL),
  1804. + .nodename = devm_kstrdup(&pdev->dev, nodename, GFP_KERNEL),
  1805. + .fops = &e24_fops,
  1806. + };
  1807. +
  1808. + ret = misc_register(&e24_dev->miscdev);
  1809. + if (ret < 0)
  1810. + goto err_free_id;
  1811. +
  1812. + return 0;
  1813. +err_free_id:
  1814. + ida_simple_remove(&e24_nodeid, nodeid);
  1815. +
  1816. +err_pm_disable:
  1817. + pm_runtime_disable(e24_dev->dev);
  1818. +err_free_map:
  1819. + kfree(e24_dev->address_map.entry);
  1820. +err_free_pool:
  1821. + e24_free_pool(e24_dev->pool);
  1822. +err:
  1823. + dev_err(&pdev->dev, "%s: ret = %ld\n", __func__, ret);
  1824. + return ret;
  1825. +}
  1826. +
  1827. +static const struct of_device_id e24_of_match[] = {
  1828. + {
  1829. + .compatible = "starfive,e24",
  1830. + .data = e24_init_v0,
  1831. + },
  1832. + {
  1833. + },
  1834. +};
  1835. +MODULE_DEVICE_TABLE(of, e24_of_match);
  1836. +
  1837. +int e24_deinit(struct platform_device *pdev)
  1838. +{
  1839. + struct e24_device *e24_dev = platform_get_drvdata(pdev);
  1840. +
  1841. + pm_runtime_disable(&pdev->dev);
  1842. + if (!pm_runtime_status_suspended(e24_dev->dev))
  1843. + e24_runtime_suspend(e24_dev->dev);
  1844. +
  1845. + misc_deregister(&e24_dev->miscdev);
  1846. + e24_free_pool(e24_dev->pool);
  1847. + kfree(e24_dev->address_map.entry);
  1848. + ida_simple_remove(&e24_nodeid, e24_dev->nodeid);
  1849. +
  1850. + if (e24_dev->rx_channel)
  1851. + mbox_free_channel(e24_dev->rx_channel);
  1852. + if (e24_dev->tx_channel)
  1853. + mbox_free_channel(e24_dev->tx_channel);
  1854. + return 0;
  1855. +}
  1856. +
  1857. +static int e24_probe(struct platform_device *pdev)
  1858. +{
  1859. + long ret = -EINVAL;
  1860. + const struct of_device_id *match;
  1861. + e24_init_function *init;
  1862. +
  1863. + match = of_match_device(e24_of_match, &pdev->dev);
  1864. + init = match->data;
  1865. + ret = init(pdev);
  1866. +
  1867. + return IS_ERR_VALUE(ret) ? ret : 0;
  1868. +}
  1869. +
  1870. +static void e24_remove(struct platform_device *pdev)
  1871. +{
  1872. + e24_deinit(pdev);
  1873. +}
  1874. +
  1875. +static const struct dev_pm_ops e24_runtime_pm_ops = {
  1876. + SET_RUNTIME_PM_OPS(e24_runtime_suspend,
  1877. + e24_boot_firmware, NULL)
  1878. +};
  1879. +
  1880. +static struct platform_driver e24_driver = {
  1881. + .probe = e24_probe,
  1882. + .remove = e24_remove,
  1883. + .driver = {
  1884. + .name = "e24_boot",
  1885. + .of_match_table = of_match_ptr(e24_of_match),
  1886. + .pm = &e24_runtime_pm_ops,
  1887. + },
  1888. +};
  1889. +
  1890. +module_platform_driver(e24_driver);
  1891. +
  1892. +MODULE_DESCRIPTION("StarFive e24 driver");
  1893. +MODULE_AUTHOR("Shanlong Li <[email protected]>");
  1894. +MODULE_LICENSE("GPL");
  1895. --- /dev/null
  1896. +++ b/drivers/e24/starfive_e24.h
  1897. @@ -0,0 +1,159 @@
  1898. +/* SPDX-License-Identifier: GPL-2.0 */
  1899. +#ifndef __STARFIVE_E24_H__
  1900. +#define __STARFIVE_E24_H__
  1901. +
  1902. +#include <linux/types.h>
  1903. +#include <linux/completion.h>
  1904. +#include <linux/miscdevice.h>
  1905. +#include <linux/mutex.h>
  1906. +#include <linux/irqreturn.h>
  1907. +#include <linux/platform_device.h>
  1908. +
  1909. +#define E24_IOCTL_MAGIC 'e'
  1910. +#define E24_IOCTL_SEND _IO(E24_IOCTL_MAGIC, 1)
  1911. +#define E24_IOCTL_RECV _IO(E24_IOCTL_MAGIC, 2)
  1912. +#define E24_IOCTL_GET_CHANNEL _IO(E24_IOCTL_MAGIC, 3)
  1913. +#define E24_IOCTL_FREE_CHANNEL _IO(E24_IOCTL_MAGIC, 4)
  1914. +#define E24_IOCTL_ALLOC _IO(E24_IOCTL_MAGIC, 5)
  1915. +#define E24_IOCTL_FREE _IO(E24_IOCTL_MAGIC, 6)
  1916. +
  1917. +#define E24_DSP_CMD_INLINE_DATA_SIZE 16
  1918. +#define E24_NO_TRANSLATION ((u32)~0ul)
  1919. +#define E24_CMD_STRIDE 256
  1920. +
  1921. +#define E24_MEM_MAP
  1922. +enum e24_irq_mode {
  1923. + MAIL_IRQ_NONE,
  1924. + MAIL_IRQ_LEVEL,
  1925. + MAIL_IRQ_MAX
  1926. +};
  1927. +
  1928. +enum {
  1929. + E24_FLAG_READ = 0x1,
  1930. + E24_FLAG_WRITE = 0x2,
  1931. + E24_FLAG_READ_WRITE = 0x3,
  1932. +};
  1933. +
  1934. +enum {
  1935. + E24_QUEUE_FLAG_VALID = 0x4,
  1936. + E24_QUEUE_FLAG_PRIO = 0xff00,
  1937. + E24_QUEUE_FLAG_PRIO_SHIFT = 8,
  1938. +
  1939. + E24_QUEUE_VALID_FLAGS =
  1940. + E24_QUEUE_FLAG_VALID |
  1941. + E24_QUEUE_FLAG_PRIO,
  1942. +};
  1943. +
  1944. +enum {
  1945. + E24_CMD_FLAG_REQUEST_VALID = 0x00000001,
  1946. + E24_CMD_FLAG_RESPONSE_VALID = 0x00000002,
  1947. + E24_CMD_FLAG_REQUEST_NSID = 0x00000004,
  1948. + E24_CMD_FLAG_RESPONSE_DELIVERY_FAIL = 0x00000008,
  1949. +};
  1950. +
  1951. +struct e24_address_map_entry {
  1952. + phys_addr_t src_addr;
  1953. + u32 dst_addr;
  1954. + u32 size;
  1955. +};
  1956. +
  1957. +struct e24_address_map {
  1958. + unsigned int n;
  1959. + struct e24_address_map_entry *entry;
  1960. +};
  1961. +
  1962. +struct e24_alien_mapping {
  1963. + unsigned long vaddr;
  1964. + unsigned long size;
  1965. + phys_addr_t paddr;
  1966. + void *allocation;
  1967. + enum {
  1968. + ALIEN_GUP,
  1969. + ALIEN_PFN_MAP,
  1970. + ALIEN_COPY,
  1971. + } type;
  1972. +};
  1973. +
  1974. +struct e24_mapping {
  1975. + enum {
  1976. + E24_MAPPING_NONE,
  1977. + E24_MAPPING_NATIVE,
  1978. + E24_MAPPING_ALIEN,
  1979. + E24_MAPPING_KERNEL = 0x4,
  1980. + } type;
  1981. + union {
  1982. + struct {
  1983. + struct e24_allocation *m_allocation;
  1984. + unsigned long vaddr;
  1985. + } native;
  1986. + struct e24_alien_mapping alien_mapping;
  1987. + };
  1988. +};
  1989. +
  1990. +struct e24_ioctl_alloc {
  1991. + u32 size;
  1992. + u32 align;
  1993. + u64 addr;
  1994. +};
  1995. +
  1996. +struct e24_comm {
  1997. + struct mutex lock;
  1998. + void __iomem *comm;
  1999. + struct completion completion;
  2000. + u32 priority;
  2001. +};
  2002. +
  2003. +struct e24_device {
  2004. + struct device *dev;
  2005. + const char *firmware_name;
  2006. + const struct firmware *firmware;
  2007. + struct miscdevice miscdev;
  2008. + const struct e24_hw_ops *hw_ops;
  2009. + void *hw_arg;
  2010. + int irq_mode;
  2011. +
  2012. + u32 n_queues;
  2013. + struct completion completion;
  2014. + struct e24_address_map address_map;
  2015. + struct e24_comm *queue;
  2016. + void __iomem *comm;
  2017. + phys_addr_t comm_phys;
  2018. + phys_addr_t shared_mem;
  2019. + phys_addr_t shared_size;
  2020. +
  2021. + u32 mbox_data;
  2022. + int nodeid;
  2023. + spinlock_t busy_list_lock;
  2024. +
  2025. + struct mbox_chan *tx_channel;
  2026. + struct mbox_chan *rx_channel;
  2027. + void *rx_buffer;
  2028. + void *message;
  2029. + struct e24_allocation_pool *pool;
  2030. + struct e24_allocation *busy_list;
  2031. +};
  2032. +
  2033. +struct e24_hw_arg {
  2034. + struct e24_device *e24;
  2035. + phys_addr_t regs_phys;
  2036. + struct clk *clk_rtc;
  2037. + struct clk *clk_core;
  2038. + struct clk *clk_dbg;
  2039. + struct reset_control *rst_core;
  2040. + struct regmap *reg_syscon;
  2041. + enum e24_irq_mode irq_mode;
  2042. +};
  2043. +
  2044. +static inline int e24_compare_address(phys_addr_t addr,
  2045. + const struct e24_address_map_entry *entry)
  2046. +{
  2047. + if (addr < entry->src_addr)
  2048. + return -1;
  2049. + if (addr - entry->src_addr < entry->size)
  2050. + return 0;
  2051. + return 1;
  2052. +}
  2053. +
  2054. +irqreturn_t e24_irq_handler(int irq, struct e24_device *e24_hw);
  2055. +
  2056. +#endif
  2057. --- /dev/null
  2058. +++ b/drivers/e24/starfive_e24_hw.c
  2059. @@ -0,0 +1,134 @@
  2060. +// SPDX-License-Identifier: GPL-2.0
  2061. +#include <linux/delay.h>
  2062. +#include <linux/interrupt.h>
  2063. +#include <linux/idr.h>
  2064. +#include <linux/io.h>
  2065. +#include <linux/slab.h>
  2066. +#include <linux/of.h>
  2067. +#include <linux/clk.h>
  2068. +#include <linux/reset.h>
  2069. +#include <linux/module.h>
  2070. +#include <linux/mfd/syscon.h>
  2071. +#include <linux/regmap.h>
  2072. +#include "starfive_e24.h"
  2073. +#include "starfive_e24_hw.h"
  2074. +
  2075. +#define RET_E24_VECTOR_ADDR 0x6CE00000
  2076. +
  2077. +static void halt(void *hw_arg)
  2078. +{
  2079. + struct e24_hw_arg *mail_arg = hw_arg;
  2080. +
  2081. + reset_control_assert(mail_arg->rst_core);
  2082. + pr_debug("e24 halt.\n");
  2083. +}
  2084. +
  2085. +static void release(void *hw_arg)
  2086. +{
  2087. + struct e24_hw_arg *mail_arg = hw_arg;
  2088. +
  2089. + reset_control_deassert(mail_arg->rst_core);
  2090. + pr_debug("e24 begin run.\n");
  2091. +}
  2092. +
  2093. +static void reset(void *hw_arg)
  2094. +{
  2095. + struct e24_hw_arg *mail_arg = hw_arg;
  2096. +
  2097. + regmap_update_bits(mail_arg->reg_syscon, 0x24, 0xFFFFFFFF, RET_E24_VECTOR_ADDR);
  2098. + pr_debug("e24 reset vector.\n");
  2099. +}
  2100. +
  2101. +static void disable(void *hw_arg)
  2102. +{
  2103. + struct e24_hw_arg *mail_arg = hw_arg;
  2104. +
  2105. + clk_disable_unprepare(mail_arg->clk_core);
  2106. + clk_disable_unprepare(mail_arg->clk_dbg);
  2107. + clk_disable_unprepare(mail_arg->clk_rtc);
  2108. +
  2109. + pr_debug("e24 disable ...\n");
  2110. +
  2111. +}
  2112. +
  2113. +static int enable(void *hw_arg)
  2114. +{
  2115. + struct e24_hw_arg *mail_arg = hw_arg;
  2116. + int ret = 0;
  2117. +
  2118. + ret = clk_prepare_enable(mail_arg->clk_core);
  2119. + if (ret)
  2120. + return ret;
  2121. +
  2122. + ret = clk_prepare_enable(mail_arg->clk_dbg);
  2123. + if (ret) {
  2124. + clk_disable_unprepare(mail_arg->clk_core);
  2125. + return ret;
  2126. + }
  2127. +
  2128. + ret = clk_prepare_enable(mail_arg->clk_rtc);
  2129. + if (ret) {
  2130. + clk_disable_unprepare(mail_arg->clk_core);
  2131. + clk_disable_unprepare(mail_arg->clk_dbg);
  2132. + return ret;
  2133. + }
  2134. +
  2135. + pr_debug("e24_enable clk ...\n");
  2136. + return 0;
  2137. +}
  2138. +
  2139. +
  2140. +static int init(void *hw_arg)
  2141. +{
  2142. + struct e24_hw_arg *mail_arg = hw_arg;
  2143. +
  2144. + mail_arg->reg_syscon = syscon_regmap_lookup_by_phandle(
  2145. + mail_arg->e24->dev->of_node,
  2146. + "starfive,stg-syscon");
  2147. + if (IS_ERR(mail_arg->reg_syscon)) {
  2148. + dev_err(mail_arg->e24->dev, "No starfive,stg-syscon\n");
  2149. + return PTR_ERR(mail_arg->reg_syscon);
  2150. + }
  2151. +
  2152. + mail_arg->clk_core = devm_clk_get_optional(mail_arg->e24->dev, "clk_core");
  2153. + if (IS_ERR(mail_arg->clk_core)) {
  2154. + dev_err(mail_arg->e24->dev, "failed to get e24 clk core\n");
  2155. + return PTR_ERR(mail_arg->clk_core);
  2156. + }
  2157. +
  2158. + mail_arg->clk_dbg = devm_clk_get_optional(mail_arg->e24->dev, "clk_dbg");
  2159. + if (IS_ERR(mail_arg->clk_dbg)) {
  2160. + dev_err(mail_arg->e24->dev, "failed to get e24 clk dbg\n");
  2161. + return PTR_ERR(mail_arg->clk_dbg);
  2162. + }
  2163. +
  2164. + mail_arg->clk_rtc = devm_clk_get_optional(mail_arg->e24->dev, "clk_rtc");
  2165. + if (IS_ERR(mail_arg->clk_rtc)) {
  2166. + dev_err(mail_arg->e24->dev, "failed to get e24 clk rtc\n");
  2167. + return PTR_ERR(mail_arg->clk_rtc);
  2168. + }
  2169. +
  2170. + mail_arg->rst_core = devm_reset_control_get_exclusive(mail_arg->e24->dev, "e24_core");
  2171. + if (IS_ERR(mail_arg->rst_core)) {
  2172. + dev_err(mail_arg->e24->dev, "failed to get e24 reset\n");
  2173. + return PTR_ERR(mail_arg->rst_core);
  2174. + }
  2175. +
  2176. + /* propagate clock-enable failure instead of ignoring it */
  2177. +
  2178. + return enable(hw_arg);
  2179. +}
  2180. +
  2181. +static struct e24_hw_ops e24_hw_ops = {
  2182. + .init = init,
  2183. + .enable = enable,
  2184. + .reset = reset,
  2185. + .halt = halt,
  2186. + .release = release,
  2187. + .disable = disable,
  2188. +};
  2189. +
  2190. +struct e24_hw_ops *e24_get_hw_ops(void)
  2191. +{
  2192. + return &e24_hw_ops;
  2193. +}
  2194. --- /dev/null
  2195. +++ b/drivers/e24/starfive_e24_hw.h
  2196. @@ -0,0 +1,94 @@
  2197. +/* SPDX-License-Identifier: GPL-2.0 */
  2198. +#ifndef __STARFIVE_E24_HW_H__
  2199. +#define __STARFIVE_E24_HW_H__
  2200. +/*
  2201. + * Hardware-specific operation entry points.
  2202. + */
  2203. +struct e24_hw_ops {
  2204. + /*
  2205. + * Gets the clock for E24.
  2206. + */
  2207. + int (*init)(void *hw_arg);
  2208. + /*
  2209. + * Enable power/clock, but keep the core stalled.
  2210. + */
  2211. + int (*enable)(void *hw_arg);
  2212. + /*
  2213. + * Disable power/clock.
  2214. + */
  2215. + void (*disable)(void *hw_arg);
  2216. + /*
  2217. + * Reset the core.
  2218. + */
  2219. + void (*reset)(void *hw_arg);
  2220. + /*
  2221. + * Unstall the core.
  2222. + */
  2223. + void (*release)(void *hw_arg);
  2224. + /*
  2225. + * Stall the core.
  2226. + */
  2227. + void (*halt)(void *hw_arg);
  2228. +
  2229. + /* Get HW-specific data to pass to the DSP on synchronization
  2230. + *
  2231. + * param hw_arg: opaque parameter passed to DSP at initialization
  2232. + * param sz: return size of sync data here
  2233. + * return a buffer allocated with kmalloc that the caller will free
  2234. + */
  2235. + void *(*get_hw_sync_data)(void *hw_arg, size_t *sz);
  2236. +
  2237. + /*
  2238. + * Send IRQ to the core.
  2239. + */
  2240. + void (*send_irq)(void *hw_arg);
  2241. +
  2242. + /*
  2243. + * Check whether region of physical memory may be handled by
  2244. + * dma_sync_* operations
  2245. + *
  2246. + * \param hw_arg: opaque parameter passed to DSP at initialization
  2247. + * time
  2248. + */
  2249. + bool (*cacheable)(void *hw_arg, unsigned long pfn, unsigned long n_pages);
  2250. + /*
  2251. + * Synchronize region of memory for DSP access.
  2252. + *
  2253. + * \param hw_arg: opaque parameter passed to DSP at initialization
  2254. + * time
  2255. + */
  2256. + void (*dma_sync_for_device)(void *hw_arg,
  2257. + void *vaddr, phys_addr_t paddr,
  2258. + unsigned long sz, unsigned int flags);
  2259. + /*
  2260. + * Synchronize region of memory for host access.
  2261. + *
  2262. + * \param hw_arg: opaque parameter passed to DSP at initialization
  2263. + * time
  2264. + */
  2265. + void (*dma_sync_for_cpu)(void *hw_arg,
  2266. + void *vaddr, phys_addr_t paddr,
  2267. + unsigned long sz, unsigned int flags);
  2268. +
  2269. + /*
  2270. + * memcpy data/code to device-specific memory.
  2271. + */
  2272. + void (*memcpy_tohw)(void __iomem *dst, const void *src, size_t sz);
  2273. + /*
  2274. + * memset device-specific memory.
  2275. + */
  2276. + void (*memset_hw)(void __iomem *dst, int c, size_t sz);
  2277. +
  2278. + /*
  2279. + * Check DSP status.
  2280. + *
  2281. + * \param hw_arg: opaque parameter passed to DSP at initialization
  2282. + * time
  2283. + * \return whether the core has crashed and needs to be restarted
  2284. + */
  2285. + bool (*panic_check)(void *hw_arg);
  2286. +};
  2287. +
  2288. +long e24_init_hw(struct platform_device *pdev, void *hw_arg);
  2289. +struct e24_hw_ops *e24_get_hw_ops(void);
  2290. +#endif