0013-v5.6-crypto-qce-allow-building-only-hashes-ciphers.patch 11 KB

From 59e056cda4beb5412e3653e6360c2eb0fa770baa Mon Sep 17 00:00:00 2001
From: Eneas U de Queiroz <[email protected]>
Date: Fri, 20 Dec 2019 16:02:18 -0300
Subject: [PATCH 07/11] crypto: qce - allow building only hashes/ciphers

Allow the user to choose whether to build support for all algorithms
(default), hashes-only, or skciphers-only.

The QCE engine does not appear to scale as well as the CPU to handle
multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the
QCE handles only 2 requests in parallel.

Ipsec throughput seems to improve when disabling either family of
algorithms, sharing the load with the CPU. Enabling skciphers-only
appears to work best.

Signed-off-by: Eneas U de Queiroz <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
---
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -617,6 +617,14 @@ config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
+ help
+ This driver supports Qualcomm crypto engine accelerator
+ hardware. To compile this driver as a module, choose M here. The
+ module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SKCIPHER
+ bool
+ depends on CRYPTO_DEV_QCE
select CRYPTO_AES
select CRYPTO_LIB_DES
select CRYPTO_ECB
@@ -624,10 +632,57 @@ config CRYPTO_DEV_QCE
select CRYPTO_XTS
select CRYPTO_CTR
select CRYPTO_BLKCIPHER
+
+config CRYPTO_DEV_QCE_SHA
+ bool
+ depends on CRYPTO_DEV_QCE
+
+choice
+ prompt "Algorithms enabled for QCE acceleration"
+ default CRYPTO_DEV_QCE_ENABLE_ALL
+ depends on CRYPTO_DEV_QCE
help
- This driver supports Qualcomm crypto engine accelerator
- hardware. To compile this driver as a module, choose M here. The
- module will be called qcrypto.
+ This option allows to choose whether to build support for all algorithms
+ (default), hashes-only, or skciphers-only.
+
+ The QCE engine does not appear to scale as well as the CPU to handle
+ multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the
+ QCE handles only 2 requests in parallel.
+
+ Ipsec throughput seems to improve when disabling either family of
+ algorithms, sharing the load with the CPU. Enabling skciphers-only
+ appears to work best.
+
+ config CRYPTO_DEV_QCE_ENABLE_ALL
+ bool "All supported algorithms"
+ select CRYPTO_DEV_QCE_SKCIPHER
+ select CRYPTO_DEV_QCE_SHA
+ help
+ Enable all supported algorithms:
+ - AES (CBC, CTR, ECB, XTS)
+ - 3DES (CBC, ECB)
+ - DES (CBC, ECB)
+ - SHA1, HMAC-SHA1
+ - SHA256, HMAC-SHA256
+
+ config CRYPTO_DEV_QCE_ENABLE_SKCIPHER
+ bool "Symmetric-key ciphers only"
+ select CRYPTO_DEV_QCE_SKCIPHER
+ help
+ Enable symmetric-key ciphers only:
+ - AES (CBC, CTR, ECB, XTS)
+ - 3DES (ECB, CBC)
+ - DES (ECB, CBC)
+
+ config CRYPTO_DEV_QCE_ENABLE_SHA
+ bool "Hash/HMAC only"
+ select CRYPTO_DEV_QCE_SHA
+ help
+ Enable hashes/HMAC algorithms only:
+ - SHA1, HMAC-SHA1
+ - SHA256, HMAC-SHA256
+
+endchoice

config CRYPTO_DEV_QCOM_RNG
tristate "Qualcomm Random Number Generator Driver"
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
qcrypto-objs := core.o \
common.o \
- dma.o \
- sha.o \
- skcipher.o
+ dma.o
+
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
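
The Kconfig and Makefile hunks work together: a qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o line expands to qcrypto-y += sha.o when the bool is set, and to the never-used variable qcrypto- when it is not, so the object simply drops out of the link. kbuild also emits a matching CONFIG_* preprocessor define for each enabled symbol, which the #ifdef guards in the C hunks below rely on. A minimal standalone sketch of that preprocessor side, with hypothetical register_* helpers standing in for the driver's real registration code:

/*
 * Illustrative sketch only -- not part of the patch. Each Kconfig bool
 * set to 'y' surfaces as a CONFIG_* define, so a whole family of
 * handlers drops out of the build when its symbol is unset.
 * Compile with one or both of:
 *   cc -DCONFIG_CRYPTO_DEV_QCE_SKCIPHER demo.c
 *   cc -DCONFIG_CRYPTO_DEV_QCE_SHA demo.c
 */
#include <stdio.h>

#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
static void register_skciphers(void)	/* hypothetical stand-in */
{
	puts("skcipher algorithms registered");
}
#endif

#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static void register_hashes(void)	/* hypothetical stand-in */
{
	puts("hash/HMAC algorithms registered");
}
#endif

int main(void)
{
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	register_skciphers();
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	register_hashes();
#endif
	return 0;
}

Building with only -DCONFIG_CRYPTO_DEV_QCE_SHA yields a binary that registers hashes alone, mirroring the "Hash/HMAC only" choice above.
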
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -45,52 +45,56 @@ qce_clear_array(struct qce_device *qce,
qce_write(qce, offset + i * sizeof(u32), 0);
}
-static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+static u32 qce_config_reg(struct qce_device *qce, int little)
{
- u32 cfg = 0;
+ u32 beats = (qce->burst_size >> 3) - 1;
+ u32 pipe_pair = qce->pipe_pair_id;
+ u32 config;
- if (IS_AES(flags)) {
- if (aes_key_size == AES_KEYSIZE_128)
- cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
- else if (aes_key_size == AES_KEYSIZE_256)
- cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
- }
+ config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+ config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+ BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+ config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+ config &= ~HIGH_SPD_EN_N_SHIFT;
- if (IS_AES(flags))
- cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
- else if (IS_DES(flags) || IS_3DES(flags))
- cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+ if (little)
+ config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
- if (IS_DES(flags))
- cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+ return config;
+}
- if (IS_3DES(flags))
- cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+ __be32 *d = dst;
+ const u8 *s = src;
+ unsigned int n;
- switch (flags & QCE_MODE_MASK) {
- case QCE_MODE_ECB:
- cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CBC:
- cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CTR:
- cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_XTS:
- cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CCM:
- cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
- cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
- break;
- default:
- return ~0;
+ n = len / sizeof(u32);
+ for (; n > 0; n--) {
+ *d = cpu_to_be32p((const __u32 *) s);
+ s += sizeof(__u32);
+ d++;
}
+}
- return cfg;
+static void qce_setup_config(struct qce_device *qce)
+{
+ u32 config;
+
+ /* get big endianness */
+ config = qce_config_reg(qce, 0);
+
+ /* clear status */
+ qce_write(qce, REG_STATUS, 0);
+ qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
u32 cfg = 0;
@@ -137,88 +141,6 @@ static u32 qce_auth_cfg(unsigned long fl
return cfg;
}
-static u32 qce_config_reg(struct qce_device *qce, int little)
-{
- u32 beats = (qce->burst_size >> 3) - 1;
- u32 pipe_pair = qce->pipe_pair_id;
- u32 config;
-
- config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
- config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
- BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
- config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
- config &= ~HIGH_SPD_EN_N_SHIFT;
-
- if (little)
- config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
-
- return config;
-}
-
-void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
-{
- __be32 *d = dst;
- const u8 *s = src;
- unsigned int n;
-
- n = len / sizeof(u32);
- for (; n > 0; n--) {
- *d = cpu_to_be32p((const __u32 *) s);
- s += sizeof(__u32);
- d++;
- }
-}
-
-static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
-{
- u8 swap[QCE_AES_IV_LENGTH];
- u32 i, j;
-
- if (ivsize > QCE_AES_IV_LENGTH)
- return;
-
- memset(swap, 0, QCE_AES_IV_LENGTH);
-
- for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
- i < QCE_AES_IV_LENGTH; i++, j--)
- swap[i] = src[j];
-
- qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
-}
-
-static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
- unsigned int enckeylen, unsigned int cryptlen)
-{
- u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
- unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
- unsigned int xtsdusize;
-
- qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
- enckeylen / 2);
- qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
-
- /* xts du size 512B */
- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
-}
-
-static void qce_setup_config(struct qce_device *qce)
-{
- u32 config;
-
- /* get big endianness */
- config = qce_config_reg(qce, 0);
-
- /* clear status */
- qce_write(qce, REG_STATUS, 0);
- qce_write(qce, REG_CONFIG, config);
-}
-
-static inline void qce_crypto_go(struct qce_device *qce)
-{
- qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
-}
-
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
@@ -303,6 +225,87 @@ go_proc:
return 0;
}
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+ u32 cfg = 0;
+
+ if (IS_AES(flags)) {
+ if (aes_key_size == AES_KEYSIZE_128)
+ cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+ else if (aes_key_size == AES_KEYSIZE_256)
+ cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+ }
+
+ if (IS_AES(flags))
+ cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+ else if (IS_DES(flags) || IS_3DES(flags))
+ cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+ if (IS_DES(flags))
+ cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+ if (IS_3DES(flags))
+ cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+ switch (flags & QCE_MODE_MASK) {
+ case QCE_MODE_ECB:
+ cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CBC:
+ cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CTR:
+ cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_XTS:
+ cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CCM:
+ cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+ cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+ break;
+ default:
+ return ~0;
+ }
+
+ return cfg;
+}
+
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+ u8 swap[QCE_AES_IV_LENGTH];
+ u32 i, j;
+
+ if (ivsize > QCE_AES_IV_LENGTH)
+ return;
+
+ memset(swap, 0, QCE_AES_IV_LENGTH);
+
+ for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+ i < QCE_AES_IV_LENGTH; i++, j--)
+ swap[i] = src[j];
+
+ qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+ unsigned int enckeylen, unsigned int cryptlen)
+{
+ u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+ unsigned int xtsdusize;
+
+ qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+ enckeylen / 2);
+ qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+ /* xts du size 512B */
+ xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
@@ -384,15 +387,20 @@ static int qce_setup_regs_skcipher(struc
return 0;
}
+#endif
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
u32 offset)
{
switch (type) {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
return qce_setup_regs_skcipher(async_req, totallen, offset);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
+#endif
default:
return -EINVAL;
}
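
Note that qce_cpu_to_be32p_array() loses its static qualifier in the hunk above: once sha.c and skcipher.c are built independently, both need this shared helper from common.c. It repacks a byte buffer into big-endian 32-bit words before the driver writes keys and IVs to the engine's registers. A rough userspace approximation, with htonl() standing in for the kernel's cpu_to_be32p() (an assumption for illustration, not the driver's code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl(): host to big-endian, like cpu_to_be32() */

/* Repack a byte buffer into big-endian 32-bit words. On a
 * little-endian host each word is byte-reversed in place; on a
 * big-endian host this is the identity. */
static void to_be32_array(uint32_t *dst, const uint8_t *src, unsigned int len)
{
	unsigned int n = len / sizeof(uint32_t);

	for (; n > 0; n--) {
		uint32_t v;

		memcpy(&v, src, sizeof(v));	/* tolerate unaligned input */
		*dst++ = htonl(v);
		src += sizeof(v);
	}
}

int main(void)
{
	const uint8_t key[8] = { 0x01, 0x02, 0x03, 0x04,
				 0xaa, 0xbb, 0xcc, 0xdd };
	uint32_t words[2];

	to_be32_array(words, key, sizeof(key));

	/* On a little-endian host this prints 04030201ddccbbaa;
	 * on a big-endian host, 01020304aabbccdd (unchanged). */
	for (size_t i = 0; i < sizeof(words); i++)
		printf("%02x", ((const uint8_t *)words)[i]);
	putchar('\n');
	return 0;
}
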
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,8 +22,12 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
&skcipher_ops,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
&ahash_ops,
+#endif
};
static void qce_unregister_algs(struct qce_device *qce)
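
The core.c hunk is the final piece: qce_ops[] only contains entries for the families that were built, so the registration loop that walks it never sees a disabled family and needs no runtime configuration. A standalone sketch of that pattern with hypothetical names (struct algo_ops and the family strings are stand-ins, not the kernel's types); compile with at least one of the two -D flags, which the Kconfig choice above guarantees in the real build:

#include <stdio.h>

struct algo_ops {
	const char *family;	/* stand-in for the kernel's ops callbacks */
};

#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
static const struct algo_ops skcipher_ops = { .family = "skcipher" };
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static const struct algo_ops ahash_ops = { .family = "ahash" };
#endif

/* Mirrors qce_ops[]: entries for disabled families vanish at compile
 * time, so no NULL checks or runtime flags are needed. */
static const struct algo_ops *qce_ops_demo[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
};

int main(void)
{
	for (size_t i = 0; i < sizeof(qce_ops_demo) / sizeof(qce_ops_demo[0]); i++)
		printf("registering %s algorithms\n", qce_ops_demo[i]->family);
	return 0;
}
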