cryptosoft.c

/*
 * An OCF module that uses the linux kernel cryptoapi, based on the
 * original cryptosoft for BSD by Angelos D. Keromytis ([email protected])
 * but is mostly unrecognisable.
 *
 * Written by David McCullough <[email protected]>
 * Copyright (C) 2004-2011 David McCullough
 * Copyright (C) 2004-2005 Intel Corporation.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 * 1. distributions of this source code include the above copyright
 *    notice, this list of conditions and the following disclaimer;
 *
 * 2. distributions in binary form include the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in the documentation and/or other associated materials;
 *
 * 3. the copyright holder's name is not used to endorse products
 *    built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 */
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
#include <linux/scatterlist.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
#include <crypto/hash.h>
#endif
#include <cryptodev.h>
#include <uio.h>
struct {
	softc_device_decl	sc_dev;
} swcr_softc;

#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

#define SW_TYPE_CIPHER		0x01
#define SW_TYPE_HMAC		0x02
#define SW_TYPE_HASH		0x04
#define SW_TYPE_COMP		0x08
#define SW_TYPE_BLKCIPHER	0x10
#define SW_TYPE_ALG_MASK	0x1f

#define SW_TYPE_ASYNC		0x8000

#define SW_TYPE_INUSE		0x10000000
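/*
 * SW_TYPE_INUSE marks a tfm that is mid-operation; the hash/hmac paths
 * keep per-operation state in the tfm, so only one request may use such
 * a tfm at a time (see swcr_process_req below).
 */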
/* We change some of the above if we have an async interface */
#define SW_TYPE_ALG_AMASK	(SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)

#define SW_TYPE_ABLKCIPHER	(SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
#define SW_TYPE_AHASH		(SW_TYPE_HASH | SW_TYPE_ASYNC)
#define SW_TYPE_AHMAC		(SW_TYPE_HMAC | SW_TYPE_ASYNC)

#define SCATTERLIST_MAX 16
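/*
 * Per-algorithm session state. Each OCF session is a linked list of
 * swcr_data, one entry per algorithm in the caller's cryptoini chain;
 * sw_tfm is the underlying linux crypto transform for that algorithm.
 */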
struct swcr_data {
	struct work_struct	workq;
	int			sw_type;
	int			sw_alg;
	struct crypto_tfm	*sw_tfm;
	spinlock_t		sw_tfm_lock;
	union {
		struct {
			char *sw_key;
			int  sw_klen;
			int  sw_mlen;
		} hmac;
		void *sw_comp_buf;
	} u;
	struct swcr_data	*sw_next;
};
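/*
 * Per-request state. A request may span several descriptors and may
 * complete asynchronously, so everything needed to resume processing
 * (scatterlist, IV, digest result, the async request itself) lives
 * here rather than on the stack.
 */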
struct swcr_req {
	struct swcr_data	*sw_head;
	struct swcr_data	*sw;
	struct cryptop		*crp;
	struct cryptodesc	*crd;
	struct scatterlist	sg[SCATTERLIST_MAX];
	unsigned char		iv[EALG_MAX_BLOCK_LEN];
	char			result[HASH_MAX_LEN];
	void			*crypto_req;
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *swcr_req_cache;
#else
static struct kmem_cache *swcr_req_cache;
#endif
#ifndef CRYPTO_TFM_MODE_CBC
/*
 * As of linux-2.6.21 this is no longer defined, and presumably no longer
 * needs to be passed into the crypto core code.
 */
#define CRYPTO_TFM_MODE_CBC 0
#define CRYPTO_TFM_MODE_ECB 0
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/*
 * Linux 2.6.19 introduced a new Crypto API; set up macros to convert
 * the new API into the old one.
 */
/* Symmetric/Block Cipher */
struct blkcipher_desc
{
	struct crypto_tfm *tfm;
	void *info;
};
#define ecb(X)				#X , CRYPTO_TFM_MODE_ECB
#define cbc(X)				#X , CRYPTO_TFM_MODE_CBC
#define crypto_has_blkcipher(X, Y, Z)	crypto_alg_available(X, 0)
#define crypto_blkcipher_cast(X)	X
#define crypto_blkcipher_tfm(X)		X
#define crypto_alloc_blkcipher(X, Y, Z)	crypto_alloc_tfm(X, mode)
#define crypto_blkcipher_ivsize(X)	crypto_tfm_alg_ivsize(X)
#define crypto_blkcipher_blocksize(X)	crypto_tfm_alg_blocksize(X)
#define crypto_blkcipher_setkey(X, Y, Z) crypto_cipher_setkey(X, Y, Z)
#define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
		crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
		crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_set_flags(x, y)	/* nop */
#define crypto_free_blkcipher(x)	crypto_free_tfm(x)
#define crypto_free_comp		crypto_free_tfm
#define crypto_free_hash		crypto_free_tfm

/* Hash/HMAC/Digest */
struct hash_desc
{
	struct crypto_tfm *tfm;
};
#define hmac(X)				#X , 0
#define crypto_has_hash(X, Y, Z)	crypto_alg_available(X, 0)
#define crypto_hash_cast(X)		X
#define crypto_hash_tfm(X)		X
#define crypto_alloc_hash(X, Y, Z)	crypto_alloc_tfm(X, mode)
#define crypto_hash_digestsize(X)	crypto_tfm_alg_digestsize(X)
#define crypto_hash_digest(W, X, Y, Z) \
		crypto_digest_digest((W)->tfm, X, sg_num, Z)

/* Asymmetric Cipher */
#define crypto_has_cipher(X, Y, Z)	crypto_alg_available(X, 0)

/* Compression */
#define crypto_has_comp(X, Y, Z)	crypto_alg_available(X, 0)
#define crypto_comp_tfm(X)		X
#define crypto_comp_cast(X)		X
#define crypto_alloc_comp(X, Y, Z)	crypto_alloc_tfm(X, mode)
#define plain(X)	#X , 0
#else
#define ecb(X)		"ecb(" #X ")" , 0
#define cbc(X)		"cbc(" #X ")" , 0
#define hmac(X)		"hmac(" #X ")" , 0
#define plain(X)	#X , 0
#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
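/*
 * For example, cbc(aes) expands to the two initialiser fields
 * "cbc(aes)", 0 on 2.6.19+ kernels, and to "aes", CRYPTO_TFM_MODE_CBC
 * on older ones, matching the alg_name/mode pair in crypto_details.
 */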
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* no ablkcipher in older kernels */
#define crypto_alloc_ablkcipher(a,b,c)		(NULL)
#define crypto_ablkcipher_tfm(x)		((struct crypto_tfm *)(x))
#define crypto_ablkcipher_set_flags(a, b)	/* nop */
#define crypto_ablkcipher_setkey(x, y, z)	(-EINVAL)
#define crypto_has_ablkcipher(a,b,c)		(0)
#else
#define HAVE_ABLKCIPHER
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
/* no ahash in older kernels */
#define crypto_ahash_tfm(x)		((struct crypto_tfm *)(x))
#define crypto_alloc_ahash(a,b,c)	(NULL)
#define crypto_ahash_digestsize(x)	0
#else
#define HAVE_AHASH
#endif

struct crypto_details {
	char *alg_name;
	int mode;
	int sw_type;
};

static struct crypto_details crypto_details[] = {
	[CRYPTO_DES_CBC]	= { cbc(des),		SW_TYPE_BLKCIPHER, },
	[CRYPTO_3DES_CBC]	= { cbc(des3_ede),	SW_TYPE_BLKCIPHER, },
	[CRYPTO_BLF_CBC]	= { cbc(blowfish),	SW_TYPE_BLKCIPHER, },
	[CRYPTO_CAST_CBC]	= { cbc(cast5),		SW_TYPE_BLKCIPHER, },
	[CRYPTO_SKIPJACK_CBC]	= { cbc(skipjack),	SW_TYPE_BLKCIPHER, },
	[CRYPTO_MD5_HMAC]	= { hmac(md5),		SW_TYPE_HMAC, },
	[CRYPTO_SHA1_HMAC]	= { hmac(sha1),		SW_TYPE_HMAC, },
	[CRYPTO_RIPEMD160_HMAC]	= { hmac(ripemd160),	SW_TYPE_HMAC, },
	[CRYPTO_MD5_KPDK]	= { plain(md5-kpdk),	SW_TYPE_HASH, },
	[CRYPTO_SHA1_KPDK]	= { plain(sha1-kpdk),	SW_TYPE_HASH, },
	[CRYPTO_AES_CBC]	= { cbc(aes),		SW_TYPE_BLKCIPHER, },
	[CRYPTO_ARC4]		= { ecb(arc4),		SW_TYPE_BLKCIPHER, },
	[CRYPTO_MD5]		= { plain(md5),		SW_TYPE_HASH, },
	[CRYPTO_SHA1]		= { plain(sha1),	SW_TYPE_HASH, },
	[CRYPTO_NULL_HMAC]	= { hmac(digest_null),	SW_TYPE_HMAC, },
	[CRYPTO_NULL_CBC]	= { cbc(cipher_null),	SW_TYPE_BLKCIPHER, },
	[CRYPTO_DEFLATE_COMP]	= { plain(deflate),	SW_TYPE_COMP, },
	[CRYPTO_SHA2_256_HMAC]	= { hmac(sha256),	SW_TYPE_HMAC, },
	[CRYPTO_SHA2_384_HMAC]	= { hmac(sha384),	SW_TYPE_HMAC, },
	[CRYPTO_SHA2_512_HMAC]	= { hmac(sha512),	SW_TYPE_HMAC, },
	[CRYPTO_CAMELLIA_CBC]	= { cbc(camellia),	SW_TYPE_BLKCIPHER, },
	[CRYPTO_SHA2_256]	= { plain(sha256),	SW_TYPE_HASH, },
	[CRYPTO_SHA2_384]	= { plain(sha384),	SW_TYPE_HASH, },
	[CRYPTO_SHA2_512]	= { plain(sha512),	SW_TYPE_HASH, },
	[CRYPTO_RIPEMD160]	= { plain(ripemd160),	SW_TYPE_HASH, },
};
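/*
 * crypto_details is indexed by the OCF algorithm number; entries whose
 * alg_name is NULL or empty (i.e. algorithms not listed above) are
 * rejected as unsupported by swcr_newsession and skipped at registration
 * time in cryptosoft_init.
 */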
int32_t swcr_id = -1;
module_param(swcr_id, int, 0444);
MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");

int swcr_fail_if_compression_grows = 1;
module_param(swcr_fail_if_compression_grows, int, 0644);
MODULE_PARM_DESC(swcr_fail_if_compression_grows,
		"Treat compression that results in more data as a failure");

int swcr_no_ahash = 0;
module_param(swcr_no_ahash, int, 0644);
MODULE_PARM_DESC(swcr_no_ahash,
		"Do not use async hash/hmac even if available");

int swcr_no_ablk = 0;
module_param(swcr_no_ablk, int, 0644);
MODULE_PARM_DESC(swcr_no_ablk,
		"Do not use async blk ciphers even if available");

static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum = 0;

static int swcr_process(device_t, struct cryptop *, int);
static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
static int swcr_freesession(device_t, u_int64_t);

static device_method_t swcr_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession, swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),
};

#define debug swcr_debug
int swcr_debug = 0;
module_param(swcr_debug, int, 0644);
MODULE_PARM_DESC(swcr_debug, "Enable debug");

static void swcr_process_req(struct swcr_req *req);
/*
 * Some things just need to be run with user (process) context, no matter
 * what; the kernel compression libs use vmalloc/vfree, for example, which
 * cannot be called from interrupt context.
 */
typedef struct {
	struct work_struct wq;
	void	(*func)(void *arg);
	void	*arg;
} execute_later_t;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
doing_it_now(struct work_struct *wq)
{
	execute_later_t *w = container_of(wq, execute_later_t, wq);
	(w->func)(w->arg);
	kfree(w);
}
#else
static void
doing_it_now(void *arg)
{
	execute_later_t *w = (execute_later_t *) arg;
	(w->func)(w->arg);
	kfree(w);
}
#endif

static void
execute_later(void (fn)(void *), void *arg)
{
	execute_later_t *w;

	w = (execute_later_t *) kmalloc(sizeof(execute_later_t), SLAB_ATOMIC);
	if (w) {
		memset(w, '\0', sizeof(*w)); /* was sizeof(w), which only cleared pointer-size bytes */
		w->func = fn;
		w->arg = arg;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
		INIT_WORK(&w->wq, doing_it_now);
#else
		INIT_WORK(&w->wq, doing_it_now, w);
#endif
		schedule_work(&w->wq);
	}
}
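/*
 * execute_later(fn, arg) queues fn(arg) on the shared kernel workqueue
 * so that it runs in process context. It is used below to defer
 * crypto_free_comp() when freeing from interrupt context, and to retry
 * swcr_process_req() when a hash tfm is busy.
 */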
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	u_int32_t i;
	int error;
	char *algo;
	int mode;

	dprintk("%s()\n", __FUNCTION__);
	if (sid == NULL || cri == NULL) {
		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
			return ENOBUFS;
		}
		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			memcpy(swd, swcr_sessions,
					(swcr_sesnum / 2) * sizeof(struct swcr_data *));
			kfree(swcr_sessions);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;
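	/*
	 * We have a free slot at index i; build the session as a chain of
	 * swcr_data entries, one per cryptoini in the caller's list.
	 */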
	while (cri) {
		*swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
				SLAB_ATOMIC);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
			return ENOBUFS;
		}
		memset(*swd, 0, sizeof(struct swcr_data));

		if (cri->cri_alg < 0 ||
				cri->cri_alg >= sizeof(crypto_details)/sizeof(crypto_details[0])) {
			printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		algo = crypto_details[cri->cri_alg].alg_name;
		if (!algo || !*algo) {
			printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		mode = crypto_details[cri->cri_alg].mode;
		(*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
		(*swd)->sw_alg = cri->cri_alg;

		spin_lock_init(&(*swd)->sw_tfm_lock);

		/* Algorithm specific configuration */
		switch (cri->cri_alg) {
		case CRYPTO_NULL_CBC:
			cri->cri_klen = 0; /* make it work with crypto API */
			break;
		default:
			break;
		}
		if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
			dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
					algo, mode);

			/* try async first */
			(*swd)->sw_tfm = swcr_no_ablk ? NULL :
					crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
			if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) {
				dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
				(*swd)->sw_type |= SW_TYPE_ASYNC;
			} else {
				(*swd)->sw_tfm = crypto_blkcipher_tfm(
						crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
				if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm))
					dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
			}
			if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) {
				int err;
				dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
						algo, mode);
				err = IS_ERR((*swd)->sw_tfm) ? -(PTR_ERR((*swd)->sw_tfm)) : EINVAL;
				(*swd)->sw_tfm = NULL; /* ensure NULL */
				swcr_freesession(NULL, i);
				return err;
			}

			if (debug) {
				int j;
				dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
						__FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
				/* use a local index; reusing i would clobber the session slot */
				for (j = 0; j < (cri->cri_klen + 7) / 8; j++)
					dprintk("%s0x%x", (j % 8) ? " " : "\n ",
							cri->cri_key[j] & 0xff);
				dprintk("\n");
			}
			if ((*swd)->sw_type & SW_TYPE_ASYNC) {
				/* OCF doesn't enforce keys */
				crypto_ablkcipher_set_flags(
						__crypto_ablkcipher_cast((*swd)->sw_tfm),
						CRYPTO_TFM_REQ_WEAK_KEY);
				error = crypto_ablkcipher_setkey(
						__crypto_ablkcipher_cast((*swd)->sw_tfm),
						cri->cri_key, (cri->cri_klen + 7) / 8);
			} else {
				/* OCF doesn't enforce keys */
				crypto_blkcipher_set_flags(
						crypto_blkcipher_cast((*swd)->sw_tfm),
						CRYPTO_TFM_REQ_WEAK_KEY);
				error = crypto_blkcipher_setkey(
						crypto_blkcipher_cast((*swd)->sw_tfm),
						cri->cri_key, (cri->cri_klen + 7) / 8);
			}
			if (error) {
				printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
						(*swd)->sw_tfm->crt_flags);
				swcr_freesession(NULL, i);
				return error;
			}
		} else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
			dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
					algo, mode);

			/* try async first */
			(*swd)->sw_tfm = swcr_no_ahash ? NULL :
					crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
			if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) {
				dprintk("%s %s hash is async\n", __FUNCTION__, algo);
				(*swd)->sw_type |= SW_TYPE_ASYNC;
			} else {
				dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
				(*swd)->sw_tfm = crypto_hash_tfm(
						crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
			}
			/* crypto_alloc_*hash() can return an ERR_PTR as well as NULL */
			if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) {
				dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
						algo, mode);
				(*swd)->sw_tfm = NULL; /* ensure NULL */
				swcr_freesession(NULL, i);
				return EINVAL;
			}

			(*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
			(*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
					SLAB_ATOMIC);
			if ((*swd)->u.hmac.sw_key == NULL) {
				swcr_freesession(NULL, i);
				dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
				return ENOBUFS;
			}
			memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
			if (cri->cri_mlen) {
				(*swd)->u.hmac.sw_mlen = cri->cri_mlen;
			} else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
				(*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
						__crypto_ahash_cast((*swd)->sw_tfm));
			} else {
				(*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
						crypto_hash_cast((*swd)->sw_tfm));
			}
		} else if ((*swd)->sw_type & SW_TYPE_COMP) {
			(*swd)->sw_tfm = crypto_comp_tfm(
					crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
			if (!(*swd)->sw_tfm) {
				dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
						algo, mode);
				swcr_freesession(NULL, i);
				return EINVAL;
			}
			(*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
			if ((*swd)->u.sw_comp_buf == NULL) {
				swcr_freesession(NULL, i);
				dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
				return ENOBUFS;
			}
		} else {
			printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	dprintk("%s()\n", __FUNCTION__);
	/* NB: >=, since sid == swcr_sesnum would index past the table */
	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
			swcr_sessions[sid] == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return(EINVAL);
	}

	/* Silently accept and return */
	if (sid == 0)
		return(0);

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;
		if (swd->sw_tfm) {
			switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
			case SW_TYPE_AHMAC:
			case SW_TYPE_AHASH:
				crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
				break;
#endif
#ifdef HAVE_ABLKCIPHER
			case SW_TYPE_ABLKCIPHER:
				crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
				break;
#endif
			case SW_TYPE_BLKCIPHER:
				crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
				break;
			case SW_TYPE_HMAC:
			case SW_TYPE_HASH:
				crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
				break;
			case SW_TYPE_COMP:
				if (in_interrupt())
					execute_later((void (*)(void *))crypto_free_comp,
							(void *)crypto_comp_cast(swd->sw_tfm));
				else
					crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
				break;
			default:
				crypto_free_tfm(swd->sw_tfm);
				break;
			}
			swd->sw_tfm = NULL;
		}
		if (swd->sw_type & SW_TYPE_COMP) {
			if (swd->u.sw_comp_buf)
				kfree(swd->u.sw_comp_buf);
		} else {
			if (swd->u.hmac.sw_key)
				kfree(swd->u.hmac.sw_key);
		}
		kfree(swd);
	}
	return 0;
}
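/*
 * Completion path for a single descriptor: clear the tfm's INUSE flag,
 * free any async request, then either start the next descriptor in the
 * chain or hand the finished cryptop back via crypto_done().
 */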
static void swcr_process_req_complete(struct swcr_req *req)
{
	dprintk("%s()\n", __FUNCTION__);

	if (req->sw->sw_type & SW_TYPE_INUSE) {
		unsigned long flags;
		spin_lock_irqsave(&req->sw->sw_tfm_lock, flags);
		req->sw->sw_type &= ~SW_TYPE_INUSE;
		spin_unlock_irqrestore(&req->sw->sw_tfm_lock, flags);
	}

	if (req->crp->crp_etype)
		goto done;

	switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
#if defined(HAVE_AHASH)
	case SW_TYPE_AHMAC:
	case SW_TYPE_AHASH:
		crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
				req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
		ahash_request_free(req->crypto_req);
		break;
#endif
#if defined(HAVE_ABLKCIPHER)
	case SW_TYPE_ABLKCIPHER:
		ablkcipher_request_free(req->crypto_req);
		break;
#endif
	case SW_TYPE_CIPHER:
	case SW_TYPE_HMAC:
	case SW_TYPE_HASH:
	case SW_TYPE_COMP:
	case SW_TYPE_BLKCIPHER:
		break;
	default:
		req->crp->crp_etype = EINVAL;
		goto done;
	}

	req->crd = req->crd->crd_next;
	if (req->crd) {
		swcr_process_req(req);
		return;
	}

done:
	dprintk("%s crypto_done %p\n", __FUNCTION__, req);
	crypto_done(req->crp);
	kmem_cache_free(swcr_req_cache, req);
}
#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
static void swcr_process_callback(struct crypto_async_request *creq, int err)
{
	struct swcr_req *req = creq->data;

	dprintk("%s()\n", __FUNCTION__);
	if (err) {
		if (err == -EINPROGRESS)
			return;
		dprintk("%s() fail %d\n", __FUNCTION__, -err);
		req->crp->crp_etype = -err;
	}
	swcr_process_req_complete(req);
}
#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
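/*
 * Run one crypto descriptor: locate its session context, build a
 * scatterlist over the request buffer (skb, iov, or contiguous), and
 * dispatch to the matching linux crypto operation. Async operations
 * return with -EINPROGRESS/-EBUSY and finish later in
 * swcr_process_callback(); everything else falls through to
 * swcr_process_req_complete().
 */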
static void swcr_process_req(struct swcr_req *req)
{
	struct swcr_data *sw;
	struct cryptop *crp = req->crp;
	struct cryptodesc *crd = req->crd;
	struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
	struct uio *uiop = (struct uio *) crp->crp_buf;
	int sg_num, sg_len, skip;

	dprintk("%s()\n", __FUNCTION__);

	/*
	 * Find the crypto context.
	 *
	 * XXX Note that the logic here prevents us from having
	 * XXX the same algorithm multiple times in a session
	 * XXX (or rather, we can but it won't give us the right
	 * XXX results). To do that, we'd need some way of differentiating
	 * XXX between the various instances of an algorithm (so we can
	 * XXX locate the correct crypto context).
	 */
	for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
		;

	/* No such context ? */
	if (sw == NULL) {
		crp->crp_etype = EINVAL;
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		goto done;
	}

	/*
	 * For some types we must ensure there is only one user at a time,
	 * as state stored in the tfm during an operation could otherwise
	 * be corrupted.
	 */
	switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
	case SW_TYPE_AHMAC:
	case SW_TYPE_AHASH:
#endif
	case SW_TYPE_HMAC:
	case SW_TYPE_HASH: {
		unsigned long flags;
		spin_lock_irqsave(&sw->sw_tfm_lock, flags);
		if (sw->sw_type & SW_TYPE_INUSE) {
			spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
			execute_later((void (*)(void *))swcr_process_req, (void *)req);
			return;
		}
		sw->sw_type |= SW_TYPE_INUSE;
		spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
	} break;
	}
	req->sw = sw;
	skip = crd->crd_skip;

	/*
	 * set up the SG list, skipping `skip' bytes from the start of the buffer
	 */
	memset(req->sg, 0, sizeof(req->sg));
	sg_init_table(req->sg, SCATTERLIST_MAX);
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		int i, len;

		sg_num = 0;
		sg_len = 0;

		if (skip < skb_headlen(skb)) {
			len = skb_headlen(skb) - skip;
			if (len + sg_len > crd->crd_len)
				len = crd->crd_len - sg_len;
			sg_set_page(&req->sg[sg_num],
					virt_to_page(skb->data + skip), len,
					offset_in_page(skb->data + skip));
			sg_len += len;
			sg_num++;
			skip = 0;
		} else
			skip -= skb_headlen(skb);

		for (i = 0; sg_len < crd->crd_len &&
				i < skb_shinfo(skb)->nr_frags &&
				sg_num < SCATTERLIST_MAX; i++) {
			if (skip < skb_shinfo(skb)->frags[i].size) {
				len = skb_shinfo(skb)->frags[i].size - skip;
				if (len + sg_len > crd->crd_len)
					len = crd->crd_len - sg_len;
				sg_set_page(&req->sg[sg_num],
						skb_shinfo(skb)->frags[i].page,
						len,
						skb_shinfo(skb)->frags[i].page_offset + skip);
				sg_len += len;
				sg_num++;
				skip = 0;
			} else
				skip -= skb_shinfo(skb)->frags[i].size;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		int len;

		sg_len = 0;
		for (sg_num = 0; sg_len < crd->crd_len &&
				sg_num < uiop->uio_iovcnt &&
				sg_num < SCATTERLIST_MAX; sg_num++) {
			if (skip <= uiop->uio_iov[sg_num].iov_len) {
				len = uiop->uio_iov[sg_num].iov_len - skip;
				if (len + sg_len > crd->crd_len)
					len = crd->crd_len - sg_len;
				sg_set_page(&req->sg[sg_num],
						virt_to_page(uiop->uio_iov[sg_num].iov_base + skip),
						len,
						offset_in_page(uiop->uio_iov[sg_num].iov_base + skip));
				sg_len += len;
				skip = 0;
			} else
				skip -= uiop->uio_iov[sg_num].iov_len;
		}
	} else {
		sg_len = (crp->crp_ilen - skip);
		if (sg_len > crd->crd_len)
			sg_len = crd->crd_len;
		sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
				sg_len, offset_in_page(crp->crp_buf + skip));
		sg_num = 1;
	}
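	/*
	 * At this point req->sg holds sg_num entries covering sg_len bytes
	 * of the request payload for this descriptor.
	 */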
	switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
	case SW_TYPE_AHMAC:
	case SW_TYPE_AHASH:
	{
		int ret;

		/* check we have room for the result */
		if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
			dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
					"digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
					crd->crd_inject, sw->u.hmac.sw_mlen);
			crp->crp_etype = EINVAL;
			goto done;
		}

		req->crypto_req =
				ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm), GFP_ATOMIC);
		if (!req->crypto_req) {
			crp->crp_etype = ENOMEM;
			dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
			goto done;
		}

		ahash_request_set_callback(req->crypto_req,
				CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

		memset(req->result, 0, sizeof(req->result));

		if (sw->sw_type & SW_TYPE_AHMAC)
			crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
					sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
		ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
		ret = crypto_ahash_digest(req->crypto_req);
		switch (ret) {
		case -EINPROGRESS:
		case -EBUSY:
			return;
		default:
		case 0:
			dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
			crp->crp_etype = ret;
			goto done;
		}
	} break;
#endif /* HAVE_AHASH */
#ifdef HAVE_ABLKCIPHER
	case SW_TYPE_ABLKCIPHER: {
		int ret;
		unsigned char *ivp = req->iv;
		int ivsize =
				crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));

		if (sg_len < crypto_ablkcipher_blocksize(
				__crypto_ablkcipher_cast(sw->sw_tfm))) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
					sg_len, crypto_ablkcipher_blocksize(
							__crypto_ablkcipher_cast(sw->sw_tfm)));
			goto done;
		}

		if (ivsize > sizeof(req->iv)) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
			goto done;
		}

		req->crypto_req = ablkcipher_request_alloc(
				__crypto_ablkcipher_cast(sw->sw_tfm), GFP_ATOMIC);
		if (!req->crypto_req) {
			crp->crp_etype = ENOMEM;
			dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
					__FILE__, __LINE__);
			goto done;
		}

		ablkcipher_request_set_callback(req->crypto_req,
				CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			int i, error;

			if (debug) {
				dprintk("%s key:", __FUNCTION__);
				for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
					dprintk("%s0x%x", (i % 8) ? " " : "\n ",
							crd->crd_key[i] & 0xff);
				dprintk("\n");
			}
			/* OCF doesn't enforce keys */
			crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
					CRYPTO_TFM_REQ_WEAK_KEY);
			error = crypto_ablkcipher_setkey(
					__crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
					(crd->crd_klen + 7) / 8);
			if (error) {
				dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
						error, sw->sw_tfm->crt_flags);
				crp->crp_etype = -error;
			}
		}

		if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
			if (crd->crd_flags & CRD_F_IV_EXPLICIT)
				ivp = crd->crd_iv;
			else
				get_random_bytes(ivp, ivsize);
			/*
			 * do we have to copy the IV back to the buffer ?
			 */
			if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			}
			ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
					sg_len, ivp);
			ret = crypto_ablkcipher_encrypt(req->crypto_req);
		} else { /* decrypt */
			if (crd->crd_flags & CRD_F_IV_EXPLICIT)
				ivp = crd->crd_iv;
			else
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
					sg_len, ivp);
			ret = crypto_ablkcipher_decrypt(req->crypto_req);
		}

		switch (ret) {
		case -EINPROGRESS:
		case -EBUSY:
			return;
		default:
		case 0:
			dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
			crp->crp_etype = ret;
			goto done;
		}
	} break;
#endif /* HAVE_ABLKCIPHER */
	case SW_TYPE_BLKCIPHER: {
		unsigned char iv[EALG_MAX_BLOCK_LEN];
		unsigned char *ivp = iv;
		struct blkcipher_desc desc;
		int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));

		if (sg_len < crypto_blkcipher_blocksize(
				crypto_blkcipher_cast(sw->sw_tfm))) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
					sg_len, crypto_blkcipher_blocksize(
							crypto_blkcipher_cast(sw->sw_tfm)));
			goto done;
		}

		if (ivsize > sizeof(iv)) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
			goto done;
		}

		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			int i, error;

			if (debug) {
				dprintk("%s key:", __FUNCTION__);
				for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
					dprintk("%s0x%x", (i % 8) ? " " : "\n ",
							crd->crd_key[i] & 0xff);
				dprintk("\n");
			}
			/* OCF doesn't enforce keys */
			crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
					CRYPTO_TFM_REQ_WEAK_KEY);
			error = crypto_blkcipher_setkey(
					crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
					(crd->crd_klen + 7) / 8);
			if (error) {
				dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
						error, sw->sw_tfm->crt_flags);
				crp->crp_etype = -error;
			}
		}

		memset(&desc, 0, sizeof(desc));
		desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);

		if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
			if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
				ivp = crd->crd_iv;
			} else {
				get_random_bytes(ivp, ivsize);
			}
			/*
			 * do we have to copy the IV back to the buffer ?
			 */
			if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			}
			desc.info = ivp;
			crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);
		} else { /* decrypt */
			if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
				ivp = crd->crd_iv;
			} else {
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			}
			desc.info = ivp;
			crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
		}
	} break;
	case SW_TYPE_HMAC:
	case SW_TYPE_HASH:
	{
		char result[HASH_MAX_LEN];
		struct hash_desc desc;

		/* check we have room for the result */
		if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
			dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
					"digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
					crd->crd_inject, sw->u.hmac.sw_mlen);
			crp->crp_etype = EINVAL;
			goto done;
		}

		memset(&desc, 0, sizeof(desc));
		desc.tfm = crypto_hash_cast(sw->sw_tfm);

		memset(result, 0, sizeof(result));

		if (sw->sw_type & SW_TYPE_HMAC) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
			crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
					req->sg, sg_num, result);
#else
			crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
					sw->u.hmac.sw_klen);
			crypto_hash_digest(&desc, req->sg, sg_len, result);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
		} else { /* SW_TYPE_HASH */
			crypto_hash_digest(&desc, req->sg, sg_len, result);
		}

		crypto_copyback(crp->crp_flags, crp->crp_buf,
				crd->crd_inject, sw->u.hmac.sw_mlen, result);
	}
	break;
	case SW_TYPE_COMP: {
		void *ibuf = NULL;
		void *obuf = sw->u.sw_comp_buf;
		int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
		int ret = 0;

		/*
		 * we need to use an additional copy if there is more than one
		 * input chunk since the kernel comp routines do not handle
		 * SG yet. Otherwise we just use the input buffer as is.
		 * Rather than allocate another buffer we just split the tmp
		 * buffer we already have.
		 * Perhaps we should just use zlib directly ?
		 */
		if (sg_num > 1) {
			int blk;

			ibuf = obuf;
			for (blk = 0; blk < sg_num; blk++) {
				memcpy(obuf, sg_virt(&req->sg[blk]),
						req->sg[blk].length);
				obuf += req->sg[blk].length;
			}
			olen -= sg_len;
		} else
			ibuf = sg_virt(&req->sg[0]);

		if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
			ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
					ibuf, ilen, obuf, &olen);
			if (!ret && olen > crd->crd_len) {
				dprintk("cryptosoft: ERANGE compress %d into %d\n",
						crd->crd_len, olen);
				if (swcr_fail_if_compression_grows)
					ret = ERANGE;
			}
		} else { /* decompress */
			ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
					ibuf, ilen, obuf, &olen);
			if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
				dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
						"space for %d, at offset %d\n",
						crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
				ret = ETOOSMALL;
			}
		}
		if (ret)
			dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);

		/*
		 * on success copy the result back; the linux crypto API
		 * returns -errno, so we need to fix that up
		 */
		crp->crp_etype = ret < 0 ? -ret : ret;
		if (ret == 0) {
			/* copy back the result and return its size */
			crypto_copyback(crp->crp_flags, crp->crp_buf,
					crd->crd_inject, olen, obuf);
			crp->crp_olen = olen;
		}
	} break;

	default:
		/* Unknown/unsupported algorithm */
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

done:
	swcr_process_req_complete(req);
}
/*
 * Process a crypto request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_req *req = NULL;
	u_int32_t lid;

	dprintk("%s()\n", __FUNCTION__);
	/* Sanity check */
	if (crp == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	crp->crp_etype = 0;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
			swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}

	/*
	 * do some error checking outside of the loop for SKB and IOV processing
	 * this leaves us with valid skb or uiop pointers for later
	 */
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
			printk("%s,%d: %d nr_frags > SCATTERLIST_MAX\n", __FILE__, __LINE__,
					skb_shinfo(skb)->nr_frags);
			crp->crp_etype = EINVAL; /* report the failure rather than silently succeeding */
			goto done;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		struct uio *uiop = (struct uio *) crp->crp_buf;
		if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
			printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX\n", __FILE__, __LINE__,
					uiop->uio_iovcnt);
			crp->crp_etype = EINVAL; /* report the failure rather than silently succeeding */
			goto done;
		}
	}

	/*
	 * setup a new request ready for queuing
	 */
	req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
	if (req == NULL) {
		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
		crp->crp_etype = ENOMEM;
		goto done;
	}
	memset(req, 0, sizeof(*req));

	req->sw_head = swcr_sessions[lid];
	req->crp = crp;
	req->crd = crp->crp_desc;

	swcr_process_req(req);
	return 0;

done:
	crypto_done(crp);
	if (req)
		kmem_cache_free(swcr_req_cache, req);
	return 0;
}
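/*
 * Module init: create the request cache, register with OCF as a software
 * (synchronous) driver, then probe the kernel crypto API for each entry
 * in crypto_details and register the algorithms that are present.
 */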
static int
cryptosoft_init(void)
{
	int i, sw_type, mode;
	char *algo;

	dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);

	swcr_req_cache = kmem_cache_create("cryptosoft_req",
			sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
			, NULL
#endif
			);
	if (!swcr_req_cache) {
		printk("cryptosoft: failed to create request cache\n");
		return -ENOENT;
	}

	softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);

	swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		printk("cryptosoft: Software crypto device cannot initialize!\n");
		kmem_cache_destroy(swcr_req_cache); /* don't leak the cache on failure */
		return -ENODEV;
	}

#define REGISTER(alg) \
		crypto_register(swcr_id, alg, 0, 0)

	for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
		int found;

		algo = crypto_details[i].alg_name;
		if (!algo || !*algo) {
			dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
			continue;
		}

		mode = crypto_details[i].mode;
		sw_type = crypto_details[i].sw_type;

		found = 0;
		switch (sw_type & SW_TYPE_ALG_MASK) {
		case SW_TYPE_CIPHER:
			found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
			break;
		case SW_TYPE_HMAC:
			found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
			break;
		case SW_TYPE_HASH:
			found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
			break;
		case SW_TYPE_COMP:
			found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
			break;
		case SW_TYPE_BLKCIPHER:
			found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
			if (!found && !swcr_no_ablk)
				found = crypto_has_ablkcipher(algo, 0, 0);
			break;
		}
		if (found) {
			REGISTER(i);
		} else {
			dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
					__FUNCTION__, sw_type, i, algo);
		}
	}
	return 0;
}
static void
cryptosoft_exit(void)
{
	dprintk("%s()\n", __FUNCTION__);
	crypto_unregister_all(swcr_id);
	swcr_id = -1;
	kmem_cache_destroy(swcr_req_cache);
}

late_initcall(cryptosoft_init);
module_exit(cryptosoft_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <[email protected]>");
MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");