cryptosoft.c

/*
 * An OCF module that uses the linux kernel cryptoapi, based on the
 * original cryptosoft for BSD by Angelos D. Keromytis ([email protected])
 * but is mostly unrecognisable.
 *
 * Written by David McCullough <[email protected]>
 * Copyright (C) 2004-2010 David McCullough
 * Copyright (C) 2004-2005 Intel Corporation.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 * 1. distributions of this source code include the above copyright
 *    notice, this list of conditions and the following disclaimer;
 *
 * 2. distributions in binary form include the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in the documentation and/or other associated materials;
 *
 * 3. the copyright holder's name is not used to endorse products
 *    built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
#include <linux/scatterlist.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
#include <crypto/hash.h>
#endif

#include <cryptodev.h>
#include <uio.h>
struct {
    softc_device_decl sc_dev;
} swcr_softc;

#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

#define SW_TYPE_CIPHER      0x01
#define SW_TYPE_HMAC        0x02
#define SW_TYPE_HASH        0x04
#define SW_TYPE_COMP        0x08
#define SW_TYPE_BLKCIPHER   0x10
#define SW_TYPE_ALG_MASK    0x1f

#define SW_TYPE_ASYNC       0x8000

/* We change some of the above if we have an async interface */
#define SW_TYPE_ALG_AMASK   (SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)
#define SW_TYPE_ABLKCIPHER  (SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
#define SW_TYPE_AHASH       (SW_TYPE_HASH | SW_TYPE_ASYNC)
#define SW_TYPE_AHMAC       (SW_TYPE_HMAC | SW_TYPE_ASYNC)
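
/*
 * The SW_TYPE_* values are independent bits: the low bits identify the
 * algorithm class and SW_TYPE_ASYNC records that the session was bound to an
 * async tfm, so e.g. SW_TYPE_AHMAC == (SW_TYPE_HMAC | SW_TYPE_ASYNC).
 * Masking a session's sw_type with SW_TYPE_ALG_AMASK therefore recovers both
 * facts at once, which is what lets the sync and async code paths share a
 * single switch statement below.
 */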
#define SCATTERLIST_MAX 16
struct swcr_data {
    int                  sw_type;
    int                  sw_alg;
    struct crypto_tfm   *sw_tfm;
    union {
        struct {
            char *sw_key;
            int   sw_klen;
            int   sw_mlen;
        } hmac;
        void *sw_comp_buf;
    } u;
    struct swcr_data    *sw_next;
};

struct swcr_req {
    struct swcr_data    *sw_head;
    struct swcr_data    *sw;
    struct cryptop      *crp;
    struct cryptodesc   *crd;
    struct scatterlist   sg[SCATTERLIST_MAX];
    unsigned char        iv[EALG_MAX_BLOCK_LEN];
    char                 result[HASH_MAX_LEN];
    void                *crypto_req;
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *swcr_req_cache;
#else
static struct kmem_cache *swcr_req_cache;
#endif
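
/*
 * One swcr_req is allocated from this cache per cryptop and carries all
 * per-request state (scatterlist, IV, digest result). On the synchronous
 * paths it is freed at the bottom of swcr_process_req(); on the async paths
 * swcr_process_callback() either re-submits it for the next descriptor in
 * the chain or frees it after calling crypto_done().
 */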
#ifndef CRYPTO_TFM_MODE_CBC
/*
 * As of linux-2.6.21 this is no longer defined, and presumably no longer
 * needed to be passed into the crypto core code.
 */
#define CRYPTO_TFM_MODE_CBC 0
#define CRYPTO_TFM_MODE_ECB 0
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/*
 * Linux 2.6.19 introduced a new Crypto API; set up macros to implement the
 * new API in terms of the old one.
 */

/* Symmetric/Block Cipher */
struct blkcipher_desc
{
    struct crypto_tfm *tfm;
    void *info;
};
#define ecb(X)                              #X , CRYPTO_TFM_MODE_ECB
#define cbc(X)                              #X , CRYPTO_TFM_MODE_CBC
#define crypto_has_blkcipher(X, Y, Z)       crypto_alg_available(X, 0)
#define crypto_blkcipher_cast(X)            X
#define crypto_blkcipher_tfm(X)             X
#define crypto_alloc_blkcipher(X, Y, Z)     crypto_alloc_tfm(X, mode)
#define crypto_blkcipher_ivsize(X)          crypto_tfm_alg_ivsize(X)
#define crypto_blkcipher_blocksize(X)       crypto_tfm_alg_blocksize(X)
#define crypto_blkcipher_setkey(X, Y, Z)    crypto_cipher_setkey(X, Y, Z)
#define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
        crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
        crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_set_flags(x, y)    /* nop */

/* Hash/HMAC/Digest */
struct hash_desc
{
    struct crypto_tfm *tfm;
};
#define hmac(X)                         #X , 0
#define crypto_has_hash(X, Y, Z)        crypto_alg_available(X, 0)
#define crypto_hash_cast(X)             X
#define crypto_hash_tfm(X)              X
#define crypto_alloc_hash(X, Y, Z)      crypto_alloc_tfm(X, mode)
#define crypto_hash_digestsize(X)       crypto_tfm_alg_digestsize(X)
#define crypto_hash_digest(W, X, Y, Z)  \
        crypto_digest_digest((W)->tfm, X, sg_num, Z)

/* Asymmetric Cipher */
#define crypto_has_cipher(X, Y, Z)      crypto_alg_available(X, 0)

/* Compression */
#define crypto_has_comp(X, Y, Z)        crypto_alg_available(X, 0)
#define crypto_comp_tfm(X)              X
#define crypto_comp_cast(X)             X
#define crypto_alloc_comp(X, Y, Z)      crypto_alloc_tfm(X, mode)
#define plain(X)    #X , 0
#else
#define ecb(X)      "ecb(" #X ")" , 0
#define cbc(X)      "cbc(" #X ")" , 0
#define hmac(X)     "hmac(" #X ")" , 0
#define plain(X)    #X , 0
#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
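
/*
 * Note that ecb()/cbc()/hmac()/plain() each expand to *two* initializers, a
 * name string and a mode, which is why the crypto_details entries below
 * appear to list only two fields. On 2.6.19+ kernels cbc(aes) becomes
 * "cbc(aes)", 0; on older kernels it becomes "aes", CRYPTO_TFM_MODE_CBC.
 * Also note the pre-2.6.19 crypto_alloc_* wrappers above ignore their second
 * and third arguments and instead reference a local variable named 'mode'
 * (and crypto_hash_digest one named 'sg_num'), so they only expand correctly
 * inside functions that declare those locals, as swcr_newsession() and
 * swcr_process_req() do.
 */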
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* no ablkcipher in older kernels */
#define crypto_alloc_ablkcipher(a,b,c)      (NULL)
#define crypto_ablkcipher_tfm(x)            ((struct crypto_tfm *)(x))
#define crypto_ablkcipher_set_flags(a, b)   /* nop */
#define crypto_ablkcipher_setkey(x, y, z)   (-EINVAL)
#define crypto_has_ablkcipher(a,b,c)        (0)
#else
#define HAVE_ABLKCIPHER
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
/* no ahash in older kernels */
#define crypto_ahash_tfm(x)                 ((struct crypto_tfm *)(x))
#define crypto_alloc_ahash(a,b,c)           (NULL)
#define crypto_ahash_digestsize(x)          0
#else
#define HAVE_AHASH
#endif

struct crypto_details {
    char *alg_name;
    int mode;
    int sw_type;
};

static struct crypto_details crypto_details[] = {
    [CRYPTO_DES_CBC]        = { cbc(des),          SW_TYPE_BLKCIPHER, },
    [CRYPTO_3DES_CBC]       = { cbc(des3_ede),     SW_TYPE_BLKCIPHER, },
    [CRYPTO_BLF_CBC]        = { cbc(blowfish),     SW_TYPE_BLKCIPHER, },
    [CRYPTO_CAST_CBC]       = { cbc(cast5),        SW_TYPE_BLKCIPHER, },
    [CRYPTO_SKIPJACK_CBC]   = { cbc(skipjack),     SW_TYPE_BLKCIPHER, },
    [CRYPTO_MD5_HMAC]       = { hmac(md5),         SW_TYPE_HMAC, },
    [CRYPTO_SHA1_HMAC]      = { hmac(sha1),        SW_TYPE_HMAC, },
    [CRYPTO_RIPEMD160_HMAC] = { hmac(ripemd160),   SW_TYPE_HMAC, },
    [CRYPTO_MD5_KPDK]       = { plain(md5-kpdk),   SW_TYPE_HASH, },
    [CRYPTO_SHA1_KPDK]      = { plain(sha1-kpdk),  SW_TYPE_HASH, },
    [CRYPTO_AES_CBC]        = { cbc(aes),          SW_TYPE_BLKCIPHER, },
    [CRYPTO_ARC4]           = { ecb(arc4),         SW_TYPE_BLKCIPHER, },
    [CRYPTO_MD5]            = { plain(md5),        SW_TYPE_HASH, },
    [CRYPTO_SHA1]           = { plain(sha1),       SW_TYPE_HASH, },
    [CRYPTO_NULL_HMAC]      = { hmac(digest_null), SW_TYPE_HMAC, },
    [CRYPTO_NULL_CBC]       = { cbc(cipher_null),  SW_TYPE_BLKCIPHER, },
    [CRYPTO_DEFLATE_COMP]   = { plain(deflate),    SW_TYPE_COMP, },
    [CRYPTO_SHA2_256_HMAC]  = { hmac(sha256),      SW_TYPE_HMAC, },
    [CRYPTO_SHA2_384_HMAC]  = { hmac(sha384),      SW_TYPE_HMAC, },
    [CRYPTO_SHA2_512_HMAC]  = { hmac(sha512),      SW_TYPE_HMAC, },
    [CRYPTO_CAMELLIA_CBC]   = { cbc(camellia),     SW_TYPE_BLKCIPHER, },
    [CRYPTO_SHA2_256]       = { plain(sha256),     SW_TYPE_HASH, },
    [CRYPTO_SHA2_384]       = { plain(sha384),     SW_TYPE_HASH, },
    [CRYPTO_SHA2_512]       = { plain(sha512),     SW_TYPE_HASH, },
    [CRYPTO_RIPEMD160]      = { plain(ripemd160),  SW_TYPE_HASH, },
};
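
/*
 * After macro expansion (on a 2.6.19+ kernel) an entry above reads, for
 * example:
 *
 *	[CRYPTO_AES_CBC] = { "cbc(aes)", 0, SW_TYPE_BLKCIPHER, },
 *
 * i.e. the designated index is the OCF algorithm number and the string is
 * the Linux crypto API algorithm name used when allocating the tfm.
 */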
int32_t swcr_id = -1;
module_param(swcr_id, int, 0444);
MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");

int swcr_fail_if_compression_grows = 1;
module_param(swcr_fail_if_compression_grows, int, 0644);
MODULE_PARM_DESC(swcr_fail_if_compression_grows,
        "Treat compression that results in more data as a failure");

int swcr_no_ahash = 0;
module_param(swcr_no_ahash, int, 0644);
MODULE_PARM_DESC(swcr_no_ahash,
        "Do not use async hash/hmac even if available");

int swcr_no_ablk = 0;
module_param(swcr_no_ablk, int, 0644);
MODULE_PARM_DESC(swcr_no_ablk,
        "Do not use async blk ciphers even if available");

static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum = 0;

static int swcr_process(device_t, struct cryptop *, int);
static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
static int swcr_freesession(device_t, u_int64_t);

static device_method_t swcr_methods = {
    /* crypto device methods */
    DEVMETHOD(cryptodev_newsession, swcr_newsession),
    DEVMETHOD(cryptodev_freesession, swcr_freesession),
    DEVMETHOD(cryptodev_process, swcr_process),
};
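
/*
 * The OCF core calls back through this method table: a consumer's
 * crypto_newsession() ends up in swcr_newsession(), crypto_freesession() in
 * swcr_freesession(), and a dispatched cryptop in swcr_process().
 */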
#define debug swcr_debug
int swcr_debug = 0;
module_param(swcr_debug, int, 0644);
MODULE_PARM_DESC(swcr_debug, "Enable debug");

static void swcr_process_req(struct swcr_req *req);

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
    struct swcr_data **swd;
    u_int32_t i;
    int error;
    char *algo;
    int mode;

    dprintk("%s()\n", __FUNCTION__);
    if (sid == NULL || cri == NULL) {
        dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }

    if (swcr_sessions) {
        for (i = 1; i < swcr_sesnum; i++)
            if (swcr_sessions[i] == NULL)
                break;
    } else
        i = 1; /* NB: to silence compiler warning */

    if (swcr_sessions == NULL || i == swcr_sesnum) {
        if (swcr_sessions == NULL) {
            i = 1; /* We leave swcr_sessions[0] empty */
            swcr_sesnum = CRYPTO_SW_SESSIONS;
        } else
            swcr_sesnum *= 2;

        swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
        if (swd == NULL) {
            /* Reset session number */
            if (swcr_sesnum == CRYPTO_SW_SESSIONS)
                swcr_sesnum = 0;
            else
                swcr_sesnum /= 2;
            dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
            return ENOBUFS;
        }
        memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

        /* Copy existing sessions */
        if (swcr_sessions) {
            memcpy(swd, swcr_sessions,
                    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
            kfree(swcr_sessions);
        }

        swcr_sessions = swd;
    }

    swd = &swcr_sessions[i];
    *sid = i;

    while (cri) {
        *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
                SLAB_ATOMIC);
        if (*swd == NULL) {
            swcr_freesession(NULL, i);
            dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
            return ENOBUFS;
        }
        memset(*swd, 0, sizeof(struct swcr_data));

        if (cri->cri_alg < 0 ||
                cri->cri_alg >= sizeof(crypto_details)/sizeof(crypto_details[0])) {
            printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        algo = crypto_details[cri->cri_alg].alg_name;
        if (!algo || !*algo) {
            printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        mode = crypto_details[cri->cri_alg].mode;
        (*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
        (*swd)->sw_alg = cri->cri_alg;

        /* Algorithm specific configuration */
        switch (cri->cri_alg) {
        case CRYPTO_NULL_CBC:
            cri->cri_klen = 0; /* make it work with crypto API */
            break;
        default:
            break;
        }

        if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
            dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
                    algo, mode);

            /* try async first */
            (*swd)->sw_tfm = swcr_no_ablk ? NULL :
                    crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
            if ((*swd)->sw_tfm) {
                dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
                (*swd)->sw_type |= SW_TYPE_ASYNC;
            } else {
                dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
                (*swd)->sw_tfm = crypto_blkcipher_tfm(
                        crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
            }
            if (!(*swd)->sw_tfm) {
                dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
                        algo, mode);
                swcr_freesession(NULL, i);
                return EINVAL;
            }

            if (debug) {
                /* use a private index here: i is the session slot and is
                 * still needed by the error paths below */
                int k;
                dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
                        __FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
                for (k = 0; k < (cri->cri_klen + 7) / 8; k++)
                    dprintk("%s0x%x", (k % 8) ? " " : "\n ",
                            cri->cri_key[k] & 0xff);
                dprintk("\n");
            }
            if ((*swd)->sw_type & SW_TYPE_ASYNC) {
                /* OCF doesn't enforce keys */
                crypto_ablkcipher_set_flags(
                        __crypto_ablkcipher_cast((*swd)->sw_tfm),
                        CRYPTO_TFM_REQ_WEAK_KEY);
                error = crypto_ablkcipher_setkey(
                        __crypto_ablkcipher_cast((*swd)->sw_tfm),
                        cri->cri_key, (cri->cri_klen + 7) / 8);
            } else {
                /* OCF doesn't enforce keys */
                crypto_blkcipher_set_flags(
                        crypto_blkcipher_cast((*swd)->sw_tfm),
                        CRYPTO_TFM_REQ_WEAK_KEY);
                error = crypto_blkcipher_setkey(
                        crypto_blkcipher_cast((*swd)->sw_tfm),
                        cri->cri_key, (cri->cri_klen + 7) / 8);
            }
            if (error) {
                printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
                        (*swd)->sw_tfm->crt_flags);
                swcr_freesession(NULL, i);
                return error;
            }
        } else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
            dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
                    algo, mode);

            /* try async first */
            (*swd)->sw_tfm = swcr_no_ahash ? NULL :
                    crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
            if ((*swd)->sw_tfm) {
                dprintk("%s %s hash is async\n", __FUNCTION__, algo);
                (*swd)->sw_type |= SW_TYPE_ASYNC;
            } else {
                dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
                (*swd)->sw_tfm = crypto_hash_tfm(
                        crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
            }
            if (!(*swd)->sw_tfm) {
                dprintk("cryptosoft: crypto_alloc_hash failed(%s, 0x%x)\n",
                        algo, mode);
                swcr_freesession(NULL, i);
                return EINVAL;
            }

            (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
            (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
                    SLAB_ATOMIC);
            if ((*swd)->u.hmac.sw_key == NULL) {
                swcr_freesession(NULL, i);
                dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
                return ENOBUFS;
            }
            memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
            if (cri->cri_mlen) {
                (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
            } else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
                (*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
                        __crypto_ahash_cast((*swd)->sw_tfm));
            } else {
                (*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
                        crypto_hash_cast((*swd)->sw_tfm));
            }
        } else if ((*swd)->sw_type & SW_TYPE_COMP) {
            (*swd)->sw_tfm = crypto_comp_tfm(
                    crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
            if (!(*swd)->sw_tfm) {
                dprintk("cryptosoft: crypto_alloc_comp failed(%s, 0x%x)\n",
                        algo, mode);
                swcr_freesession(NULL, i);
                return EINVAL;
            }
            (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
            if ((*swd)->u.sw_comp_buf == NULL) {
                swcr_freesession(NULL, i);
                dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
                return ENOBUFS;
            }
        } else {
            printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        cri = cri->cri_next;
        swd = &((*swd)->sw_next);
    }
    return 0;
}
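
/*
 * A consumer typically builds a chain of cryptoini records, one per
 * operation, and hands them to the OCF core. A minimal sketch (field values
 * are illustrative only, and the key buffers are hypothetical):
 *
 *	struct cryptoini cia, cri;
 *	u_int64_t sid;
 *	int error;
 *
 *	memset(&cia, 0, sizeof(cia));
 *	memset(&cri, 0, sizeof(cri));
 *	cia.cri_alg  = CRYPTO_AES_CBC;
 *	cia.cri_klen = 128;			// key length in bits
 *	cia.cri_key  = cipher_key;		// hypothetical key buffer
 *	cia.cri_next = &cri;
 *	cri.cri_alg  = CRYPTO_SHA1_HMAC;
 *	cri.cri_klen = 160;
 *	cri.cri_key  = mac_key;			// hypothetical key buffer
 *	error = crypto_newsession(&sid, &cia, 0);
 *
 * Each record in the chain becomes one swcr_data entry linked through
 * sw_next on the session created above.
 */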
/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
    struct swcr_data *swd;
    u_int32_t sid = CRYPTO_SESID2LID(tid);

    dprintk("%s()\n", __FUNCTION__);

    /* Silently accept and return */
    if (sid == 0)
        return 0;

    if (sid >= swcr_sesnum || swcr_sessions == NULL ||
            swcr_sessions[sid] == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }

    while ((swd = swcr_sessions[sid]) != NULL) {
        swcr_sessions[sid] = swd->sw_next;
        if (swd->sw_tfm) {
            switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
            case SW_TYPE_AHMAC:
            case SW_TYPE_AHASH:
                crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
                break;
#endif
#ifdef HAVE_ABLKCIPHER
            case SW_TYPE_ABLKCIPHER:
                crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
                break;
#endif
            case SW_TYPE_BLKCIPHER:
                crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
                break;
            case SW_TYPE_HMAC:
            case SW_TYPE_HASH:
                crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
                break;
            case SW_TYPE_COMP:
                crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
                break; /* don't fall through and free the tfm twice */
            default:
                crypto_free_tfm(swd->sw_tfm);
                break;
            }
            swd->sw_tfm = NULL;
        }
        if (swd->sw_type & SW_TYPE_COMP) {
            if (swd->u.sw_comp_buf)
                kfree(swd->u.sw_comp_buf);
        } else {
            if (swd->u.hmac.sw_key)
                kfree(swd->u.hmac.sw_key);
        }
        kfree(swd);
    }
    return 0;
}
#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
/* older kernels had no async interface */

static void swcr_process_callback(struct crypto_async_request *creq, int err)
{
    struct swcr_req *req = creq->data;

    dprintk("%s()\n", __FUNCTION__);
    if (err) {
        if (err == -EINPROGRESS)
            return;
        dprintk("%s() fail %d\n", __FUNCTION__, -err);
        req->crp->crp_etype = -err;
        goto done;
    }

    switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
        crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
                req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
        ahash_request_free(req->crypto_req);
        break;
    case SW_TYPE_ABLKCIPHER:
        ablkcipher_request_free(req->crypto_req);
        break;
    default:
        req->crp->crp_etype = EINVAL;
        goto done;
    }

    req->crd = req->crd->crd_next;
    if (req->crd) {
        swcr_process_req(req);
        return;
    }

done:
    dprintk("%s crypto_done %p\n", __FUNCTION__, req);
    crypto_done(req->crp);
    kmem_cache_free(swcr_req_cache, req);
}
#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
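
/*
 * Async completion flow: when crypto_ahash_digest() or
 * crypto_ablkcipher_{en,de}crypt() return -EINPROGRESS/-EBUSY,
 * swcr_process_req() returns without completing the request and the callback
 * above runs later in the crypto core's context. It advances req->crd to the
 * next descriptor and re-enters swcr_process_req(), so a cipher+HMAC chain
 * is processed one descriptor at a time; crypto_done() is only called once
 * the whole chain has finished (or an error terminates it early).
 */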
static void swcr_process_req(struct swcr_req *req)
{
    struct swcr_data *sw;
    struct cryptop *crp = req->crp;
    struct cryptodesc *crd = req->crd;
    struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
    struct uio *uiop = (struct uio *) crp->crp_buf;
    int sg_num, sg_len, skip;

    dprintk("%s()\n", __FUNCTION__);

    /*
     * Find the crypto context.
     *
     * XXX Note that the logic here prevents us from having
     * XXX the same algorithm multiple times in a session
     * XXX (or rather, we can but it won't give us the right
     * XXX results). To do that, we'd need some way of differentiating
     * XXX between the various instances of an algorithm (so we can
     * XXX locate the correct crypto context).
     */
    for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
        ;

    /* No such context ? */
    if (sw == NULL) {
        crp->crp_etype = EINVAL;
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        goto done;
    }

    req->sw = sw;
    skip = crd->crd_skip;

    /*
     * setup the SG list skip from the start of the buffer
     */
    memset(req->sg, 0, sizeof(req->sg));
    sg_init_table(req->sg, SCATTERLIST_MAX);
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        int i, len;

        sg_num = 0;
        sg_len = 0;

        if (skip < skb_headlen(skb)) {
            len = skb_headlen(skb) - skip;
            if (len + sg_len > crd->crd_len)
                len = crd->crd_len - sg_len;
            sg_set_page(&req->sg[sg_num],
                    virt_to_page(skb->data + skip), len,
                    offset_in_page(skb->data + skip));
            sg_len += len;
            sg_num++;
            skip = 0;
        } else
            skip -= skb_headlen(skb);

        for (i = 0; sg_len < crd->crd_len &&
                i < skb_shinfo(skb)->nr_frags &&
                sg_num < SCATTERLIST_MAX; i++) {
            if (skip < skb_shinfo(skb)->frags[i].size) {
                len = skb_shinfo(skb)->frags[i].size - skip;
                if (len + sg_len > crd->crd_len)
                    len = crd->crd_len - sg_len;
                sg_set_page(&req->sg[sg_num],
                        skb_shinfo(skb)->frags[i].page,
                        len,
                        skb_shinfo(skb)->frags[i].page_offset + skip);
                sg_len += len;
                sg_num++;
                skip = 0;
            } else
                skip -= skb_shinfo(skb)->frags[i].size;
        }
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        int len;

        sg_len = 0;
        for (sg_num = 0; sg_len < crd->crd_len &&
                sg_num < uiop->uio_iovcnt &&
                sg_num < SCATTERLIST_MAX; sg_num++) {
            if (skip <= uiop->uio_iov[sg_num].iov_len) {
                len = uiop->uio_iov[sg_num].iov_len - skip;
                if (len + sg_len > crd->crd_len)
                    len = crd->crd_len - sg_len;
                sg_set_page(&req->sg[sg_num],
                        virt_to_page(uiop->uio_iov[sg_num].iov_base + skip),
                        len,
                        offset_in_page(uiop->uio_iov[sg_num].iov_base + skip));
                sg_len += len;
                skip = 0;
            } else
                skip -= uiop->uio_iov[sg_num].iov_len;
        }
    } else {
        sg_len = (crp->crp_ilen - skip);
        if (sg_len > crd->crd_len)
            sg_len = crd->crd_len;
        sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
                sg_len, offset_in_page(crp->crp_buf + skip));
        sg_num = 1;
    }
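
    /*
     * At this point req->sg describes crd_len bytes starting crd_skip bytes
     * into the source buffer, whichever of the three layouts the caller
     * used: a fragmented sk_buff, a uio iovec array, or a plain contiguous
     * buffer. sg_num and sg_len are the entry count and total byte count
     * actually mapped.
     */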
    switch (sw->sw_type & SW_TYPE_ALG_AMASK) {

#ifdef HAVE_AHASH
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
        {
        int ret;

        /* check we have room for the result */
        if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
            dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
                    "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
                    crd->crd_inject, sw->u.hmac.sw_mlen);
            crp->crp_etype = EINVAL;
            goto done;
        }

        req->crypto_req =
                ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm), GFP_KERNEL);
        if (!req->crypto_req) {
            crp->crp_etype = ENOMEM;
            dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
            goto done;
        }

        ahash_request_set_callback(req->crypto_req,
                CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

        memset(req->result, 0, sizeof(req->result));

        if (sw->sw_type & SW_TYPE_AHMAC)
            crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
                    sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
        ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
        ret = crypto_ahash_digest(req->crypto_req);
        switch (ret) {
        case -EINPROGRESS:
        case -EBUSY:
            return;
        default:
        case 0:
            dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
            crp->crp_etype = ret;
            ahash_request_free(req->crypto_req);
            goto done;
        }
        } break;
#endif /* HAVE_AHASH */

#ifdef HAVE_ABLKCIPHER
    case SW_TYPE_ABLKCIPHER: {
        int ret;
        unsigned char *ivp = req->iv;
        int ivsize =
                crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));

        if (sg_len < crypto_ablkcipher_blocksize(
                __crypto_ablkcipher_cast(sw->sw_tfm))) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
                    sg_len, crypto_ablkcipher_blocksize(
                            __crypto_ablkcipher_cast(sw->sw_tfm)));
            goto done;
        }

        if (ivsize > sizeof(req->iv)) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
            goto done;
        }

        req->crypto_req = ablkcipher_request_alloc(
                __crypto_ablkcipher_cast(sw->sw_tfm), GFP_KERNEL);
        if (!req->crypto_req) {
            crp->crp_etype = ENOMEM;
            dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
                    __FILE__, __LINE__);
            goto done;
        }

        ablkcipher_request_set_callback(req->crypto_req,
                CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
            int i, error;

            if (debug) {
                dprintk("%s key:", __FUNCTION__);
                for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
                    dprintk("%s0x%x", (i % 8) ? " " : "\n ",
                            crd->crd_key[i] & 0xff);
                dprintk("\n");
            }
            /* OCF doesn't enforce keys */
            crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
                    CRYPTO_TFM_REQ_WEAK_KEY);
            error = crypto_ablkcipher_setkey(
                    __crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
                    (crd->crd_klen + 7) / 8);
            if (error) {
                dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
                        error, sw->sw_tfm->crt_flags);
                crp->crp_etype = -error;
            }
        }

        if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
            if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                ivp = crd->crd_iv;
            else
                get_random_bytes(ivp, ivsize);
            /*
             * do we have to copy the IV back to the buffer ?
             */
            if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
                    sg_len, ivp);
            ret = crypto_ablkcipher_encrypt(req->crypto_req);
        } else { /* decrypt */
            if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                ivp = crd->crd_iv;
            else
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
                    sg_len, ivp);
            ret = crypto_ablkcipher_decrypt(req->crypto_req);
        }

        switch (ret) {
        case -EINPROGRESS:
        case -EBUSY:
            return;
        default:
        case 0:
            dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
            crp->crp_etype = ret;
            goto done;
        }
        } break;
#endif /* HAVE_ABLKCIPHER */

    case SW_TYPE_BLKCIPHER: {
        unsigned char iv[EALG_MAX_BLOCK_LEN];
        unsigned char *ivp = iv;
        struct blkcipher_desc desc;
        int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));

        if (sg_len < crypto_blkcipher_blocksize(
                crypto_blkcipher_cast(sw->sw_tfm))) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
                    sg_len, crypto_blkcipher_blocksize(
                            crypto_blkcipher_cast(sw->sw_tfm)));
            goto done;
        }

        if (ivsize > sizeof(iv)) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
            goto done;
        }

        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
            int i, error;

            if (debug) {
                dprintk("%s key:", __FUNCTION__);
                for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
                    dprintk("%s0x%x", (i % 8) ? " " : "\n ",
                            crd->crd_key[i] & 0xff);
                dprintk("\n");
            }
            /* OCF doesn't enforce keys */
            crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
                    CRYPTO_TFM_REQ_WEAK_KEY);
            error = crypto_blkcipher_setkey(
                    crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
                    (crd->crd_klen + 7) / 8);
            if (error) {
                dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
                        error, sw->sw_tfm->crt_flags);
                crp->crp_etype = -error;
            }
        }

        memset(&desc, 0, sizeof(desc));
        desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);

        if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
            if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
                ivp = crd->crd_iv;
            } else {
                get_random_bytes(ivp, ivsize);
            }
            /*
             * do we have to copy the IV back to the buffer ?
             */
            if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            desc.info = ivp;
            crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);
        } else { /* decrypt */
            if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
                ivp = crd->crd_iv;
            } else {
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            desc.info = ivp;
            crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
        }
        } break;

    case SW_TYPE_HMAC:
    case SW_TYPE_HASH:
        {
        char result[HASH_MAX_LEN];
        struct hash_desc desc;

        /* check we have room for the result */
        if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
            dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
                    "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
                    crd->crd_inject, sw->u.hmac.sw_mlen);
            crp->crp_etype = EINVAL;
            goto done;
        }

        memset(&desc, 0, sizeof(desc));
        desc.tfm = crypto_hash_cast(sw->sw_tfm);

        memset(result, 0, sizeof(result));

        if (sw->sw_type & SW_TYPE_HMAC) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
            crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
                    req->sg, sg_num, result);
#else
            crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
                    sw->u.hmac.sw_klen);
            crypto_hash_digest(&desc, req->sg, sg_len, result);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
        } else { /* SW_TYPE_HASH */
            crypto_hash_digest(&desc, req->sg, sg_len, result);
        }

        crypto_copyback(crp->crp_flags, crp->crp_buf,
                crd->crd_inject, sw->u.hmac.sw_mlen, result);
        }
        break;

    case SW_TYPE_COMP: {
        void *ibuf = NULL;
        void *obuf = sw->u.sw_comp_buf;
        int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
        int ret = 0;

        /*
         * we need to use an additional copy if there is more than one
         * input chunk since the kernel comp routines do not handle
         * SG yet. Otherwise we just use the input buffer as is.
         * Rather than allocate another buffer we just split the tmp
         * buffer we already have.
         * Perhaps we should just use zlib directly ?
         */
        if (sg_num > 1) {
            int blk;

            ibuf = obuf;
            for (blk = 0; blk < sg_num; blk++) {
                memcpy(obuf, sg_virt(&req->sg[blk]),
                        req->sg[blk].length);
                obuf += req->sg[blk].length;
            }
            olen -= sg_len;
        } else
            ibuf = sg_virt(&req->sg[0]);

        if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
            ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
                    ibuf, ilen, obuf, &olen);
            if (!ret && olen > crd->crd_len) {
                dprintk("cryptosoft: ERANGE compress %d into %d\n",
                        crd->crd_len, olen);
                if (swcr_fail_if_compression_grows)
                    ret = ERANGE;
            }
        } else { /* decompress */
            ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
                    ibuf, ilen, obuf, &olen);
            if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
                dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
                        "space for %d, at offset %d\n",
                        crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
                ret = ETOOSMALL;
            }
        }
        if (ret)
            dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);

        /*
         * on success copy result back,
         * linux crypto API returns -errno, we need to fix that
         */
        crp->crp_etype = ret < 0 ? -ret : ret;
        if (ret == 0) {
            /* copy back the result and return its size */
            crypto_copyback(crp->crp_flags, crp->crp_buf,
                    crd->crd_inject, olen, obuf);
            crp->crp_olen = olen;
        }
        } break;

    default:
        /* Unknown/unsupported algorithm */
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        crp->crp_etype = EINVAL;
        goto done;
    }

done:
    crypto_done(crp);
    kmem_cache_free(swcr_req_cache, req);
}
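
/*
 * Error convention: OCF expects a positive errno in crp->crp_etype, while
 * the Linux crypto API returns negative errnos, hence the sign flips
 * (crp->crp_etype = -error) scattered through the paths above.
 */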
/*
 * Process a crypto request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
    struct swcr_req *req = NULL;
    u_int32_t lid;

    dprintk("%s()\n", __FUNCTION__);
    /* Sanity check */
    if (crp == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }

    crp->crp_etype = 0;

    if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        crp->crp_etype = EINVAL;
        goto done;
    }

    lid = crp->crp_sid & 0xffffffff;
    if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
            swcr_sessions[lid] == NULL) {
        crp->crp_etype = ENOENT;
        dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
        goto done;
    }

    /*
     * do some error checking outside of the loop for SKB and IOV processing
     * this leaves us with valid skb or uiop pointers for later
     */
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
        if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
            printk("%s,%d: %d nr_frags >= SCATTERLIST_MAX\n",
                    __FILE__, __LINE__, skb_shinfo(skb)->nr_frags);
            crp->crp_etype = EINVAL; /* report the failure to the caller */
            goto done;
        }
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        struct uio *uiop = (struct uio *) crp->crp_buf;
        if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
            printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX\n",
                    __FILE__, __LINE__, uiop->uio_iovcnt);
            crp->crp_etype = EINVAL; /* report the failure to the caller */
            goto done;
        }
    }

    /*
     * setup a new request ready for queuing
     */
    req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
    if (req == NULL) {
        dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
        crp->crp_etype = ENOMEM;
        goto done;
    }
    memset(req, 0, sizeof(*req));

    req->sw_head = swcr_sessions[lid];
    req->crp = crp;
    req->crd = crp->crp_desc;

    swcr_process_req(req);
    return 0;

done:
    crypto_done(crp);
    if (req)
        kmem_cache_free(swcr_req_cache, req);
    return 0;
}
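
/*
 * For reference, a request typically reaches swcr_process() via the OCF
 * core's crypto_dispatch(). A minimal sketch, with illustrative values and a
 * hypothetical completion handler my_done() (neither is part of this file):
 *
 *	struct cryptop *crp = crypto_getreq(1);	// one descriptor
 *	crp->crp_sid  = sid;			// from crypto_newsession()
 *	crp->crp_ilen = len;
 *	crp->crp_buf  = (caddr_t) buf;		// contiguous-buffer flavour
 *	crp->crp_callback = my_done;
 *	crp->crp_desc->crd_alg   = CRYPTO_AES_CBC;
 *	crp->crp_desc->crd_skip  = 0;
 *	crp->crp_desc->crd_len   = len;
 *	crp->crp_desc->crd_flags = CRD_F_ENCRYPT | CRD_F_IV_EXPLICIT;
 *	memcpy(crp->crp_desc->crd_iv, iv, ivlen);
 *	crypto_dispatch(crp);
 *
 * crp->crp_etype carries the (positive errno) result when my_done() runs.
 */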
static int
cryptosoft_init(void)
{
    int i, sw_type, mode;
    char *algo;

    dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);

    swcr_req_cache = kmem_cache_create("cryptosoft_req",
            sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
            , NULL
#endif
            );
    if (!swcr_req_cache) {
        printk("cryptosoft: failed to create request cache\n");
        return -ENOENT;
    }

    softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);

    swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
            CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
    if (swcr_id < 0) {
        printk("cryptosoft: Software crypto device cannot initialize!\n");
        return -ENODEV;
    }

#define REGISTER(alg) \
        crypto_register(swcr_id, alg, 0, 0)

    for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
        int found;

        algo = crypto_details[i].alg_name;
        if (!algo || !*algo) {
            dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
            continue;
        }

        mode = crypto_details[i].mode;
        sw_type = crypto_details[i].sw_type;

        found = 0;
        switch (sw_type & SW_TYPE_ALG_MASK) {
        case SW_TYPE_CIPHER:
            found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
            break;
        case SW_TYPE_HMAC:
            found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
            break;
        case SW_TYPE_HASH:
            found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
            break;
        case SW_TYPE_COMP:
            found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
            break;
        case SW_TYPE_BLKCIPHER:
            found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
            if (!found && !swcr_no_ablk)
                found = crypto_has_ablkcipher(algo, 0, 0);
            break;
        }
        if (found) {
            REGISTER(i);
        } else {
            dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
                    __FUNCTION__, sw_type, i, algo);
        }
    }
    return 0;
}
static void
cryptosoft_exit(void)
{
    dprintk("%s()\n", __FUNCTION__);
    crypto_unregister_all(swcr_id);
    swcr_id = -1;
    kmem_cache_destroy(swcr_req_cache);
}

late_initcall(cryptosoft_init);
module_exit(cryptosoft_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <[email protected]>");
MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");