ixp4xx.c

/*
 * An OCF module that uses Intel's IXP CryptoAcc API to do the crypto.
 * This driver requires the IXP400 Access Library that is available
 * from Intel in order to operate (or compile).
 *
 * Written by David McCullough <[email protected]>
 * Copyright (C) 2006-2010 David McCullough
 * Copyright (C) 2004-2005 Intel Corporation.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 * 1. distributions of this source code include the above copyright
 *    notice, this list of conditions and the following disclaimer;
 *
 * 2. distributions in binary form include the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in the documentation and/or other associated materials;
 *
 * 3. the copyright holder's name is not used to endorse products
 *    built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <asm/scatterlist.h>

#include <IxTypes.h>
#include <IxOsBuffMgt.h>
#include <IxNpeDl.h>
#include <IxCryptoAcc.h>
#include <IxQMgr.h>
#include <IxOsServices.h>
#include <IxOsCacheMMU.h>

#include <cryptodev.h>
#include <uio.h>

#ifndef IX_MBUF_PRIV
#define IX_MBUF_PRIV(x) ((x)->priv)
#endif
struct ixp_data;

struct ixp_q {
	struct list_head ixp_q_list;
	struct ixp_data *ixp_q_data;
	struct cryptop *ixp_q_crp;
	struct cryptodesc *ixp_q_ccrd;
	struct cryptodesc *ixp_q_acrd;
	IX_MBUF ixp_q_mbuf;
	UINT8 *ixp_hash_dest;	/* Location for hash in client buffer */
	UINT8 *ixp_hash_src;	/* Location of hash in internal buffer */
	unsigned char ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
	unsigned char *ixp_q_iv;
};

struct ixp_data {
	int ixp_registered;		/* is the context registered */
	int ixp_crd_flags;		/* detect direction changes */
	int ixp_cipher_alg;
	int ixp_auth_alg;
	UINT32 ixp_ctx_id;
	UINT32 ixp_hash_key_id;		/* used when hashing */
	IxCryptoAccCtx ixp_ctx;
	IX_MBUF ixp_pri_mbuf;
	IX_MBUF ixp_sec_mbuf;
	struct work_struct ixp_pending_work;
	struct work_struct ixp_registration_work;
	struct list_head ixp_q;		/* unprocessed requests */
};
#ifdef __ixp46X

#define MAX_IOP_SIZE	64	/* words */
#define MAX_OOP_SIZE	128
#define MAX_PARAMS	3

struct ixp_pkq {
	struct list_head pkq_list;
	struct cryptkop *pkq_krp;
	IxCryptoAccPkeEauInOperands pkq_op;
	IxCryptoAccPkeEauOpResult pkq_result;
	UINT32 pkq_ibuf0[MAX_IOP_SIZE];
	UINT32 pkq_ibuf1[MAX_IOP_SIZE];
	UINT32 pkq_ibuf2[MAX_IOP_SIZE];
	UINT32 pkq_obuf[MAX_OOP_SIZE];
};

static LIST_HEAD(ixp_pkq);	/* current PK wait list */
static struct ixp_pkq *ixp_pk_cur;
static spinlock_t ixp_pkq_lock;

#endif /* __ixp46X */
static int ixp_blocked = 0;
static int32_t ixp_id = -1;
static struct ixp_data **ixp_sessions = NULL;
static u_int32_t ixp_sesnum = 0;

static int ixp_process(device_t, struct cryptop *, int);
static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
static int ixp_freesession(device_t, u_int64_t);
#ifdef __ixp46X
static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *qcache;
#else
static struct kmem_cache *qcache;
#endif

#define debug ixp_debug
static int ixp_debug = 0;
module_param(ixp_debug, int, 0644);
MODULE_PARM_DESC(ixp_debug, "Enable debug");

static int ixp_init_crypto = 1;
module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");

static void ixp_process_pending(void *arg);
static void ixp_registration(void *arg);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void ixp_process_pending_wq(struct work_struct *work);
static void ixp_registration_wq(struct work_struct *work);
#endif
/*
 * dummy device structure
 */

static struct {
	softc_device_decl sc_dev;
} ixpdev;

static device_method_t ixp_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	ixp_newsession),
	DEVMETHOD(cryptodev_freesession,ixp_freesession),
	DEVMETHOD(cryptodev_process,	ixp_process),
#ifdef __ixp46X
	DEVMETHOD(cryptodev_kprocess,	ixp_kprocess),
#endif
};
/*
 * Generate a new software session.
 */
static int
ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct ixp_data *ixp;
	u_int32_t i;
#define AUTH_LEN(cri, def) \
	(cri->cri_mlen ? cri->cri_mlen : (def))
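	/*
	 * AUTH_LEN() picks the digest length for the session: the caller's
	 * requested MAC length (cri_mlen) if one was given, otherwise the
	 * algorithm's full digest length passed in as "def".
	 */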
	dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);

	if (sid == NULL || cri == NULL) {
		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	if (ixp_sessions) {
		for (i = 1; i < ixp_sesnum; i++)
			if (ixp_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */
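	/*
	 * No free slot (or no table yet): grow the session table.  Slot 0 is
	 * never handed out, and the table doubles in size each time it fills.
	 */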
	if (ixp_sessions == NULL || i == ixp_sesnum) {
		struct ixp_data **ixpd;

		if (ixp_sessions == NULL) {
			i = 1; /* We leave ixp_sessions[0] empty */
			ixp_sesnum = CRYPTO_SW_SESSIONS;
		} else
			ixp_sesnum *= 2;

		ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
		if (ixpd == NULL) {
			/* Reset session number */
			if (ixp_sesnum == CRYPTO_SW_SESSIONS)
				ixp_sesnum = 0;
			else
				ixp_sesnum /= 2;
			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
			return ENOBUFS;
		}
		memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));

		/* Copy existing sessions */
		if (ixp_sessions) {
			memcpy(ixpd, ixp_sessions,
					(ixp_sesnum / 2) * sizeof(struct ixp_data *));
			kfree(ixp_sessions);
		}

		ixp_sessions = ixpd;
	}
	ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
			SLAB_ATOMIC);
	if (ixp_sessions[i] == NULL) {
		ixp_freesession(NULL, i);
		dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
		return ENOBUFS;
	}

	*sid = i;

	ixp = ixp_sessions[i];
	memset(ixp, 0, sizeof(*ixp));

	ixp->ixp_cipher_alg = -1;
	ixp->ixp_auth_alg = -1;
	ixp->ixp_ctx_id = -1;
	INIT_LIST_HEAD(&ixp->ixp_q);
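	/* crypto is performed in place: source and destination share one mbuf */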
	ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;

	while (cri) {
		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			ixp->ixp_cipher_alg = cri->cri_alg;
			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
			ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
					IX_CRYPTO_ACC_DES_IV_64;
			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
					cri->cri_key, (cri->cri_klen + 7) / 8);
			break;

		case CRYPTO_3DES_CBC:
			ixp->ixp_cipher_alg = cri->cri_alg;
			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
			ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
					IX_CRYPTO_ACC_DES_IV_64;
			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
					cri->cri_key, (cri->cri_klen + 7) / 8);
			break;

		case CRYPTO_RIJNDAEL128_CBC:
			ixp->ixp_cipher_alg = cri->cri_alg;
			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
			ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
					cri->cri_key, (cri->cri_klen + 7) / 8);
			break;

		case CRYPTO_MD5:
		case CRYPTO_MD5_HMAC:
			ixp->ixp_auth_alg = cri->cri_alg;
			ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
			ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
			ixp->ixp_ctx.authCtx.aadLen = 0;
			/* Only MD5_HMAC needs a key */
			if (cri->cri_alg == CRYPTO_MD5_HMAC) {
				ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
				if (ixp->ixp_ctx.authCtx.authKeyLen >
						sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
					printk(
						"ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
						cri->cri_klen);
					ixp_freesession(NULL, i);
					return EINVAL;
				}
				memcpy(ixp->ixp_ctx.authCtx.key.authKey,
						cri->cri_key, (cri->cri_klen + 7) / 8);
			}
			break;

		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
			ixp->ixp_auth_alg = cri->cri_alg;
			ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
			ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
			ixp->ixp_ctx.authCtx.aadLen = 0;
			/* Only SHA1_HMAC needs a key */
			if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
				ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
				if (ixp->ixp_ctx.authCtx.authKeyLen >
						sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
					printk(
						"ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
						cri->cri_klen);
					ixp_freesession(NULL, i);
					return EINVAL;
				}
				memcpy(ixp->ixp_ctx.authCtx.key.authKey,
						cri->cri_key, (cri->cri_klen + 7) / 8);
			}
			break;

		default:
			printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
			ixp_freesession(NULL, i);
			return EINVAL;
		}
		cri = cri->cri_next;
	}
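	/*
	 * Registration of the context with the access library is deferred
	 * until the first request arrives (see ixp_process()/ixp_registration());
	 * here we only hook up the work entries that will run it.
	 */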
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
	INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
	INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
#else
	INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
	INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
#endif

	return 0;
}
/*
 * Free a session.
 */
static int
ixp_freesession(device_t dev, u_int64_t tid)
{
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	dprintk("%s()\n", __FUNCTION__);
	if (sid >= ixp_sesnum || ixp_sessions == NULL ||
			ixp_sessions[sid] == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}
	/* Silently accept and return */
	if (sid == 0)
		return 0;

	if (ixp_sessions[sid]) {
		if (ixp_sessions[sid]->ixp_ctx_id != -1) {
			ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
			ixp_sessions[sid]->ixp_ctx_id = -1;
		}
		kfree(ixp_sessions[sid]);
	}
	ixp_sessions[sid] = NULL;

	if (ixp_blocked) {
		ixp_blocked = 0;
		crypto_unblock(ixp_id, CRYPTO_SYMQ);
	}

	return 0;
}
/*
 * callback for when hash processing is complete
 */
static void
ixp_hash_perform_cb(
	UINT32 hash_key_id,
	IX_MBUF *bufp,
	IxCryptoAccStatus status)
{
	struct ixp_q *q;

	dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);

	if (bufp == NULL) {
		printk("ixp: NULL buf in %s\n", __FUNCTION__);
		return;
	}

	q = IX_MBUF_PRIV(bufp);
	if (q == NULL) {
		printk("ixp: NULL priv in %s\n", __FUNCTION__);
		return;
	}

	if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
		/* On success, need to copy hash back into original client buffer */
		memcpy(q->ixp_hash_dest, q->ixp_hash_src,
				(q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
					SHA1_HASH_LEN : MD5_HASH_LEN);
	}
	else {
		printk("ixp: hash perform failed status=%d\n", status);
		q->ixp_q_crp->crp_etype = EINVAL;
	}

	/* Free internal buffer used for hashing */
	kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));

	crypto_done(q->ixp_q_crp);
	kmem_cache_free(qcache, q);
}
/*
 * setup a request and perform it
 */
static void
ixp_q_process(struct ixp_q *q)
{
	IxCryptoAccStatus status;
	struct ixp_data *ixp = q->ixp_q_data;
	int auth_off = 0;
	int auth_len = 0;
	int crypt_off = 0;
	int crypt_len = 0;
	int icv_off = 0;
	char *crypt_func;

	dprintk("%s(%p)\n", __FUNCTION__, q);

	if (q->ixp_q_ccrd) {
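		/*
		 * Pick up the IV: either it was supplied explicitly in the
		 * descriptor (CRD_F_IV_EXPLICIT) or it is read out of the
		 * packet at the crd_inject offset.
		 */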
		if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
			q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
		} else {
			q->ixp_q_iv = q->ixp_q_iv_data;
			crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
					q->ixp_q_ccrd->crd_inject,
					ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
					(caddr_t) q->ixp_q_iv);
		}

		if (q->ixp_q_acrd) {
			auth_off = q->ixp_q_acrd->crd_skip;
			auth_len = q->ixp_q_acrd->crd_len;
			icv_off = q->ixp_q_acrd->crd_inject;
		}

		crypt_off = q->ixp_q_ccrd->crd_skip;
		crypt_len = q->ixp_q_ccrd->crd_len;
	} else { /* if (q->ixp_q_acrd) */
		auth_off = q->ixp_q_acrd->crd_skip;
		auth_len = q->ixp_q_acrd->crd_len;
		icv_off = q->ixp_q_acrd->crd_inject;
	}

	if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
		struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			/*
			 * DAVIDM fix this limitation one day by using
			 * a buffer pool and chaining, it is not currently
			 * needed for current user/kernel space acceleration
			 */
			printk("ixp: Cannot handle fragmented skb's yet !\n");
			q->ixp_q_crp->crp_etype = ENOENT;
			goto done;
		}
		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = skb->len;
		IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
	} else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
		struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
		if (uiop->uio_iovcnt != 1) {
			/*
			 * DAVIDM fix this limitation one day by using
			 * a buffer pool and chaining, it is not currently
			 * needed for current user/kernel space acceleration
			 */
			printk("ixp: Cannot handle more than 1 iovec yet !\n");
			q->ixp_q_crp->crp_etype = ENOENT;
			goto done;
		}
		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
		IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
	} else /* contig buffer */ {
		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
		IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
	}
	IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;

	if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
		/*
		 * For SHA1 and MD5 hash, need to create an internal buffer that is big
		 * enough to hold the original data + the appropriate padding for the
		 * hash algorithm.
		 */
		UINT8 *tbuf = NULL;
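		/*
		 * The expression below works out to the data length plus 72
		 * bytes, which covers the worst-case MD5/SHA1 padding (up to
		 * 64 pad bytes) plus the 8-byte length field.
		 */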
		IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
			((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
		tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);

		if (tbuf == NULL) {
			printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
					IX_MBUF_MLEN(&q->ixp_q_mbuf));
			q->ixp_q_crp->crp_etype = ENOMEM;
			goto done;
		}
		memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);

		/* Set location in client buffer to copy hash into */
		q->ixp_hash_dest =
			&(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];

		IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;

		/* Set location in internal buffer for where hash starts */
		q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];

		crypt_func = "ixCryptoAccHashPerform";
		status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
				&q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
				&ixp->ixp_hash_key_id);
	}
	else {
		crypt_func = "ixCryptoAccAuthCryptPerform";
		status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
				NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
				q->ixp_q_iv);
	}

	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
		return;

	if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
		q->ixp_q_crp->crp_etype = ENOMEM;
		goto done;
	}

	printk("ixp: %s failed %u\n", crypt_func, status);
	q->ixp_q_crp->crp_etype = EINVAL;

done:
	crypto_done(q->ixp_q_crp);
	kmem_cache_free(qcache, q);
}
/*
 * because we cannot process the Q from the Register callback
 * we do it here on a task Q.
 */
static void
ixp_process_pending(void *arg)
{
	struct ixp_data *ixp = arg;
	struct ixp_q *q = NULL;

	dprintk("%s(%p)\n", __FUNCTION__, arg);

	if (!ixp)
		return;

	while (!list_empty(&ixp->ixp_q)) {
		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
		list_del(&q->ixp_q_list);
		ixp_q_process(q);
	}
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
ixp_process_pending_wq(struct work_struct *work)
{
	struct ixp_data *ixp = container_of(work, struct ixp_data, ixp_pending_work);
	ixp_process_pending(ixp);
}
#endif
/*
 * callback for when context registration is complete
 */
static void
ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
{
	int i;
	struct ixp_data *ixp;
	struct ixp_q *q;

	dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);

	/*
	 * free any buffer passed in to this routine
	 */
	if (bufp) {
		IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
		kfree(IX_MBUF_MDATA(bufp));
		IX_MBUF_MDATA(bufp) = NULL;
	}

	for (i = 0; i < ixp_sesnum; i++) {
		ixp = ixp_sessions[i];
		if (ixp && ixp->ixp_ctx_id == ctx_id)
			break;
	}
	if (i >= ixp_sesnum) {
		printk("ixp: invalid context id %d\n", ctx_id);
		return;
	}

	if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
		/* this is normal to free the first of two buffers */
		dprintk("ixp: register not finished yet.\n");
		return;
	}

	if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
		printk("ixp: register failed 0x%x\n", status);
		while (!list_empty(&ixp->ixp_q)) {
			q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
			list_del(&q->ixp_q_list);
			q->ixp_q_crp->crp_etype = EINVAL;
			crypto_done(q->ixp_q_crp);
			kmem_cache_free(qcache, q);
		}
		return;
	}

	/*
	 * we are now registered, we cannot start processing the Q here
	 * or we get strange errors with AES (DES/3DES seem to be ok).
	 */
	ixp->ixp_registered = 1;
	schedule_work(&ixp->ixp_pending_work);
}
/*
 * callback for when data processing is complete
 */
static void
ixp_perform_cb(
	UINT32 ctx_id,
	IX_MBUF *sbufp,
	IX_MBUF *dbufp,
	IxCryptoAccStatus status)
{
	struct ixp_q *q;

	dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
			dbufp, status);

	if (sbufp == NULL) {
		printk("ixp: NULL sbuf in ixp_perform_cb\n");
		return;
	}

	q = IX_MBUF_PRIV(sbufp);
	if (q == NULL) {
		printk("ixp: NULL priv in ixp_perform_cb\n");
		return;
	}

	if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
		printk("ixp: perform failed status=%d\n", status);
		q->ixp_q_crp->crp_etype = EINVAL;
	}

	crypto_done(q->ixp_q_crp);
	kmem_cache_free(qcache, q);
}
/*
 * registration is not callable at IRQ time, so we defer
 * to a task queue; this routine completes the registration for us
 * when the task queue runs
 *
 * Unfortunately this means we cannot tell OCF that the driver is blocked,
 * we do that on the next request.
 */
static void
ixp_registration(void *arg)
{
	struct ixp_data *ixp = arg;
	struct ixp_q *q = NULL;
	IX_MBUF *pri = NULL, *sec = NULL;
	int status = IX_CRYPTO_ACC_STATUS_SUCCESS;

	if (!ixp) {
		printk("ixp: ixp_registration with no arg\n");
		return;
	}

	if (ixp->ixp_ctx_id != -1) {
		ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
		ixp->ixp_ctx_id = -1;
	}

	if (list_empty(&ixp->ixp_q)) {
		printk("ixp: ixp_registration with no Q\n");
		return;
	}

	/*
	 * setup the primary and secondary buffers
	 */
	q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
	if (q->ixp_q_acrd) {
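		/*
		 * These buffers are handed to ixCryptoAccCtxRegister() below;
		 * they are returned through ixp_register_cb(), which frees the
		 * data allocated here.
		 */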
		pri = &ixp->ixp_pri_mbuf;
		sec = &ixp->ixp_sec_mbuf;
		IX_MBUF_MLEN(pri) = IX_MBUF_PKT_LEN(pri) = 128;
		IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
		IX_MBUF_MLEN(sec) = IX_MBUF_PKT_LEN(sec) = 128;
		IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
	}

	/* Only need to register if a crypt op or HMAC op */
	if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
				ixp->ixp_auth_alg == CRYPTO_MD5)) {
		status = ixCryptoAccCtxRegister(
					&ixp->ixp_ctx,
					pri, sec,
					ixp_register_cb,
					ixp_perform_cb,
					&ixp->ixp_ctx_id);
	}
	else {
		/* Otherwise we start processing pending q */
		schedule_work(&ixp->ixp_pending_work);
	}

	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
		return;

	if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
		printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
		ixp_blocked = 1;
		/* perhaps we should return EAGAIN on queued ops ? */
		return;
	}

	printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
	ixp->ixp_ctx_id = -1;

	/*
	 * everything waiting is toasted
	 */
	while (!list_empty(&ixp->ixp_q)) {
		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
		list_del(&q->ixp_q_list);
		q->ixp_q_crp->crp_etype = ENOENT;
		crypto_done(q->ixp_q_crp);
		kmem_cache_free(qcache, q);
	}
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
ixp_registration_wq(struct work_struct *work)
{
	struct ixp_data *ixp = container_of(work, struct ixp_data,
				ixp_registration_work);
	ixp_registration(ixp);
}
#endif
/*
 * Process a request.
 */
static int
ixp_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ixp_data *ixp;
	unsigned int lid;
	struct ixp_q *q = NULL;
	int status;

	dprintk("%s()\n", __FUNCTION__);

	/* Sanity check */
	if (crp == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	crp->crp_etype = 0;

	if (ixp_blocked)
		return ERESTART;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

	/*
	 * find the session we are using
	 */
	lid = crp->crp_sid & 0xffffffff;
	if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
			ixp_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	ixp = ixp_sessions[lid];

	/*
	 * setup a new request ready for queuing
	 */
	q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
	if (q == NULL) {
		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
		crp->crp_etype = ENOMEM;
		goto done;
	}
	/*
	 * save some cycles by only zeroing the important bits
	 */
	memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
	q->ixp_q_ccrd = NULL;
	q->ixp_q_acrd = NULL;
	q->ixp_q_crp = crp;
	q->ixp_q_data = ixp;

	/*
	 * point the cipher and auth descriptors appropriately
	 * check that we have something to do
	 */
	if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
		q->ixp_q_ccrd = crp->crp_desc;
	else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
		q->ixp_q_acrd = crp->crp_desc;
	else {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	if (crp->crp_desc->crd_next) {
		if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
			q->ixp_q_ccrd = crp->crp_desc->crd_next;
		else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
			q->ixp_q_acrd = crp->crp_desc->crd_next;
		else {
			crp->crp_etype = ENOENT;
			dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
			goto done;
		}
	}
	/*
	 * If there is a direction change for this context then we mark it as
	 * unregistered and re-register it for the new direction.  This is not
	 * a very expensive operation and currently only tends to happen when
	 * user-space applications are doing benchmarks.
	 *
	 * DM - we should be checking for pending requests before unregistering.
	 */
	if (q->ixp_q_ccrd && ixp->ixp_registered &&
			ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
		dprintk("%s - detected direction change on session\n", __FUNCTION__);
		ixp->ixp_registered = 0;
	}
	/*
	 * if we are registered, call straight into the perform code
	 */
	if (ixp->ixp_registered) {
		ixp_q_process(q);
		return 0;
	}

	/*
	 * the only part of the context not set in newsession is the direction
	 * dependent parts
	 */
	if (q->ixp_q_ccrd) {
		ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
		if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
					IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
		} else {
			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
					IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
		}
	} else {
		/* q->ixp_q_acrd must be set if we are here */
		ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
	}
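	/*
	 * Queue the request.  The registration work is only scheduled when
	 * the list goes from empty to non-empty; otherwise the work that is
	 * already pending (or running) will pick this request up.
	 */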
	status = list_empty(&ixp->ixp_q);
	list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
	if (status)
		schedule_work(&ixp->ixp_registration_work);
	return 0;

done:
	if (q)
		kmem_cache_free(qcache, q);
	crypto_done(crp);
	return 0;
}
#ifdef __ixp46X

/*
 * key processing support for the ixp465
 */

/*
 * copy a BN (LE) into a buffer (BE) and fill out the op appropriately
 * assume zeroed and only copy bits that are significant
 */
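/*
 * Note: OCF big numbers (struct crparam) are little-endian byte arrays, while
 * the PKE EAU expects big-endian data, so bytes are reversed into the tail of
 * the destination buffer; dataLen is counted in UINT32 words.
 */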
static int
ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
{
	unsigned char *src = (unsigned char *) p->crp_p;
	unsigned char *dst;
	int len, bits = p->crp_nbits;

	dprintk("%s()\n", __FUNCTION__);

	if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
		dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
				bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
		return -1;
	}

	len = (bits + 31) / 32; /* the number of UINT32's needed */

	dst = (unsigned char *) &buf[len];
	dst--;

	while (bits > 0) {
		*dst-- = *src++;
		bits -= 8;
	}

#if 0 /* no need to zero remaining bits as it is done during request alloc */
	while (dst > (unsigned char *) buf)
		*dst-- = '\0';
#endif

	op->pData = buf;
	op->dataLen = len;
	return 0;
}
/*
 * copy out the result, be as forgiving as we can about small output buffers
 */
static int
ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
{
	unsigned char *dst = (unsigned char *) p->crp_p;
	unsigned char *src = (unsigned char *) buf;
	int len, z, bits = p->crp_nbits;

	dprintk("%s()\n", __FUNCTION__);

	len = op->dataLen * sizeof(UINT32);

	/* skip leading zeroes to be small buffer friendly */
	z = 0;
	while (z < len && src[z] == '\0')
		z++;

	src += len;
	src--;
	len -= z;

	while (len > 0 && bits > 0) {
		*dst++ = *src--;
		len--;
		bits -= 8;
	}

	while (bits > 0) {
		*dst++ = '\0';
		bits -= 8;
	}

	if (len > 0) {
		dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
				__FUNCTION__, len, z, p->crp_nbits / 8);
		return -1;
	}

	return 0;
}
/*
 * the parameter offsets for exp_mod
 */
#define IXP_PARAM_BASE 0
#define IXP_PARAM_EXP  1
#define IXP_PARAM_MOD  2
#define IXP_PARAM_RES  3

/*
 * key processing complete callback, is also used to start processing
 * by passing a NULL for pResult
 */
static void
ixp_kperform_cb(
	IxCryptoAccPkeEauOperation operation,
	IxCryptoAccPkeEauOpResult *pResult,
	BOOL carryOrBorrow,
	IxCryptoAccStatus status)
{
	struct ixp_pkq *q, *tmp;
	unsigned long flags;

	dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
			carryOrBorrow, status);

	/* handle a completed request */
	if (pResult) {
		if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
			q = ixp_pk_cur;
			if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
				dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
				q->pkq_krp->krp_status = ERANGE; /* could do better */
			} else {
				/* copy out the result */
				if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
						&q->pkq_result, q->pkq_obuf))
					q->pkq_krp->krp_status = ERANGE;
			}
			crypto_kdone(q->pkq_krp);
			kfree(q);
			ixp_pk_cur = NULL;
		} else
			printk("%s - callback with invalid result pointer\n", __FUNCTION__);
	}

	spin_lock_irqsave(&ixp_pkq_lock, flags);
	if (ixp_pk_cur || list_empty(&ixp_pkq)) {
		spin_unlock_irqrestore(&ixp_pkq_lock, flags);
		return;
	}
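	/*
	 * Nothing in flight: start the next queued operation.  If the perform
	 * call fails, complete that request with an error and move on to the
	 * one after it.
	 */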
	list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {
		list_del(&q->pkq_list);
		ixp_pk_cur = q;
		spin_unlock_irqrestore(&ixp_pkq_lock, flags);

		status = ixCryptoAccPkeEauPerform(
				IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
				&q->pkq_op,
				ixp_kperform_cb,
				&q->pkq_result);

		if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
			dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
			return; /* the callback will re-enter here when this op completes */
		} else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
			printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
		} else {
			printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
					__FUNCTION__, status);
		}
		q->pkq_krp->krp_status = ERANGE; /* could do better */
		crypto_kdone(q->pkq_krp);
		kfree(q);
		spin_lock_irqsave(&ixp_pkq_lock, flags);
	}
	spin_unlock_irqrestore(&ixp_pkq_lock, flags);
}
static int
ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
{
	struct ixp_pkq *q;
	int rc = 0;
	unsigned long flags;

	dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
			krp->krp_param[IXP_PARAM_BASE].crp_nbits,
			krp->krp_param[IXP_PARAM_EXP].crp_nbits,
			krp->krp_param[IXP_PARAM_MOD].crp_nbits,
			krp->krp_param[IXP_PARAM_RES].crp_nbits);

	if (krp->krp_op != CRK_MOD_EXP) {
		krp->krp_status = EOPNOTSUPP;
		goto err;
	}

	q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
	if (q == NULL) {
		krp->krp_status = ENOMEM;
		goto err;
	}

	/*
	 * The PKE engine does not appear to zero the output buffer
	 * appropriately, so we need to do it all here.
	 */
	memset(q, 0, sizeof(*q));

	q->pkq_krp = krp;
	INIT_LIST_HEAD(&q->pkq_list);

	if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
			q->pkq_ibuf0))
		rc = 1;
	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
			&q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
		rc = 2;
	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
			&q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
		rc = 3;

	if (rc) {
		kfree(q);
		krp->krp_status = ERANGE;
		goto err;
	}

	q->pkq_result.pData = q->pkq_obuf;
	q->pkq_result.dataLen =
			(krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;

	spin_lock_irqsave(&ixp_pkq_lock, flags);
	list_add_tail(&q->pkq_list, &ixp_pkq);
	spin_unlock_irqrestore(&ixp_pkq_lock, flags);

	if (!ixp_pk_cur)
		ixp_kperform_cb(0, NULL, 0, 0);
	return (0);

err:
	crypto_kdone(krp);
	return (0);
}
#ifdef CONFIG_OCF_RANDOMHARVEST
/*
 * We run the random number generator output through SHA so that it
 * is FIPS compliant.
 */
static volatile int sha_done = 0;
static unsigned char sha_digest[20];

static void
ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
{
	dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
	if (sha_digest != digest)
		printk("digest error\n");
	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
		sha_done = 1;
	else
		sha_done = -status;
}

static int
ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
{
	IxCryptoAccStatus status;
	int i, n, rc;

	dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
	memset(buf, 0, maxwords * sizeof(*buf));

	status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
	if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
		dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
				__FUNCTION__, status);
		return 0;
	}

	/*
	 * run the random data through SHA to make it look more random
	 */
	n = sizeof(sha_digest); /* process digest bytes at a time */

	rc = 0;
	for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
		if ((maxwords - i) * sizeof(*buf) < n)
			n = (maxwords - i) * sizeof(*buf);
		sha_done = 0;
		status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
				(UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
		if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
			dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
			return -EIO;
		}
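		/*
		 * ixCryptoAccPkeHashPerform() is asynchronous: spin (yielding
		 * the CPU with schedule()) until ixp_hash_cb() flags completion
		 * or an error in sha_done.
		 */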
		while (!sha_done)
			schedule();
		if (sha_done < 0) {
			dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
			return 0;
		}
		memcpy(&buf[i], sha_digest, n);
		rc += n / sizeof(*buf);
	}

	return rc;
}
#endif /* CONFIG_OCF_RANDOMHARVEST */

#endif /* __ixp46X */
/*
 * our driver startup and shutdown routines
 */
static int
ixp_init(void)
{
	dprintk("%s(%p)\n", __FUNCTION__, ixp_init);

	if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
		printk("ixCryptoAccInit failed, assuming already initialised!\n");

	qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
				SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
				, NULL
#endif
				);
	if (!qcache) {
		printk("failed to create Qcache\n");
		return -ENOENT;
	}

	memset(&ixpdev, 0, sizeof(ixpdev));
	softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);

	ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
				CRYPTOCAP_F_HARDWARE);
	if (ixp_id < 0)
		panic("IXP/OCF crypto device cannot initialize!");

#define REGISTER(alg) \
	crypto_register(ixp_id,alg,0,0)

	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
#endif
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
#undef REGISTER

#ifdef __ixp46X
	spin_lock_init(&ixp_pkq_lock);

	/*
	 * we do not enable the go fast options here as they can potentially
	 * allow timing based attacks
	 *
	 * http://www.openssl.org/news/secadv_20030219.txt
	 */
	ixCryptoAccPkeEauExpConfig(0, 0);
	crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
#ifdef CONFIG_OCF_RANDOMHARVEST
	crypto_rregister(ixp_id, ixp_read_random, NULL);
#endif
#endif

	return 0;
}
static void
ixp_exit(void)
{
	dprintk("%s()\n", __FUNCTION__);

	crypto_unregister_all(ixp_id);
	ixp_id = -1;

	kmem_cache_destroy(qcache);
	qcache = NULL;
}

module_init(ixp_init);
module_exit(ixp_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <[email protected]>");
MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");