ecp_sm2p256.c 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695
  1. /*
  2. * Copyright 2023-2025 The OpenSSL Project Authors. All Rights Reserved.
  3. *
  4. * Licensed under the Apache License 2.0 (the "License"). You may not use
  5. * this file except in compliance with the License. You can obtain a copy
  6. * in the file LICENSE in the source distribution or at
  7. * https://www.openssl.org/source/license.html
  8. *
  9. */
  10. /*
  11. * SM2 low level APIs are deprecated for public use, but still ok for
  12. * internal use.
  13. */
  14. #include "internal/deprecated.h"
  15. #include <string.h>
  16. #include <openssl/err.h>
  17. #include "crypto/bn.h"
  18. #include "ec_local.h"
  19. #include "internal/common.h"
  20. #include "internal/constant_time.h"
  21. #define P256_LIMBS (256 / BN_BITS2)
  22. #if !defined(OPENSSL_NO_SM2_PRECOMP)
  23. extern const BN_ULONG ecp_sm2p256_precomputed[8 * 32 * 256];
  24. #endif
/*
 * Point in Jacobian projective coordinates (X, Y, Z), where the affine
 * point is (X/Z^2, Y/Z^3).  Z == 0 denotes the point at infinity.
 * Each coordinate is a 256-bit value stored as P256_LIMBS little-endian
 * BN_ULONG limbs.
 */
typedef struct {
    BN_ULONG X[P256_LIMBS];
    BN_ULONG Y[P256_LIMBS];
    BN_ULONG Z[P256_LIMBS];
} P256_POINT;

/* Point in affine coordinates (x, y); cannot represent infinity. */
typedef struct {
    BN_ULONG X[P256_LIMBS];
    BN_ULONG Y[P256_LIMBS];
} P256_POINT_AFFINE;
#if !defined(OPENSSL_NO_SM2_PRECOMP)
/*
 * Coordinates of G, for which we have precomputed tables.
 * Values are little-endian limb order (least significant limb first).
 */
ALIGN32 static const BN_ULONG def_xG[P256_LIMBS] = {
    0x715a4589334c74c7, 0x8fe30bbff2660be1,
    0x5f9904466a39c994, 0x32c4ae2c1f198119
};

ALIGN32 static const BN_ULONG def_yG[P256_LIMBS] = {
    0x02df32e52139f0a0, 0xd0a9877cc62a4740,
    0x59bdcee36b692153, 0xbc3736a2f4f6779c,
};
#endif

/*
 * p and order for SM2 according to GB/T 32918.5-2017.
 * def_p is the field prime; ONE is the field element 1, used both as a
 * comparison target (is_one) and as the Z coordinate of affine points.
 */
ALIGN32 static const BN_ULONG def_p[P256_LIMBS] = {
    0xffffffffffffffff, 0xffffffff00000000,
    0xffffffffffffffff, 0xfffffffeffffffff
};

ALIGN32 static const BN_ULONG ONE[P256_LIMBS] = {1, 0, 0, 0};
/* Functions implemented in assembly */
/*
 * Most of below mentioned functions *preserve* the property of inputs
 * being fully reduced, i.e. being in [0, modulus) range. Simply put if
 * inputs are fully reduced, then output is too.
 */
/* Right shift: a >> 1 (plain limb shift, not modular) */
void bn_rshift1(BN_ULONG *a);
/* Sub: r = a - b (plain limb subtraction; caller guarantees a >= b) */
void bn_sub(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b);
/* Modular div by 2: r = a / 2 mod p */
void ecp_sm2p256_div_by_2(BN_ULONG *r, const BN_ULONG *a);
/* Modular div by 2: r = a / 2 mod n, where n = ord(p) */
void ecp_sm2p256_div_by_2_mod_ord(BN_ULONG *r, const BN_ULONG *a);
/* Modular add: r = a + b mod p */
void ecp_sm2p256_add(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b);
/* Modular sub: r = a - b mod p */
void ecp_sm2p256_sub(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b);
/* Modular sub: r = a - b mod n, where n = ord(p) */
void ecp_sm2p256_sub_mod_ord(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b);
/* Modular mul by 3: out = 3 * a mod p */
void ecp_sm2p256_mul_by_3(BN_ULONG *r, const BN_ULONG *a);
/* Modular mul: r = a * b mod p */
void ecp_sm2p256_mul(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b);
/* Modular sqr: r = a ^ 2 mod p */
void ecp_sm2p256_sqr(BN_ULONG *r, const BN_ULONG *a);
  77. static ossl_inline BN_ULONG is_zeros(const BN_ULONG *a)
  78. {
  79. BN_ULONG res;
  80. res = a[0] | a[1] | a[2] | a[3];
  81. return constant_time_is_zero_64(res);
  82. }
  83. static ossl_inline int is_equal(const BN_ULONG *a, const BN_ULONG *b)
  84. {
  85. BN_ULONG res;
  86. res = a[0] ^ b[0];
  87. res |= a[1] ^ b[1];
  88. res |= a[2] ^ b[2];
  89. res |= a[3] ^ b[3];
  90. return constant_time_is_zero_64(res);
  91. }
  92. static ossl_inline int is_greater(const BN_ULONG *a, const BN_ULONG *b)
  93. {
  94. int i;
  95. for (i = P256_LIMBS - 1; i >= 0; --i) {
  96. if (a[i] > b[i])
  97. return 1;
  98. if (a[i] < b[i])
  99. return -1;
  100. }
  101. return 0;
  102. }
  103. #define is_one(a) is_equal(a, ONE)
  104. #define is_even(a) !(a[0] & 1)
  105. #define is_point_equal(a, b) \
  106. is_equal(a->X, b->X) && \
  107. is_equal(a->Y, b->Y) && \
  108. is_equal(a->Z, b->Z)
  109. /* Bignum and field elements conversion */
  110. #define ecp_sm2p256_bignum_field_elem(out, in) \
  111. bn_copy_words(out, in, P256_LIMBS)
/*
 * Binary algorithm for inversion in Fp (binary extended GCD).
 * Computes out = in^(-1) mod |mod| using only shifts, subtractions and
 * the supplied modular halve (mod_div) and modular subtract (mod_sub)
 * primitives, so the same macro serves both mod p and mod ord(p).
 *
 * Invariants maintained per iteration: x1 * in == u (mod),
 * x2 * in == v (mod).  When u or v reaches 1, the matching x is the
 * inverse.  If in == 0 there is no inverse and out is left untouched.
 *
 * NOTE(review): loop trip count and branches depend on the value being
 * inverted, so this is not constant-time — confirm it is only applied
 * to values that are public by the time inversion happens (e.g. Z
 * coordinates of output points).
 */
#define BN_MOD_INV(out, in, mod_div, mod_sub, mod) \
    do { \
        ALIGN32 BN_ULONG u[4]; \
        ALIGN32 BN_ULONG v[4]; \
        ALIGN32 BN_ULONG x1[4] = {1, 0, 0, 0}; \
        ALIGN32 BN_ULONG x2[4] = {0}; \
        \
        if (is_zeros(in)) /* zero has no inverse */ \
            return; \
        memcpy(u, in, 32); \
        memcpy(v, mod, 32); \
        while (!is_one(u) && !is_one(v)) { \
            /* strip factors of 2, halving the witnesses modularly */ \
            while (is_even(u)) { \
                bn_rshift1(u); \
                mod_div(x1, x1); \
            } \
            while (is_even(v)) { \
                bn_rshift1(v); \
                mod_div(x2, x2); \
            } \
            /* subtract the smaller odd value from the larger */ \
            if (is_greater(u, v) == 1) { \
                bn_sub(u, u, v); \
                mod_sub(x1, x1, x2); \
            } else { \
                bn_sub(v, v, u); \
                mod_sub(x2, x2, x1); \
            } \
        } \
        if (is_one(u)) \
            memcpy(out, x1, 32); \
        else \
            memcpy(out, x2, 32); \
    } while (0)
  146. /* Modular inverse |out| = |in|^(-1) mod |p|. */
  147. static ossl_inline void ecp_sm2p256_mod_inverse(BN_ULONG* out,
  148. const BN_ULONG* in) {
  149. BN_MOD_INV(out, in, ecp_sm2p256_div_by_2, ecp_sm2p256_sub, def_p);
  150. }
/*
 * Point double: R <- P + P in Jacobian coordinates.
 * Standard doubling formulas:
 *   M  = 3*(X - Z^2)*(X + Z^2)   (uses a = -3 for SM2)
 *   S  = 4*X*Y^2
 *   X' = M^2 - 2*S
 *   Y' = M*(S - X') - 8*Y^4
 *   Z' = 2*Y*Z
 * R and P may alias; temporaries are ordered so every input is consumed
 * before it is overwritten — do not reorder these calls.
 */
static void ecp_sm2p256_point_double(P256_POINT *R, const P256_POINT *P)
{
    unsigned int i;
    ALIGN32 BN_ULONG tmp0[P256_LIMBS];
    ALIGN32 BN_ULONG tmp1[P256_LIMBS];
    ALIGN32 BN_ULONG tmp2[P256_LIMBS];

    /* zero-check P->Z: doubling infinity yields infinity */
    if (is_zeros(P->Z)) {
        for (i = 0; i < P256_LIMBS; ++i)
            R->Z[i] = 0;
        return;
    }

    ecp_sm2p256_sqr(tmp0, P->Z);            /* tmp0 = Z^2 */
    ecp_sm2p256_sub(tmp1, P->X, tmp0);      /* tmp1 = X - Z^2 */
    ecp_sm2p256_add(tmp0, P->X, tmp0);      /* tmp0 = X + Z^2 */
    ecp_sm2p256_mul(tmp1, tmp1, tmp0);      /* tmp1 = X^2 - Z^4 */
    ecp_sm2p256_mul_by_3(tmp1, tmp1);       /* tmp1 = M */
    ecp_sm2p256_add(R->Y, P->Y, P->Y);      /* R->Y = 2Y */
    ecp_sm2p256_mul(R->Z, R->Y, P->Z);      /* Z' = 2*Y*Z */
    ecp_sm2p256_sqr(R->Y, R->Y);            /* R->Y = 4Y^2 */
    ecp_sm2p256_mul(tmp2, R->Y, P->X);      /* tmp2 = S = 4*X*Y^2 */
    ecp_sm2p256_sqr(R->Y, R->Y);            /* R->Y = 16Y^4 */
    ecp_sm2p256_div_by_2(R->Y, R->Y);       /* R->Y = 8Y^4 */
    ecp_sm2p256_sqr(R->X, tmp1);            /* R->X = M^2 */
    ecp_sm2p256_add(tmp0, tmp2, tmp2);      /* tmp0 = 2S */
    ecp_sm2p256_sub(R->X, R->X, tmp0);      /* X' = M^2 - 2S */
    ecp_sm2p256_sub(tmp0, tmp2, R->X);      /* tmp0 = S - X' */
    ecp_sm2p256_mul(tmp0, tmp0, tmp1);      /* tmp0 = M*(S - X') */
    ecp_sm2p256_sub(tmp1, tmp0, R->Y);      /* Y' = M*(S - X') - 8Y^4 */
    memcpy(R->Y, tmp1, 32);
}
/*
 * Point add affine: R <- P + Q, P Jacobian, Q affine (Q implicitly has
 * Z == 1, which is what makes this cheaper than the general add).
 * Handles the special cases explicitly:
 *   - P == infinity: R = Q (lifted to Jacobian with Z = 1)
 *   - P == Q (same affine point): fall back to doubling
 *   - P == -Q: R = infinity
 * R may alias P.
 */
static void ecp_sm2p256_point_add_affine(P256_POINT *R, const P256_POINT *P,
                                         const P256_POINT_AFFINE *Q)
{
    unsigned int i;
    ALIGN32 BN_ULONG tmp0[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG tmp1[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG tmp2[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG tmp3[P256_LIMBS] = {0};

    /* zero-check P->Z: P is infinity, so R = Q with Z = 1 */
    if (is_zeros(P->Z)) {
        for (i = 0; i < P256_LIMBS; ++i) {
            R->X[i] = Q->X[i];
            R->Y[i] = Q->Y[i];
            R->Z[i] = 0;
        }
        R->Z[0] = 1;
        return;
    }

    ecp_sm2p256_sqr(tmp0, P->Z);            /* tmp0 = Z1^2 */
    ecp_sm2p256_mul(tmp1, tmp0, P->Z);      /* tmp1 = Z1^3 */
    ecp_sm2p256_mul(tmp0, tmp0, Q->X);      /* tmp0 = U2 = X2*Z1^2 */
    ecp_sm2p256_mul(tmp1, tmp1, Q->Y);      /* tmp1 = S2 = Y2*Z1^3 */
    ecp_sm2p256_sub(tmp0, tmp0, P->X);      /* tmp0 = H = U2 - X1 */
    ecp_sm2p256_sub(tmp1, tmp1, P->Y);      /* tmp1 = r = S2 - Y1 */

    /* zero-check tmp0, tmp1: H == 0 means same x-coordinate */
    if (is_zeros(tmp0)) {
        if (is_zeros(tmp1)) {
            /* P == Q: double Q (lifted to Jacobian) instead */
            P256_POINT K;

            for (i = 0; i < P256_LIMBS; ++i) {
                K.X[i] = Q->X[i];
                K.Y[i] = Q->Y[i];
                K.Z[i] = 0;
            }
            K.Z[0] = 1;
            ecp_sm2p256_point_double(R, &K);
        } else {
            /* P == -Q: result is the point at infinity */
            for (i = 0; i < P256_LIMBS; ++i)
                R->Z[i] = 0;
        }
        return;
    }

    ecp_sm2p256_mul(R->Z, P->Z, tmp0);      /* Z3 = Z1*H */
    ecp_sm2p256_sqr(tmp2, tmp0);            /* tmp2 = H^2 */
    ecp_sm2p256_mul(tmp3, tmp2, tmp0);      /* tmp3 = H^3 */
    ecp_sm2p256_mul(tmp2, tmp2, P->X);      /* tmp2 = V = X1*H^2 */
    ecp_sm2p256_add(tmp0, tmp2, tmp2);      /* tmp0 = 2V */
    ecp_sm2p256_sqr(R->X, tmp1);            /* R->X = r^2 */
    ecp_sm2p256_sub(R->X, R->X, tmp0);      /* X3 = r^2 - 2V ... */
    ecp_sm2p256_sub(R->X, R->X, tmp3);      /* ... - H^3 */
    ecp_sm2p256_sub(tmp2, tmp2, R->X);      /* tmp2 = V - X3 */
    ecp_sm2p256_mul(tmp2, tmp2, tmp1);      /* tmp2 = r*(V - X3) */
    ecp_sm2p256_mul(tmp3, tmp3, P->Y);      /* tmp3 = Y1*H^3 */
    ecp_sm2p256_sub(R->Y, tmp2, tmp3);      /* Y3 = r*(V - X3) - Y1*H^3 */
}
/*
 * Point add: R <- P + Q, both in Jacobian coordinates.
 * Special cases: infinity operands copy through, and equal points are
 * routed to doubling (the generic formulas below would produce H == 0
 * and an incorrect result for P == Q).  R may alias P or Q.
 */
static void ecp_sm2p256_point_add(P256_POINT *R, const P256_POINT *P,
                                  const P256_POINT *Q)
{
    unsigned int i;
    ALIGN32 BN_ULONG tmp0[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG tmp1[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG tmp2[P256_LIMBS] = {0};

    /* zero-check P | Q ->Z: infinity + X = X */
    if (is_zeros(P->Z)) {
        for (i = 0; i < P256_LIMBS; ++i) {
            R->X[i] = Q->X[i];
            R->Y[i] = Q->Y[i];
            R->Z[i] = Q->Z[i];
        }
        return;
    } else if (is_zeros(Q->Z)) {
        for (i = 0; i < P256_LIMBS; ++i) {
            R->X[i] = P->X[i];
            R->Y[i] = P->Y[i];
            R->Z[i] = P->Z[i];
        }
        return;
    } else if (is_point_equal(P, Q)) {
        /* same representation => same point: use doubling */
        ecp_sm2p256_point_double(R, Q);
        return;
    }

    ecp_sm2p256_sqr(tmp0, P->Z);            /* tmp0 = Z1^2 */
    ecp_sm2p256_mul(tmp1, tmp0, P->Z);      /* tmp1 = Z1^3 */
    ecp_sm2p256_mul(tmp0, tmp0, Q->X);      /* tmp0 = U2 = X2*Z1^2 */
    ecp_sm2p256_mul(tmp1, tmp1, Q->Y);      /* tmp1 = S2 = Y2*Z1^3 */
    ecp_sm2p256_mul(R->Y, P->Y, Q->Z);      /* R->Y = Y1*Z2 */
    ecp_sm2p256_mul(R->Z, Q->Z, P->Z);      /* R->Z = Z1*Z2 */
    ecp_sm2p256_sqr(tmp2, Q->Z);            /* tmp2 = Z2^2 */
    ecp_sm2p256_mul(R->Y, tmp2, R->Y);      /* R->Y = S1 = Y1*Z2^3 */
    ecp_sm2p256_mul(R->X, tmp2, P->X);      /* R->X = U1 = X1*Z2^2 */
    ecp_sm2p256_sub(tmp0, tmp0, R->X);      /* tmp0 = H = U2 - U1 */
    ecp_sm2p256_mul(R->Z, tmp0, R->Z);      /* Z3 = Z1*Z2*H */
    ecp_sm2p256_sub(tmp1, tmp1, R->Y);      /* tmp1 = r = S2 - S1 */
    ecp_sm2p256_sqr(tmp2, tmp0);            /* tmp2 = H^2 */
    ecp_sm2p256_mul(tmp0, tmp0, tmp2);      /* tmp0 = H^3 */
    ecp_sm2p256_mul(tmp2, tmp2, R->X);      /* tmp2 = U1*H^2 */
    ecp_sm2p256_sqr(R->X, tmp1);            /* R->X = r^2 */
    ecp_sm2p256_sub(R->X, R->X, tmp2);      /* X3 = r^2 - 2*U1*H^2 ... */
    ecp_sm2p256_sub(R->X, R->X, tmp2);
    ecp_sm2p256_sub(R->X, R->X, tmp0);      /* ... - H^3 */
    ecp_sm2p256_sub(tmp2, tmp2, R->X);      /* tmp2 = U1*H^2 - X3 */
    ecp_sm2p256_mul(tmp2, tmp1, tmp2);      /* tmp2 = r*(U1*H^2 - X3) */
    ecp_sm2p256_mul(tmp0, tmp0, R->Y);      /* tmp0 = S1*H^3 */
    ecp_sm2p256_sub(R->Y, tmp2, tmp0);      /* Y3 = r*(U1*H^2-X3) - S1*H^3 */
}
#if !defined(OPENSSL_NO_SM2_PRECOMP)
/*
 * Base point mul by scalar: k - scalar, G - base point.
 * Uses the precomputed table ecp_sm2p256_precomputed: for each of the 32
 * byte-windows of k there are 256 affine points (window value * 2^(8*i) * G),
 * each stored as 8 limbs (X then Y, 4 limbs apiece), hence the
 * "index * 8" limb offsets below.  The result is simply the sum of the
 * selected table entries; a zero window contributes nothing.
 *
 * NOTE(review): table entries are loaded at secret-dependent offsets, so
 * the memory access pattern depends on k — confirm this is acceptable in
 * the project's threat model before relying on it for private scalars.
 */
static void ecp_sm2p256_point_G_mul_by_scalar(P256_POINT *R, const BN_ULONG *k)
{
    unsigned int i, index, mask = 0xff;
    P256_POINT_AFFINE Q;

    /* Start from infinity (all-zero point, Z == 0). */
    memset(R, 0, sizeof(P256_POINT));

    if (is_zeros(k))
        return;

    /* Window 0: the low byte of k, handled without an add. */
    index = k[0] & mask;
    if (index) {
        index = index * 8;
        memcpy(R->X, ecp_sm2p256_precomputed + index, 32);
        memcpy(R->Y, ecp_sm2p256_precomputed + index + P256_LIMBS, 32);
        R->Z[0] = 1;
    }

    /* Windows 1..31: accumulate the matching table point, if nonzero. */
    for (i = 1; i < 32; ++i) {
        index = (k[i / 8] >> (8 * (i % 8))) & mask;
        if (index) {
            index = index + i * 256;    /* select sub-table for window i */
            index = index * 8;          /* entries are 8 limbs wide */
            memcpy(Q.X, ecp_sm2p256_precomputed + index, 32);
            memcpy(Q.Y, ecp_sm2p256_precomputed + index + P256_LIMBS, 32);
            ecp_sm2p256_point_add_affine(R, R, &Q);
        }
    }
}
#endif
/*
 * Affine point mul by scalar: k - scalar, P - affine point.
 * Fixed-window (width 4) left-to-right method: build a table of
 * 1P..15P, then for each of the 64 nibbles of k (most significant
 * first) double four times and add the table entry.
 *
 * NOTE(review): precomputed[0] is intentionally left uninitialized —
 * a window value of 0 never dereferences it.  Table lookups and the
 * "skip leading zero windows" logic are k-dependent, so this is not a
 * constant-time ladder; confirm acceptable for the intended callers.
 */
static void ecp_sm2p256_point_P_mul_by_scalar(P256_POINT *R, const BN_ULONG *k,
                                              P256_POINT_AFFINE P)
{
    int i, init = 0;
    unsigned int index, mask = 0x0f;
    ALIGN64 P256_POINT precomputed[16];

    /* Start from infinity. */
    memset(R, 0, sizeof(P256_POINT));

    if (is_zeros(k))
        return;

    /* The first value of the precomputed table is P. */
    memcpy(precomputed[1].X, P.X, 32);
    memcpy(precomputed[1].Y, P.Y, 32);
    precomputed[1].Z[0] = 1;
    precomputed[1].Z[1] = 0;
    precomputed[1].Z[2] = 0;
    precomputed[1].Z[3] = 0;

    /* The second value of the precomputed table is 2P. */
    ecp_sm2p256_point_double(&precomputed[2], &precomputed[1]);

    /* The subsequent elements are 3P, 4P, and so on. */
    for (i = 3; i < 16; ++i)
        ecp_sm2p256_point_add_affine(&precomputed[i], &precomputed[i - 1], &P);

    /* Scan the 64 4-bit windows of k from most to least significant. */
    for (i = 64 - 1; i >= 0; --i) {
        index = (k[i / 16] >> (4 * (i % 16))) & mask;
        if (init == 0) {
            /* Nothing accumulated yet: first nonzero window seeds R. */
            if (index) {
                memcpy(R, &precomputed[index], sizeof(P256_POINT));
                init = 1;
            }
        } else {
            /* Shift the accumulator by one window (x16), then add. */
            ecp_sm2p256_point_double(R, R);
            ecp_sm2p256_point_double(R, R);
            ecp_sm2p256_point_double(R, R);
            ecp_sm2p256_point_double(R, R);

            if (index)
                ecp_sm2p256_point_add(R, R, &precomputed[index]);
        }
    }
}
  358. /* Get affine point */
  359. static void ecp_sm2p256_point_get_affine(P256_POINT_AFFINE *R,
  360. const P256_POINT *P)
  361. {
  362. ALIGN32 BN_ULONG z_inv3[P256_LIMBS] = {0};
  363. ALIGN32 BN_ULONG z_inv2[P256_LIMBS] = {0};
  364. if (is_one(P->Z)) {
  365. memcpy(R->X, P->X, 32);
  366. memcpy(R->Y, P->Y, 32);
  367. return;
  368. }
  369. ecp_sm2p256_mod_inverse(z_inv3, P->Z);
  370. ecp_sm2p256_sqr(z_inv2, z_inv3);
  371. ecp_sm2p256_mul(R->X, P->X, z_inv2);
  372. ecp_sm2p256_mul(z_inv3, z_inv3, z_inv2);
  373. ecp_sm2p256_mul(R->Y, P->Y, z_inv3);
  374. }
  375. #if !defined(OPENSSL_NO_SM2_PRECOMP)
  376. static int ecp_sm2p256_is_affine_G(const EC_POINT *generator)
  377. {
  378. return (bn_get_top(generator->X) == P256_LIMBS)
  379. && (bn_get_top(generator->Y) == P256_LIMBS)
  380. && is_equal(bn_get_words(generator->X), def_xG)
  381. && is_equal(bn_get_words(generator->Y), def_yG)
  382. && (generator->Z_is_one == 1);
  383. }
  384. #endif
/*
 * r = sum(scalar[i]*point[i]) over |num| pairs.
 *
 * Each point is converted to affine and multiplied by its (reduced)
 * scalar with the windowed routine, then accumulated into r.  Scalars
 * wider than 256 bits or negative are first reduced mod the group
 * order via BN_nnmod into a BN_CTX temporary.
 *
 * NOTE(review): relies on the caller having done BN_CTX_start(ctx)
 * (BN_CTX_get is used here without a local start/end) — true for
 * ecp_sm2p256_points_mul below; confirm for any new caller.
 * Returns 1 on success, 0 on error.
 */
static int ecp_sm2p256_windowed_mul(const EC_GROUP *group,
                                    P256_POINT *r,
                                    const BIGNUM **scalar,
                                    const EC_POINT **point,
                                    size_t num, BN_CTX *ctx)
{
    unsigned int i;
    int ret = 0;
    const BIGNUM **scalars = NULL;
    ALIGN32 BN_ULONG k[P256_LIMBS] = {0};
    P256_POINT kP;
    /* union: same storage viewed as Jacobian or affine point */
    ALIGN32 union {
        P256_POINT p;
        P256_POINT_AFFINE a;
    } t, p;

    if (num > OPENSSL_MALLOC_MAX_NELEMS(P256_POINT)
        || (scalars = OPENSSL_malloc(num * sizeof(BIGNUM *))) == NULL) {
        ECerr(ERR_LIB_EC, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    /* Accumulator starts at infinity. */
    memset(r, 0, sizeof(P256_POINT));

    for (i = 0; i < num; i++) {
        /* Infinity contributes nothing regardless of the scalar. */
        if (EC_POINT_is_at_infinity(group, point[i]))
            continue;

        /* Normalize out-of-range scalars to [0, order). */
        if ((BN_num_bits(scalar[i]) > 256) || BN_is_negative(scalar[i])) {
            BIGNUM *tmp;

            if ((tmp = BN_CTX_get(ctx)) == NULL)
                goto err;
            if (!BN_nnmod(tmp, scalar[i], group->order, ctx)) {
                ECerr(ERR_LIB_EC, ERR_R_BN_LIB);
                goto err;
            }
            scalars[i] = tmp;
        } else {
            scalars[i] = scalar[i];
        }

        /* Pull scalar and point coordinates into fixed-width limbs. */
        if (ecp_sm2p256_bignum_field_elem(k, scalars[i]) <= 0
            || ecp_sm2p256_bignum_field_elem(p.p.X, point[i]->X) <= 0
            || ecp_sm2p256_bignum_field_elem(p.p.Y, point[i]->Y) <= 0
            || ecp_sm2p256_bignum_field_elem(p.p.Z, point[i]->Z) <= 0) {
            ECerr(ERR_LIB_EC, EC_R_COORDINATES_OUT_OF_RANGE);
            goto err;
        }

        ecp_sm2p256_point_get_affine(&t.a, &p.p);
        ecp_sm2p256_point_P_mul_by_scalar(&kP, k, t.a);
        ecp_sm2p256_point_add(r, r, &kP);
    }

    ret = 1;
err:
    OPENSSL_free(scalars);
    return ret;
}
/*
 * r = scalar*G + sum(scalars[i]*points[i])  (EC_METHOD "mul" entry).
 *
 * The generator term uses the precomputed-G fast path when the group's
 * generator matches the standard SM2 G; otherwise it goes through the
 * generic windowed multiply with a single (generator, scalar) pair.
 * The points[] term is computed with ecp_sm2p256_windowed_mul and, if
 * both terms exist, combined with a point addition.
 * Returns 1 on success, 0 on error.
 */
static int ecp_sm2p256_points_mul(const EC_GROUP *group,
                                  EC_POINT *r,
                                  const BIGNUM *scalar,
                                  size_t num,
                                  const EC_POINT *points[],
                                  const BIGNUM *scalars[], BN_CTX *ctx)
{
    int ret = 0, p_is_infinity = 0;
    const EC_POINT *generator = NULL;
    ALIGN32 BN_ULONG k[P256_LIMBS] = {0};
    /* union: same storage viewed as Jacobian or affine point */
    ALIGN32 union {
        P256_POINT p;
        P256_POINT_AFFINE a;
    } t, p;

    /* num + 1 guards against size_t overflow of the combined count. */
    if ((num + 1) == 0 || (num + 1) > OPENSSL_MALLOC_MAX_NELEMS(void *)) {
        ECerr(ERR_LIB_EC, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    /* Opens the BN_CTX frame that windowed_mul's BN_CTX_get relies on. */
    BN_CTX_start(ctx);

    if (scalar) {
        generator = EC_GROUP_get0_generator(group);
        if (generator == NULL) {
            ECerr(ERR_LIB_EC, EC_R_UNDEFINED_GENERATOR);
            goto err;
        }

        if (!ecp_sm2p256_bignum_field_elem(k, scalar)) {
            ECerr(ERR_LIB_EC, EC_R_COORDINATES_OUT_OF_RANGE);
            goto err;
        }
#if !defined(OPENSSL_NO_SM2_PRECOMP)
        if (ecp_sm2p256_is_affine_G(generator)) {
            /* Fast path: table-driven multiply of the standard G. */
            ecp_sm2p256_point_G_mul_by_scalar(&p.p, k);
        } else
#endif
        {
            /* if no precomputed table */
            const EC_POINT *new_generator[1];
            const BIGNUM *g_scalars[1];

            new_generator[0] = generator;
            g_scalars[0] = scalar;

            /* num argument evaluates to 1 here (both entries non-NULL) */
            if (!ecp_sm2p256_windowed_mul(group, &p.p, g_scalars, new_generator,
                                          (new_generator[0] != NULL
                                           && g_scalars[0] != NULL), ctx))
                goto err;
        }
    } else {
        /* No generator term: p.p is free to hold the points[] sum. */
        p_is_infinity = 1;
    }

    if (num) {
        P256_POINT *out = &t.p;

        if (p_is_infinity)
            out = &p.p;     /* write the sum straight into the result */

        if (!ecp_sm2p256_windowed_mul(group, out, scalars, points, num, ctx))
            goto err;
        if (!p_is_infinity)
            ecp_sm2p256_point_add(&p.p, &p.p, out);
    }

    /* Not constant-time, but we're only operating on the public output. */
    if (!bn_set_words(r->X, p.p.X, P256_LIMBS)
        || !bn_set_words(r->Y, p.p.Y, P256_LIMBS)
        || !bn_set_words(r->Z, p.p.Z, P256_LIMBS))
        goto err;
    r->Z_is_one = is_equal(bn_get_words(r->Z), ONE) & 1;

    ret = 1;
err:
    BN_CTX_end(ctx);
    return ret;
}
  507. static int ecp_sm2p256_field_mul(const EC_GROUP *group, BIGNUM *r,
  508. const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
  509. {
  510. ALIGN32 BN_ULONG a_fe[P256_LIMBS] = {0};
  511. ALIGN32 BN_ULONG b_fe[P256_LIMBS] = {0};
  512. ALIGN32 BN_ULONG r_fe[P256_LIMBS] = {0};
  513. if (a == NULL || b == NULL || r == NULL)
  514. return 0;
  515. if (!ecp_sm2p256_bignum_field_elem(a_fe, a)
  516. || !ecp_sm2p256_bignum_field_elem(b_fe, b)) {
  517. ECerr(ERR_LIB_EC, EC_R_COORDINATES_OUT_OF_RANGE);
  518. return 0;
  519. }
  520. ecp_sm2p256_mul(r_fe, a_fe, b_fe);
  521. if (!bn_set_words(r, r_fe, P256_LIMBS))
  522. return 0;
  523. return 1;
  524. }
  525. static int ecp_sm2p256_field_sqr(const EC_GROUP *group, BIGNUM *r,
  526. const BIGNUM *a, BN_CTX *ctx)
  527. {
  528. ALIGN32 BN_ULONG a_fe[P256_LIMBS] = {0};
  529. ALIGN32 BN_ULONG r_fe[P256_LIMBS] = {0};
  530. if (a == NULL || r == NULL)
  531. return 0;
  532. if (!ecp_sm2p256_bignum_field_elem(a_fe, a)) {
  533. ECerr(ERR_LIB_EC, EC_R_COORDINATES_OUT_OF_RANGE);
  534. return 0;
  535. }
  536. ecp_sm2p256_sqr(r_fe, a_fe);
  537. if (!bn_set_words(r, r_fe, P256_LIMBS))
  538. return 0;
  539. return 1;
  540. }
/*
 * EC_METHOD for the SM2 curve: generic GFp-simple implementations for
 * group/point management, with the performance-critical entries (mul,
 * field_mul, field_sqr) overridden by the specialized routines above.
 * The initializer is positional — entry order must match EC_METHOD.
 */
const EC_METHOD *EC_GFp_sm2p256_method(void)
{
    static const EC_METHOD ret = {
        EC_FLAGS_DEFAULT_OCT,
        NID_X9_62_prime_field,
        ossl_ec_GFp_simple_group_init,
        ossl_ec_GFp_simple_group_finish,
        ossl_ec_GFp_simple_group_clear_finish,
        ossl_ec_GFp_simple_group_copy,
        ossl_ec_GFp_simple_group_set_curve,
        ossl_ec_GFp_simple_group_get_curve,
        ossl_ec_GFp_simple_group_get_degree,
        ossl_ec_group_simple_order_bits,
        ossl_ec_GFp_simple_group_check_discriminant,
        ossl_ec_GFp_simple_point_init,
        ossl_ec_GFp_simple_point_finish,
        ossl_ec_GFp_simple_point_clear_finish,
        ossl_ec_GFp_simple_point_copy,
        ossl_ec_GFp_simple_point_set_to_infinity,
        ossl_ec_GFp_simple_point_set_affine_coordinates,
        ossl_ec_GFp_simple_point_get_affine_coordinates,
        0, 0, 0,    /* point_set_compressed_coordinates, point2oct, oct2point */
        ossl_ec_GFp_simple_add,
        ossl_ec_GFp_simple_dbl,
        ossl_ec_GFp_simple_invert,
        ossl_ec_GFp_simple_is_at_infinity,
        ossl_ec_GFp_simple_is_on_curve,
        ossl_ec_GFp_simple_cmp,
        ossl_ec_GFp_simple_make_affine,
        ossl_ec_GFp_simple_points_make_affine,
        ecp_sm2p256_points_mul, /* mul */
        0 /* precompute_mult */,
        0 /* have_precompute_mult */,
        ecp_sm2p256_field_mul,
        ecp_sm2p256_field_sqr,
        0 /* field_div */,
        ossl_ec_GFp_simple_field_inv,
        0 /* field_encode */,
        0 /* field_decode */,
        0 /* field_set_to_one */,
        ossl_ec_key_simple_priv2oct,
        ossl_ec_key_simple_oct2priv,
        0, /* set private */
        ossl_ec_key_simple_generate_key,
        ossl_ec_key_simple_check_key,
        ossl_ec_key_simple_generate_public_key,
        0, /* keycopy */
        0, /* keyfinish */
        ossl_ecdh_simple_compute_key,
        ossl_ecdsa_simple_sign_setup,
        ossl_ecdsa_simple_sign_sig,
        ossl_ecdsa_simple_verify_sig,
        0, /* field_inverse_mod_ord: use constant-time fallback */
        0, /* blind_coordinates */
        0, /* ladder_pre */
        0, /* ladder_step */
        0 /* ladder_post */
    };

    return &ret;
}