HardenedBSD src tree
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

473 lines
11 KiB

  1. /*-
  2. * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
  3. * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
  4. * Copyright (c) 2014,2016 The FreeBSD Foundation
  5. * All rights reserved.
  6. *
  7. * Portions of this software were developed by John-Mark Gurney
  8. * under sponsorship of the FreeBSD Foundation and
  9. * Rubicon Communications, LLC (Netgate).
  10. *
  11. * This software was developed by Andrew Turner under
  12. * sponsorship from the FreeBSD Foundation.
  13. *
  14. * Redistribution and use in source and binary forms, with or without
  15. * modification, are permitted provided that the following conditions
  16. * are met:
  17. * 1. Redistributions of source code must retain the above copyright
  18. * notice, this list of conditions and the following disclaimer.
  19. * 2. Redistributions in binary form must reproduce the above copyright
  20. * notice, this list of conditions and the following disclaimer in the
  21. * documentation and/or other materials provided with the distribution.
  22. *
  23. * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
  24. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  25. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  26. * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
  27. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  28. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  29. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  30. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  31. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  32. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  33. * SUCH DAMAGE.
  34. */
  35. /*
  36. * This is based on the aesni code.
  37. */
  38. #include <sys/cdefs.h>
  39. __FBSDID("$FreeBSD$");
  40. #include <sys/param.h>
  41. #include <sys/systm.h>
  42. #include <sys/kernel.h>
  43. #include <sys/bus.h>
  44. #include <sys/endian.h>
  45. #include <sys/lock.h>
  46. #include <sys/malloc.h>
  47. #include <sys/mbuf.h>
  48. #include <sys/module.h>
  49. #include <sys/mutex.h>
  50. #include <sys/queue.h>
  51. #include <sys/rwlock.h>
  52. #include <sys/smp.h>
  53. #include <sys/uio.h>
  54. #include <machine/vfp.h>
  55. #include <opencrypto/cryptodev.h>
  56. #include <cryptodev_if.h>
  57. #include <crypto/armv8/armv8_crypto.h>
  58. #include <crypto/rijndael/rijndael.h>
/*
 * Per-device driver state.  (NOTE(review): "dieing" is a historical
 * misspelling of "dying"; the name is kept because it is referenced
 * throughout this file.)
 */
struct armv8_crypto_softc {
	int dieing;		/* set at detach; newsession refuses work */
	int32_t cid;		/* driver id from crypto_get_driverid() */
	struct rwlock lock;	/* orders newsession against detach */
};
/* Per-CPU fpu_kern contexts, each guarded by its own mutex. */
static struct mtx *ctx_mtx;
static struct fpu_kern_ctx **ctx_vfp;

/*
 * Look up the current CPU's fpu_kern context and lock it against
 * concurrent users.  (Name is a historical misspelling of ACQUIRE;
 * kept as-is since it is used below.)
 */
#define AQUIRE_CTX(i, ctx) \
	do { \
		(i) = PCPU_GET(cpuid); \
		mtx_lock(&ctx_mtx[(i)]); \
		(ctx) = ctx_vfp[(i)]; \
	} while (0)

/* Drop the per-CPU context taken by AQUIRE_CTX() and poison the outputs. */
#define RELEASE_CTX(i, ctx) \
	do { \
		mtx_unlock(&ctx_mtx[(i)]); \
		(i) = -1; \
		(ctx) = NULL; \
	} while (0)

static int armv8_crypto_cipher_process(struct armv8_crypto_session *,
    struct cryptodesc *, struct cryptop *);

MALLOC_DEFINE(M_ARMV8_CRYPTO, "armv8_crypto", "ARMv8 Crypto Data");
  81. static void
  82. armv8_crypto_identify(driver_t *drv, device_t parent)
  83. {
  84. /* NB: order 10 is so we get attached after h/w devices */
  85. if (device_find_child(parent, "armv8crypto", -1) == NULL &&
  86. BUS_ADD_CHILD(parent, 10, "armv8crypto", -1) == 0)
  87. panic("ARMv8 crypto: could not attach");
  88. }
  89. static int
  90. armv8_crypto_probe(device_t dev)
  91. {
  92. uint64_t reg;
  93. int ret = ENXIO;
  94. reg = READ_SPECIALREG(id_aa64isar0_el1);
  95. switch (ID_AA64ISAR0_AES_VAL(reg)) {
  96. case ID_AA64ISAR0_AES_BASE:
  97. case ID_AA64ISAR0_AES_PMULL:
  98. ret = 0;
  99. break;
  100. }
  101. device_set_desc_copy(dev, "AES-CBC");
  102. /* TODO: Check more fields as we support more features */
  103. return (ret);
  104. }
  105. static int
  106. armv8_crypto_attach(device_t dev)
  107. {
  108. struct armv8_crypto_softc *sc;
  109. int i;
  110. sc = device_get_softc(dev);
  111. sc->dieing = 0;
  112. sc->cid = crypto_get_driverid(dev, sizeof(struct armv8_crypto_session),
  113. CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC);
  114. if (sc->cid < 0) {
  115. device_printf(dev, "Could not get crypto driver id.\n");
  116. return (ENOMEM);
  117. }
  118. rw_init(&sc->lock, "armv8crypto");
  119. ctx_mtx = malloc(sizeof(*ctx_mtx) * (mp_maxid + 1), M_ARMV8_CRYPTO,
  120. M_WAITOK|M_ZERO);
  121. ctx_vfp = malloc(sizeof(*ctx_vfp) * (mp_maxid + 1), M_ARMV8_CRYPTO,
  122. M_WAITOK|M_ZERO);
  123. CPU_FOREACH(i) {
  124. ctx_vfp[i] = fpu_kern_alloc_ctx(0);
  125. mtx_init(&ctx_mtx[i], "armv8cryptoctx", NULL, MTX_DEF|MTX_NEW);
  126. }
  127. crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
  128. return (0);
  129. }
  130. static int
  131. armv8_crypto_detach(device_t dev)
  132. {
  133. struct armv8_crypto_softc *sc;
  134. int i;
  135. sc = device_get_softc(dev);
  136. rw_wlock(&sc->lock);
  137. sc->dieing = 1;
  138. rw_wunlock(&sc->lock);
  139. crypto_unregister_all(sc->cid);
  140. rw_destroy(&sc->lock);
  141. CPU_FOREACH(i) {
  142. if (ctx_vfp[i] != NULL) {
  143. mtx_destroy(&ctx_mtx[i]);
  144. fpu_kern_free_ctx(ctx_vfp[i]);
  145. }
  146. ctx_vfp[i] = NULL;
  147. }
  148. free(ctx_mtx, M_ARMV8_CRYPTO);
  149. ctx_mtx = NULL;
  150. free(ctx_vfp, M_ARMV8_CRYPTO);
  151. ctx_vfp = NULL;
  152. return (0);
  153. }
  154. static int
  155. armv8_crypto_cipher_setup(struct armv8_crypto_session *ses,
  156. struct cryptoini *encini)
  157. {
  158. int i;
  159. switch (ses->algo) {
  160. case CRYPTO_AES_CBC:
  161. switch (encini->cri_klen) {
  162. case 128:
  163. ses->rounds = AES128_ROUNDS;
  164. break;
  165. case 192:
  166. ses->rounds = AES192_ROUNDS;
  167. break;
  168. case 256:
  169. ses->rounds = AES256_ROUNDS;
  170. break;
  171. default:
  172. CRYPTDEB("invalid CBC/ICM/GCM key length");
  173. return (EINVAL);
  174. }
  175. break;
  176. default:
  177. return (EINVAL);
  178. }
  179. rijndaelKeySetupEnc(ses->enc_schedule, encini->cri_key,
  180. encini->cri_klen);
  181. rijndaelKeySetupDec(ses->dec_schedule, encini->cri_key,
  182. encini->cri_klen);
  183. for (i = 0; i < nitems(ses->enc_schedule); i++) {
  184. ses->enc_schedule[i] = bswap32(ses->enc_schedule[i]);
  185. ses->dec_schedule[i] = bswap32(ses->dec_schedule[i]);
  186. }
  187. return (0);
  188. }
/*
 * Create a new session: the initializer list must contain exactly one
 * AES-CBC entry.  Key schedules are expanded under the softc write lock
 * so a concurrent detach cannot tear the driver down mid-setup.
 */
static int
armv8_crypto_newsession(device_t dev, crypto_session_t cses,
    struct cryptoini *cri)
{
	struct armv8_crypto_softc *sc;
	struct armv8_crypto_session *ses;
	struct cryptoini *encini;
	int error;

	if (cri == NULL) {
		CRYPTDEB("no cri");
		return (EINVAL);
	}

	sc = device_get_softc(dev);
	/* Unlocked fast-path check; re-checked under the lock below. */
	if (sc->dieing)
		return (EINVAL);

	ses = NULL;
	encini = NULL;
	/* Walk the list: accept a single CRYPTO_AES_CBC entry, nothing else. */
	for (; cri != NULL; cri = cri->cri_next) {
		switch (cri->cri_alg) {
		case CRYPTO_AES_CBC:
			if (encini != NULL) {
				CRYPTDEB("encini already set");
				return (EINVAL);
			}
			encini = cri;
			break;
		default:
			CRYPTDEB("unhandled algorithm");
			return (EINVAL);
		}
	}
	if (encini == NULL) {
		CRYPTDEB("no cipher");
		return (EINVAL);
	}

	/* Serialize against detach while initializing the session. */
	rw_wlock(&sc->lock);
	if (sc->dieing) {
		rw_wunlock(&sc->lock);
		return (EINVAL);
	}

	ses = crypto_get_driver_session(cses);
	ses->algo = encini->cri_alg;

	error = armv8_crypto_cipher_setup(ses, encini);
	if (error != 0) {
		CRYPTDEB("setup failed");
		rw_wunlock(&sc->lock);
		return (error);
	}

	rw_wunlock(&sc->lock);
	return (0);
}
  240. static int
  241. armv8_crypto_process(device_t dev, struct cryptop *crp, int hint __unused)
  242. {
  243. struct cryptodesc *crd, *enccrd;
  244. struct armv8_crypto_session *ses;
  245. int error;
  246. error = 0;
  247. enccrd = NULL;
  248. /* Sanity check. */
  249. if (crp == NULL)
  250. return (EINVAL);
  251. if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
  252. error = EINVAL;
  253. goto out;
  254. }
  255. for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
  256. switch (crd->crd_alg) {
  257. case CRYPTO_AES_CBC:
  258. if (enccrd != NULL) {
  259. error = EINVAL;
  260. goto out;
  261. }
  262. enccrd = crd;
  263. break;
  264. default:
  265. error = EINVAL;
  266. goto out;
  267. }
  268. }
  269. if (enccrd == NULL) {
  270. error = EINVAL;
  271. goto out;
  272. }
  273. /* We can only handle full blocks for now */
  274. if ((enccrd->crd_len % AES_BLOCK_LEN) != 0) {
  275. error = EINVAL;
  276. goto out;
  277. }
  278. ses = crypto_get_driver_session(crp->crp_session);
  279. error = armv8_crypto_cipher_process(ses, enccrd, crp);
  280. out:
  281. crp->crp_etype = error;
  282. crypto_done(crp);
  283. return (error);
  284. }
  285. static uint8_t *
  286. armv8_crypto_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
  287. int *allocated)
  288. {
  289. struct mbuf *m;
  290. struct uio *uio;
  291. struct iovec *iov;
  292. uint8_t *addr;
  293. if (crp->crp_flags & CRYPTO_F_IMBUF) {
  294. m = (struct mbuf *)crp->crp_buf;
  295. if (m->m_next != NULL)
  296. goto alloc;
  297. addr = mtod(m, uint8_t *);
  298. } else if (crp->crp_flags & CRYPTO_F_IOV) {
  299. uio = (struct uio *)crp->crp_buf;
  300. if (uio->uio_iovcnt != 1)
  301. goto alloc;
  302. iov = uio->uio_iov;
  303. addr = (uint8_t *)iov->iov_base;
  304. } else
  305. addr = (uint8_t *)crp->crp_buf;
  306. *allocated = 0;
  307. addr += enccrd->crd_skip;
  308. return (addr);
  309. alloc:
  310. addr = malloc(enccrd->crd_len, M_ARMV8_CRYPTO, M_NOWAIT);
  311. if (addr != NULL) {
  312. *allocated = 1;
  313. crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
  314. enccrd->crd_len, addr);
  315. } else
  316. *allocated = 0;
  317. return (addr);
  318. }
  319. static int
  320. armv8_crypto_cipher_process(struct armv8_crypto_session *ses,
  321. struct cryptodesc *enccrd, struct cryptop *crp)
  322. {
  323. struct fpu_kern_ctx *ctx;
  324. uint8_t *buf;
  325. uint8_t iv[AES_BLOCK_LEN];
  326. int allocated, i;
  327. int encflag, ivlen;
  328. int kt;
  329. encflag = (enccrd->crd_flags & CRD_F_ENCRYPT) == CRD_F_ENCRYPT;
  330. buf = armv8_crypto_cipher_alloc(enccrd, crp, &allocated);
  331. if (buf == NULL)
  332. return (ENOMEM);
  333. kt = is_fpu_kern_thread(0);
  334. if (!kt) {
  335. AQUIRE_CTX(i, ctx);
  336. fpu_kern_enter(curthread, ctx,
  337. FPU_KERN_NORMAL | FPU_KERN_KTHR);
  338. }
  339. if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
  340. panic("CRD_F_KEY_EXPLICIT");
  341. }
  342. switch (enccrd->crd_alg) {
  343. case CRYPTO_AES_CBC:
  344. ivlen = AES_BLOCK_LEN;
  345. break;
  346. }
  347. /* Setup iv */
  348. if (encflag) {
  349. if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
  350. bcopy(enccrd->crd_iv, iv, ivlen);
  351. else
  352. arc4rand(iv, ivlen, 0);
  353. if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
  354. crypto_copyback(crp->crp_flags, crp->crp_buf,
  355. enccrd->crd_inject, ivlen, iv);
  356. } else {
  357. if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
  358. bcopy(enccrd->crd_iv, iv, ivlen);
  359. else
  360. crypto_copydata(crp->crp_flags, crp->crp_buf,
  361. enccrd->crd_inject, ivlen, iv);
  362. }
  363. /* Do work */
  364. switch (ses->algo) {
  365. case CRYPTO_AES_CBC:
  366. if (encflag)
  367. armv8_aes_encrypt_cbc(ses->rounds, ses->enc_schedule,
  368. enccrd->crd_len, buf, buf, iv);
  369. else
  370. armv8_aes_decrypt_cbc(ses->rounds, ses->dec_schedule,
  371. enccrd->crd_len, buf, iv);
  372. break;
  373. }
  374. if (allocated)
  375. crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
  376. enccrd->crd_len, buf);
  377. if (!kt) {
  378. fpu_kern_leave(curthread, ctx);
  379. RELEASE_CTX(i, ctx);
  380. }
  381. if (allocated) {
  382. bzero(buf, enccrd->crd_len);
  383. free(buf, M_ARMV8_CRYPTO);
  384. }
  385. return (0);
  386. }
/* newbus method table: identify/probe/attach/detach plus the two
 * cryptodev entry points implemented above. */
static device_method_t armv8_crypto_methods[] = {
	DEVMETHOD(device_identify, armv8_crypto_identify),
	DEVMETHOD(device_probe, armv8_crypto_probe),
	DEVMETHOD(device_attach, armv8_crypto_attach),
	DEVMETHOD(device_detach, armv8_crypto_detach),

	DEVMETHOD(cryptodev_newsession, armv8_crypto_newsession),
	DEVMETHOD(cryptodev_process, armv8_crypto_process),

	DEVMETHOD_END,
};

static DEFINE_CLASS_0(armv8crypto, armv8_crypto_driver, armv8_crypto_methods,
    sizeof(struct armv8_crypto_softc));
static devclass_t armv8_crypto_devclass;

/* Attach directly under nexus (the device is identified, not enumerated). */
DRIVER_MODULE(armv8crypto, nexus, armv8_crypto_driver, armv8_crypto_devclass,
    0, 0);