Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src-draft/trunk]: src/sys Add AES implementation with VIA ACE.
details: https://anonhg.NetBSD.org/src-all/rev/b2e78f5dd5c2
branches: trunk
changeset: 935235:b2e78f5dd5c2
user: Taylor R Campbell <riastradh%NetBSD.org@localhost>
date: Mon Jun 15 16:27:33 2020 +0000
description:
Add AES implementation with VIA ACE.
diffstat:
sys/arch/x86/conf/files.x86 | 3 +
sys/arch/x86/x86/identcpu.c | 4 +
sys/crypto/aes/arch/x86/aes_via.c | 626 +++++++++++++++++++++++++++++++++++
sys/crypto/aes/arch/x86/aes_via.h | 36 ++
sys/crypto/aes/arch/x86/files.aesvia | 3 +
5 files changed, 672 insertions(+), 0 deletions(-)
diffs (truncated from 709 to 300 lines):
diff -r 4b4c9f8cb999 -r b2e78f5dd5c2 sys/arch/x86/conf/files.x86
--- a/sys/arch/x86/conf/files.x86 Mon Jun 15 16:19:29 2020 +0000
+++ b/sys/arch/x86/conf/files.x86 Mon Jun 15 16:27:33 2020 +0000
@@ -168,3 +168,6 @@
# AES-NI
include "crypto/aes/arch/x86/files.aesni"
+
+# VIA ACE
+include "crypto/aes/arch/x86/files.aesvia"
diff -r 4b4c9f8cb999 -r b2e78f5dd5c2 sys/arch/x86/x86/identcpu.c
--- a/sys/arch/x86/x86/identcpu.c Mon Jun 15 16:19:29 2020 +0000
+++ b/sys/arch/x86/x86/identcpu.c Mon Jun 15 16:27:33 2020 +0000
@@ -40,6 +40,7 @@
#include <sys/cpu.h>
#include <crypto/aes/arch/x86/aes_ni.h>
+#include <crypto/aes/arch/x86/aes_via.h>
#include <uvm/uvm_extern.h>
@@ -1000,7 +1001,10 @@
#ifdef __x86_64__ /* not yet implemented on i386 */
if (cpu_feature[1] & CPUID2_AES)
aes_md_init(&aes_ni_impl);
+ else
#endif
+ if (cpu_feature[4] & CPUID_VIA_HAS_ACE)
+ aes_md_init(&aes_via_impl);
} else {
/*
* If not first. Warn about cpu_feature mismatch for
diff -r 4b4c9f8cb999 -r b2e78f5dd5c2 sys/crypto/aes/arch/x86/aes_via.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sys/crypto/aes/arch/x86/aes_via.c Mon Jun 15 16:27:33 2020 +0000
@@ -0,0 +1,626 @@
+/* $NetBSD$ */
+
+/*-
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(1, "$NetBSD$");
+
+#include <sys/types.h>
+#include <sys/evcnt.h>
+#include <sys/systm.h>
+
+#include <crypto/aes/aes.h>
+#include <crypto/aes/aes_bear.h>
+
+#include <x86/cpufunc.h>
+#include <x86/cpuvar.h>
+#include <x86/fpu.h>
+#include <x86/specialreg.h>
+#include <x86/via_padlock.h>
+
+/*
+ * aesvia_reload_keys -- force the xcrypt unit to re-read key material.
+ *
+ * NOTE(review): presumably the PadLock engine caches the expanded key
+ * and a write to the flags register (pushf; popf) invalidates that
+ * cache so the next xcrypt instruction reloads it from the address in
+ * %ebx -- confirm against the VIA PadLock programming guide.  Called
+ * before each use of the engine in this file.
+ */
+static void
+aesvia_reload_keys(void)
+{
+
+ asm volatile("pushf; popf");
+}
+
+/*
+ * aesvia_keylen_cw0(nrounds)
+ *
+ *	Map an AES round count (10/12/14) to the PadLock control-word
+ *	low bits selecting the key size.  For AES-192/AES-256 also OR
+ *	in KEYGEN_SW, meaning the round keys are supplied by software
+ *	rather than expanded by the hardware.  Panics on any other
+ *	round count.
+ */
+static uint32_t
+aesvia_keylen_cw0(unsigned nrounds)
+{
+
+ /*
+ * Determine the control word bits for the key size / number of
+ * rounds. For AES-128, the hardware can do key expansion on
+ * the fly; for AES-192 and AES-256, software must do it.
+ */
+ switch (nrounds) {
+ case AES_128_NROUNDS:
+ return C3_CRYPT_CWLO_KEY128;
+ case AES_192_NROUNDS:
+ return C3_CRYPT_CWLO_KEY192 | C3_CRYPT_CWLO_KEYGEN_SW;
+ case AES_256_NROUNDS:
+ return C3_CRYPT_CWLO_KEY256 | C3_CRYPT_CWLO_KEYGEN_SW;
+ default:
+ panic("invalid AES nrounds: %u", nrounds);
+ }
+}
+
+/*
+ * aesvia_setenckey(enc, key, nrounds)
+ *
+ *	Install an encryption key schedule.  For AES-128 only the four
+ *	raw 32-bit key words are stored (decoded little-endian), since
+ *	the hardware expands the 128-bit key on the fly; for AES-192
+ *	and AES-256 the full schedule is computed in software with the
+ *	BearSSL constant-time key-schedule routine.  Panics on any
+ *	other round count.
+ */
+static void
+aesvia_setenckey(struct aesenc *enc, const uint8_t *key, uint32_t nrounds)
+{
+ size_t key_len;
+
+ switch (nrounds) {
+ case AES_128_NROUNDS:
+ enc->aese_aes.aes_rk[0] = le32dec(key + 4*0);
+ enc->aese_aes.aes_rk[1] = le32dec(key + 4*1);
+ enc->aese_aes.aes_rk[2] = le32dec(key + 4*2);
+ enc->aese_aes.aes_rk[3] = le32dec(key + 4*3);
+ return;
+ case AES_192_NROUNDS:
+ key_len = 24;
+ break;
+ case AES_256_NROUNDS:
+ key_len = 32;
+ break;
+ default:
+ panic("invalid AES nrounds: %u", nrounds);
+ }
+ br_aes_ct_keysched_stdenc(enc->aese_aes.aes_rk, key, key_len);
+}
+
+/*
+ * aesvia_setdeckey(dec, key, nrounds)
+ *
+ *	Install a decryption key schedule.  Mirrors aesvia_setenckey:
+ *	AES-128 stores the raw little-endian key words for hardware
+ *	on-the-fly expansion; AES-192/AES-256 use the BearSSL software
+ *	decryption key schedule.  Panics on any other round count.
+ */
+static void
+aesvia_setdeckey(struct aesdec *dec, const uint8_t *key, uint32_t nrounds)
+{
+ size_t key_len;
+
+ switch (nrounds) {
+ case AES_128_NROUNDS:
+ dec->aesd_aes.aes_rk[0] = le32dec(key + 4*0);
+ dec->aesd_aes.aes_rk[1] = le32dec(key + 4*1);
+ dec->aesd_aes.aes_rk[2] = le32dec(key + 4*2);
+ dec->aesd_aes.aes_rk[3] = le32dec(key + 4*3);
+ return;
+ case AES_192_NROUNDS:
+ key_len = 24;
+ break;
+ case AES_256_NROUNDS:
+ key_len = 32;
+ break;
+ default:
+ panic("invalid AES nrounds: %u", nrounds);
+ }
+ br_aes_ct_keysched_stddec(dec->aesd_aes.aes_rk, key, key_len);
+}
+
+/*
+ * aesvia_enc1(enc, in, out, cw0)
+ *
+ *	Encrypt a single 16-byte block with `rep xcryptecb'.  cw0
+ *	carries the key-size/keygen bits from aesvia_keylen_cw0; the
+ *	rest of the control word selects AES, encrypt, normal mode.
+ *	enc, in, and out must all be 16-byte aligned (KASSERTed); the
+ *	caller is responsible for the page-boundary overread constraint
+ *	(see aesvia_enc).  Register use: %ecx = block count, %esi/%edi
+ *	= in/out, %ebx = key schedule, %edx = control word.
+ */
+static inline void
+aesvia_enc1(const struct aesenc *enc, const uint8_t in[static 16],
+ uint8_t out[static 16], uint32_t cw0)
+{
+ const uint32_t cw[4] __aligned(16) = {
+ [0] = (cw0
+ | C3_CRYPT_CWLO_ALG_AES
+ | C3_CRYPT_CWLO_ENCRYPT
+ | C3_CRYPT_CWLO_NORMAL),
+ };
+ size_t nblocks = 1;
+
+ KASSERT(((uintptr_t)enc & 0xf) == 0);
+ KASSERT(((uintptr_t)in & 0xf) == 0);
+ KASSERT(((uintptr_t)out & 0xf) == 0);
+
+ asm volatile("rep xcryptecb"
+ : "+c"(nblocks), "+S"(in), "+D"(out)
+ : "b"(enc), "d"(cw)
+ : "memory", "cc");
+}
+
+/*
+ * aesvia_dec1(dec, in, out, cw0)
+ *
+ *	Decrypt a single 16-byte block with `rep xcryptecb'.  Identical
+ *	in structure to aesvia_enc1 except the control word selects
+ *	decrypt.  dec, in, and out must all be 16-byte aligned
+ *	(KASSERTed); caller handles the page-boundary overread
+ *	constraint (see aesvia_dec).
+ */
+static inline void
+aesvia_dec1(const struct aesdec *dec, const uint8_t in[static 16],
+ uint8_t out[static 16], uint32_t cw0)
+{
+ const uint32_t cw[4] __aligned(16) = {
+ [0] = (cw0
+ | C3_CRYPT_CWLO_ALG_AES
+ | C3_CRYPT_CWLO_DECRYPT
+ | C3_CRYPT_CWLO_NORMAL),
+ };
+ size_t nblocks = 1;
+
+ KASSERT(((uintptr_t)dec & 0xf) == 0);
+ KASSERT(((uintptr_t)in & 0xf) == 0);
+ KASSERT(((uintptr_t)out & 0xf) == 0);
+
+ asm volatile("rep xcryptecb"
+ : "+c"(nblocks), "+S"(in), "+D"(out)
+ : "b"(dec), "d"(cw)
+ : "memory", "cc");
+}
+
+/* Event counters for the single-block encrypt path (aligned vs. bounced). */
+static struct evcnt enc_aligned_evcnt = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
+ NULL, "aesvia", "enc aligned");
+EVCNT_ATTACH_STATIC(enc_aligned_evcnt);
+static struct evcnt enc_unaligned_evcnt = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
+ NULL, "aesvia", "enc unaligned"); /* fixed: was mislabeled "dec unaligned",
+ colliding with dec_unaligned_evcnt */
+EVCNT_ATTACH_STATIC(enc_unaligned_evcnt);
+
+/*
+ * aesvia_enc(enc, in, out, nrounds)
+ *
+ *	Single-block encrypt entry point.  Enters the kernel FPU
+ *	section (fpu_kern_enter/leave bracket the hardware use), forces
+ *	a key reload, and drives the engine directly when in and out
+ *	are both 16-byte aligned and the input block does not sit in
+ *	the last 16 bytes of a page (the 0xff0 test); otherwise it
+ *	bounces through aligned stack buffers, which are explicitly
+ *	zeroed afterward so key-dependent data does not linger on the
+ *	stack.  Each path bumps the matching event counter.
+ */
+static void
+aesvia_enc(const struct aesenc *enc, const uint8_t in[static 16],
+ uint8_t out[static 16], uint32_t nrounds)
+{
+ const uint32_t cw0 = aesvia_keylen_cw0(nrounds);
+
+ fpu_kern_enter();
+ aesvia_reload_keys();
+ if ((((uintptr_t)in | (uintptr_t)out) & 0xf) == 0 &&
+ ((uintptr_t)in & 0xff0) != 0xff0) {
+ enc_aligned_evcnt.ev_count++;
+ aesvia_enc1(enc, in, out, cw0);
+ } else {
+ enc_unaligned_evcnt.ev_count++;
+ /*
+ * VIA requires 16-byte/128-bit alignment, and
+ * xcrypt-ecb reads one block past the one we're
+ * working on -- which may go past the end of the page
+ * into unmapped territory. Use a bounce buffer if
+ * either constraint is violated.
+ */
+ uint8_t inbuf[16] __aligned(16);
+ uint8_t outbuf[16] __aligned(16);
+
+ memcpy(inbuf, in, 16);
+ aesvia_enc1(enc, inbuf, outbuf, cw0);
+ memcpy(out, outbuf, 16);
+
+ explicit_memset(inbuf, 0, sizeof inbuf);
+ explicit_memset(outbuf, 0, sizeof outbuf);
+ }
+ fpu_kern_leave();
+}
+
+/* Event counters for the single-block decrypt path (aligned vs. bounced). */
+static struct evcnt dec_aligned_evcnt = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
+ NULL, "aesvia", "dec aligned");
+EVCNT_ATTACH_STATIC(dec_aligned_evcnt);
+static struct evcnt dec_unaligned_evcnt = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
+ NULL, "aesvia", "dec unaligned");
+EVCNT_ATTACH_STATIC(dec_unaligned_evcnt);
+
+/*
+ * aesvia_dec(dec, in, out, nrounds)
+ *
+ *	Single-block decrypt entry point.  Mirrors aesvia_enc: enter
+ *	the kernel FPU section, reload keys, use the hardware in place
+ *	when in/out are 16-byte aligned and the input block is not in
+ *	the last 16 bytes of a page; otherwise bounce through aligned
+ *	stack buffers that are zeroed afterward.  Each path bumps the
+ *	matching event counter.
+ */
+static void
+aesvia_dec(const struct aesdec *dec, const uint8_t in[static 16],
+ uint8_t out[static 16], uint32_t nrounds)
+{
+ const uint32_t cw0 = aesvia_keylen_cw0(nrounds);
+
+ fpu_kern_enter();
+ aesvia_reload_keys();
+ if ((((uintptr_t)in | (uintptr_t)out) & 0xf) == 0 &&
+ ((uintptr_t)in & 0xff0) != 0xff0) {
+ dec_aligned_evcnt.ev_count++;
+ aesvia_dec1(dec, in, out, cw0);
+ } else {
+ dec_unaligned_evcnt.ev_count++;
+ /*
+ * VIA requires 16-byte/128-bit alignment, and
+ * xcrypt-ecb reads one block past the one we're
+ * working on -- which may go past the end of the page
+ * into unmapped territory. Use a bounce buffer if
+ * either constraint is violated.
+ */
+ uint8_t inbuf[16] __aligned(16);
+ uint8_t outbuf[16] __aligned(16);
+
+ memcpy(inbuf, in, 16);
+ aesvia_dec1(dec, inbuf, outbuf, cw0);
+ memcpy(out, outbuf, 16);
+
+ explicit_memset(inbuf, 0, sizeof inbuf);
+ explicit_memset(outbuf, 0, sizeof outbuf);
+ }
+ fpu_kern_leave();
+}
+
+static inline void
+aesvia_cbc_enc1(const struct aesenc *enc, const uint8_t in[static 16],
+ uint8_t out[static 16], size_t nblocks, uint8_t **ivp, uint32_t cw0)
+{
+ const uint32_t cw[4] __aligned(16) = {
+ [0] = (cw0
+ | C3_CRYPT_CWLO_ALG_AES
+ | C3_CRYPT_CWLO_ENCRYPT
+ | C3_CRYPT_CWLO_NORMAL),
+ };
+
+ KASSERT(((uintptr_t)enc & 0xf) == 0);
+ KASSERT(((uintptr_t)in & 0xf) == 0);
+ KASSERT(((uintptr_t)out & 0xf) == 0);
+ KASSERT(((uintptr_t)*ivp & 0xf) == 0);
+
+ /*
+ * Register effects:
Home |
Main Index |
Thread Index |
Old Index