crypto: Add aesenc_MC

Add a primitive for MixColumns.

Acked-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

parent 192fa84986
commit 04e1f30eed
3 changed files with 81 additions and 0 deletions
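For background, MixColumns treats each 4-byte column of the AES state as a vector over GF(2^8) and multiplies it by the fixed circulant matrix {{2,3,1,1},{1,2,3,1},{1,1,2,3},{3,1,1,2}}. Below is a minimal byte-wise sketch of one column, independent of the table-driven kernel this commit adds; xtime and mix_column are illustrative names, not QEMU identifiers.

#include <stdint.h>

/* Doubling in GF(2^8), reduced by the AES polynomial x^8+x^4+x^3+x+1. */
static uint8_t xtime(uint8_t x)
{
    return (x << 1) ^ (x & 0x80 ? 0x1b : 0);
}

/* Multiply one column by {{2,3,1,1},{1,2,3,1},{1,1,2,3},{3,1,1,2}},
 * using 3*x = xtime(x) ^ x in GF(2^8). */
static void mix_column(uint8_t c[4])
{
    uint8_t a0 = c[0], a1 = c[1], a2 = c[2], a3 = c[3];

    c[0] = xtime(a0) ^ xtime(a1) ^ a1 ^ a2 ^ a3;
    c[1] = a0 ^ xtime(a1) ^ xtime(a2) ^ a2 ^ a3;
    c[2] = a0 ^ a1 ^ xtime(a2) ^ xtime(a3) ^ a3;
    c[3] = xtime(a0) ^ a0 ^ a1 ^ a2 ^ xtime(a3);
}

The diff below folds exactly this arithmetic into one 256-entry table lookup per byte.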
crypto/aes.c (+61)

@@ -28,6 +28,8 @@
  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "qemu/osdep.h"
+#include "qemu/bswap.h"
+#include "qemu/bitops.h"
 #include "crypto/aes.h"
 #include "crypto/aes-round.h"

@@ -1216,6 +1218,65 @@ static const u32 rcon[] = {
         0x1B000000, 0x36000000, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
 };
 
+/*
+ * Perform MixColumns.
+ */
+static inline void
+aesenc_MC_swap(AESState *r, const AESState *st, bool swap)
+{
+    int swap_b = swap * 0xf;
+    int swap_w = swap * 0x3;
+    bool be = HOST_BIG_ENDIAN ^ swap;
+    uint32_t t;
+
+    /* Note that AES_mc_rot is encoded for little-endian. */
+    t = (      AES_mc_rot[st->b[swap_b ^ 0x0]] ^
+         rol32(AES_mc_rot[st->b[swap_b ^ 0x1]], 8) ^
+         rol32(AES_mc_rot[st->b[swap_b ^ 0x2]], 16) ^
+         rol32(AES_mc_rot[st->b[swap_b ^ 0x3]], 24));
+    if (be) {
+        t = bswap32(t);
+    }
+    r->w[swap_w ^ 0] = t;
+
+    t = (      AES_mc_rot[st->b[swap_b ^ 0x4]] ^
+         rol32(AES_mc_rot[st->b[swap_b ^ 0x5]], 8) ^
+         rol32(AES_mc_rot[st->b[swap_b ^ 0x6]], 16) ^
+         rol32(AES_mc_rot[st->b[swap_b ^ 0x7]], 24));
+    if (be) {
+        t = bswap32(t);
+    }
+    r->w[swap_w ^ 1] = t;
+
+    t = (      AES_mc_rot[st->b[swap_b ^ 0x8]] ^
+         rol32(AES_mc_rot[st->b[swap_b ^ 0x9]], 8) ^
+         rol32(AES_mc_rot[st->b[swap_b ^ 0xA]], 16) ^
+         rol32(AES_mc_rot[st->b[swap_b ^ 0xB]], 24));
+    if (be) {
+        t = bswap32(t);
+    }
+    r->w[swap_w ^ 2] = t;
+
+    t = (      AES_mc_rot[st->b[swap_b ^ 0xC]] ^
+         rol32(AES_mc_rot[st->b[swap_b ^ 0xD]], 8) ^
+         rol32(AES_mc_rot[st->b[swap_b ^ 0xE]], 16) ^
+         rol32(AES_mc_rot[st->b[swap_b ^ 0xF]], 24));
+    if (be) {
+        t = bswap32(t);
+    }
+    r->w[swap_w ^ 3] = t;
+}
+
+void aesenc_MC_gen(AESState *r, const AESState *st)
+{
+    aesenc_MC_swap(r, st, false);
+}
+
+void aesenc_MC_genrev(AESState *r, const AESState *st)
+{
+    aesenc_MC_swap(r, st, true);
+}
+
 /*
  * Perform SubBytes + ShiftRows + AddRoundKey.
  */
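The kernel above relies on two tricks worth spelling out. First, swap_b = swap * 0xf makes st->b[swap_b ^ i] walk the 16 state bytes in reverse order when swap is set, so one function body serves both aesenc_MC_gen and aesenc_MC_genrev. Second, each AES_mc_rot entry packs all four GF(2^8) products for one source byte into a single little-endian word. The table itself is precomputed elsewhere in crypto/aes.c and is not part of this diff, so the generator below is an inference from the rotation scheme in the code above, not the commit's own code.

#include <stdint.h>

/* Doubling in GF(2^8), reduced by the AES polynomial. */
static uint8_t gf2(uint8_t x)
{
    return (x << 1) ^ (x & 0x80 ? 0x1b : 0);
}

/* Assumed AES_mc_rot layout, little-endian: byte 0 = 2*x, bytes 1 and 2
 * = x, byte 3 = 3*x.  XORing the entries for a column's four bytes,
 * rotated left by 0/8/16/24 bits, then yields the mixed column. */
static uint32_t mc_rot_entry(uint8_t x)
{
    uint8_t x2 = gf2(x);
    uint8_t x3 = x2 ^ x;    /* 3*x = 2*x ^ x */

    return x2 | (uint32_t)x << 8 | (uint32_t)x << 16 | (uint32_t)x3 << 24;
}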
host/include/generic/host/crypto/aes-round.h (+2)

@@ -9,6 +9,8 @@
 #define HAVE_AES_ACCEL  false
 #define ATTR_AES_ACCEL
 
+void aesenc_MC_accel(AESState *, const AESState *, bool)
+    QEMU_ERROR("unsupported accel");
 void aesenc_SB_SR_AK_accel(AESState *, const AESState *,
                            const AESState *, bool)
     QEMU_ERROR("unsupported accel");
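QEMU_ERROR turns the generic stub into a compile-time tripwire. Because HAVE_AES_ACCEL is the constant false here, the aesenc_MC_accel call in the dispatcher (next file) is dead code and is eliminated before the attribute can fire; any configuration that could actually reach the stub fails to build rather than crash at run time. A standalone sketch of the pattern, assuming QEMU_ERROR wraps the GCC-style error function attribute (QEMU enables it only in optimized builds, where dead calls are reliably removed):

/* Stand-ins for the QEMU macros; names here are illustrative. */
#define HAVE_ACCEL 0    /* generic host: no accelerated implementation */

void accel_path(void) __attribute__((error("unsupported accel")));

static inline void dispatch(void)
{
    if (HAVE_ACCEL) {
        accel_path();   /* folded away, so the attribute never fires */
    }
}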
include/crypto/aes-round.h (+18)

@@ -20,6 +20,24 @@ typedef union {
 
 #include "host/crypto/aes-round.h"
 
+/*
+ * Perform MixColumns.
+ */
+
+void aesenc_MC_gen(AESState *ret, const AESState *st);
+void aesenc_MC_genrev(AESState *ret, const AESState *st);
+
+static inline void aesenc_MC(AESState *r, const AESState *st, bool be)
+{
+    if (HAVE_AES_ACCEL) {
+        aesenc_MC_accel(r, st, be);
+    } else if (HOST_BIG_ENDIAN == be) {
+        aesenc_MC_gen(r, st);
+    } else {
+        aesenc_MC_genrev(r, st);
+    }
+}
+
 /*
  * Perform SubBytes + ShiftRows + AddRoundKey.
  */
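A hypothetical caller, to show how the new dispatcher is meant to be used; helper_mixcolumns is an invented name, not a function from this commit:

#include "crypto/aes-round.h"

/* 'be' describes the byte order of the state the caller holds; the
 * dispatcher picks the accelerated, native, or byte-reversed path. */
static void helper_mixcolumns(AESState *dst, const AESState *src, bool be)
{
    AESState tmp;

    aesenc_MC(&tmp, src, be);
    *dst = tmp;
}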