Mirror of https://github.com/Motorhead1991/qemu.git, synced 2025-08-05 16:53:55 -06:00
target/i386/sev: Perform padding calculations at compile-time
In sev_add_kernel_loader_hashes, the sizes of structs are known at compile-time, so calculate needed padding at compile-time.

No functional change intended.

Signed-off-by: Dov Murik <dovmurik@linux.ibm.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Commit ddcc0d898e (parent a0190bf150)
1 changed file with 18 additions and 10 deletions
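As background for the diff below, here is a minimal standalone sketch of the pattern the patch adopts, assuming a hypothetical packed Payload struct and stand-in ROUND_UP()/BUILD_BUG_ON() macros (none of this is QEMU code): the payload is wrapped in a second struct whose trailing padding array length is derived from sizeof() at compile time, so the 16-byte alignment is guaranteed by a static assertion instead of a runtime check.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-ins for QEMU's ROUND_UP() and QEMU_BUILD_BUG_ON(). */
#define ROUND_UP(n, d)   (((n) + (d) - 1) / (d) * (d))
#define BUILD_BUG_ON(x)  _Static_assert(!(x), #x)

/* A hypothetical packed payload whose size is not a multiple of 16. */
typedef struct __attribute__((packed)) Payload {
    uint8_t  guid[16];
    uint16_t len;
    uint8_t  body[7];
} Payload;                               /* sizeof(Payload) == 25 */

/* The padding length is a compile-time constant derived from sizeof(Payload). */
typedef struct __attribute__((packed)) PaddedPayload {
    Payload p;
    uint8_t padding[ROUND_UP(sizeof(Payload), 16) - sizeof(Payload)];
} PaddedPayload;

/* Same idea as the patch's QEMU_BUILD_BUG_ON(): fail the build, not the run. */
BUILD_BUG_ON(sizeof(PaddedPayload) % 16 != 0);

int main(void)
{
    PaddedPayload pp;

    /* The padding size is fixed, so it can be zeroed unconditionally. */
    memset(pp.padding, 0, sizeof(pp.padding));

    assert(sizeof(pp) == 32);            /* 25 bytes rounded up to 32 */
    return 0;
}

The shape mirrors the PaddedSevHashTable and QEMU_BUILD_BUG_ON additions in the first hunk below; the actual QEMU macros differ in spelling but not in intent.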
@@ -110,9 +110,19 @@ typedef struct QEMU_PACKED SevHashTable {
     SevHashTableEntry cmdline;
     SevHashTableEntry initrd;
     SevHashTableEntry kernel;
-    uint8_t padding[];
 } SevHashTable;
 
+/*
+ * Data encrypted by sev_encrypt_flash() must be padded to a multiple of
+ * 16 bytes.
+ */
+typedef struct QEMU_PACKED PaddedSevHashTable {
+    SevHashTable ht;
+    uint8_t padding[ROUND_UP(sizeof(SevHashTable), 16) - sizeof(SevHashTable)];
+} PaddedSevHashTable;
+
+QEMU_BUILD_BUG_ON(sizeof(PaddedSevHashTable) % 16 != 0);
+
 static SevGuestState *sev_guest;
 static Error *sev_mig_blocker;
 
@@ -1216,12 +1226,12 @@ bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp)
     uint8_t *data;
     SevHashTableDescriptor *area;
     SevHashTable *ht;
+    PaddedSevHashTable *padded_ht;
     uint8_t cmdline_hash[HASH_SIZE];
     uint8_t initrd_hash[HASH_SIZE];
     uint8_t kernel_hash[HASH_SIZE];
     uint8_t *hashp;
     size_t hash_len = HASH_SIZE;
-    int aligned_len = ROUND_UP(sizeof(SevHashTable), 16);
 
     /*
      * Only add the kernel hashes if the sev-guest configuration explicitly
@@ -1237,7 +1247,7 @@ bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp)
         return false;
     }
     area = (SevHashTableDescriptor *)data;
-    if (!area->base || area->size < aligned_len) {
+    if (!area->base || area->size < sizeof(PaddedSevHashTable)) {
         error_setg(errp, "SEV: guest firmware hashes table area is invalid "
                    "(base=0x%x size=0x%x)", area->base, area->size);
         return false;
@@ -1282,7 +1292,8 @@ bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp)
      * Populate the hashes table in the guest's memory at the OVMF-designated
      * area for the SEV hashes table
      */
-    ht = qemu_map_ram_ptr(NULL, area->base);
+    padded_ht = qemu_map_ram_ptr(NULL, area->base);
+    ht = &padded_ht->ht;
 
     ht->guid = sev_hash_table_header_guid;
     ht->len = sizeof(*ht);
@@ -1299,13 +1310,10 @@ bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp)
     ht->kernel.len = sizeof(ht->kernel);
     memcpy(ht->kernel.hash, kernel_hash, sizeof(ht->kernel.hash));
 
-    /* When calling sev_encrypt_flash, the length has to be 16 byte aligned */
-    if (aligned_len != ht->len) {
-        /* zero the excess data so the measurement can be reliably calculated */
-        memset(ht->padding, 0, aligned_len - ht->len);
-    }
+    /* zero the excess data so the measurement can be reliably calculated */
+    memset(padded_ht->padding, 0, sizeof(padded_ht->padding));
 
-    if (sev_encrypt_flash((uint8_t *)ht, aligned_len, errp) < 0) {
+    if (sev_encrypt_flash((uint8_t *)padded_ht, sizeof(*padded_ht), errp) < 0) {
         return false;
     }
 
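One property of this construction worth noting (my reading of the patch, not something it states): if sizeof(SevHashTable) were ever already a multiple of 16, the padding array would have length zero, which GCC and Clang accept as an extension, and the unconditional memset over sizeof(padded_ht->padding) in the last hunk would simply become a no-op. A sketch with a hypothetical 32-byte struct:

#include <stdint.h>
#include <string.h>

#define ROUND_UP(n, d)  (((n) + (d) - 1) / (d) * (d))   /* stand-in, not QEMU's macro */

/* Hypothetical payload whose packed size is already a multiple of 16. */
typedef struct __attribute__((packed)) Aligned32 {
    uint8_t bytes[32];
} Aligned32;

typedef struct __attribute__((packed)) PaddedAligned32 {
    Aligned32 a;
    /* Evaluates to uint8_t padding[0]; zero-length arrays are a GNU extension. */
    uint8_t padding[ROUND_UP(sizeof(Aligned32), 16) - sizeof(Aligned32)];
} PaddedAligned32;

_Static_assert(sizeof(PaddedAligned32) % 16 == 0, "stays 16-byte aligned");

int main(void)
{
    PaddedAligned32 p;
    memset(p.padding, 0, sizeof(p.padding));   /* size 0: a harmless no-op */
    return 0;
}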