mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-11 03:24:58 -06:00
hw/misc/aspeed_hace: Remove unused code for better readability
In the previous design of the hash framework, accumulative hashing was not supported. To work around this limitation, commit 5cd7d85
introduced an iov_cache array to store all the hash data from firmware. Once the ASPEED HACE model collected all the data, it passed the iov_cache to the hash API to calculate the final digest. However, with commit e3c0752
, the hash framework now supports accumulative hashing. This allows us to refactor the ASPEED HACE model, removing redundant logic and simplifying the implementation for better readability and maintainability. As a result, the iov_count variable is no longer needed—it was previously used to track how many cached entries were used for hashing. To maintain VMSTATE compatibility after removing this field, the VMSTATE_VERSION is bumped to 2. This cleanup follows significant changes in commit 4c1d0af4a2
, making the model more readable. - Deleted "iov_cache" and "iov_count" from "AspeedHACEState". - Removed "reconstruct_iov" function and related logic. - Simplified "do_hash_operation" by eliminating redundant checks. Signed-off-by: Jamin Lin <jamin_lin@aspeedtech.com> Reviewed-by: Cédric Le Goater <clg@redhat.com> Link: https://lore.kernel.org/qemu-devel/20250515081008.583578-2-jamin_lin@aspeedtech.com Signed-off-by: Cédric Le Goater <clg@redhat.com>
This commit is contained in:
parent
d09c0939c9
commit
f05cc69c6c
2 changed files with 2 additions and 39 deletions
|
@ -142,25 +142,6 @@ static bool has_padding(AspeedHACEState *s, struct iovec *iov,
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
|
|
||||||
uint32_t *pad_offset)
|
|
||||||
{
|
|
||||||
int i, iov_count;
|
|
||||||
if (*pad_offset != 0) {
|
|
||||||
s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
|
|
||||||
s->iov_cache[s->iov_count].iov_len = *pad_offset;
|
|
||||||
++s->iov_count;
|
|
||||||
}
|
|
||||||
for (i = 0; i < s->iov_count; i++) {
|
|
||||||
iov[i].iov_base = s->iov_cache[i].iov_base;
|
|
||||||
iov[i].iov_len = s->iov_cache[i].iov_len;
|
|
||||||
}
|
|
||||||
iov_count = s->iov_count;
|
|
||||||
s->iov_count = 0;
|
|
||||||
s->total_req_len = 0;
|
|
||||||
return iov_count;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
|
static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
|
||||||
bool acc_mode)
|
bool acc_mode)
|
||||||
{
|
{
|
||||||
|
@ -242,19 +223,6 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
|
||||||
iov[0].iov_base = haddr;
|
iov[0].iov_base = haddr;
|
||||||
iov[0].iov_len = len;
|
iov[0].iov_len = len;
|
||||||
i = 1;
|
i = 1;
|
||||||
|
|
||||||
if (s->iov_count) {
|
|
||||||
/*
|
|
||||||
* In aspeed sdk kernel driver, sg_mode is disabled in hash_final().
|
|
||||||
* Thus if we received a request with sg_mode disabled, it is
|
|
||||||
* required to check whether cache is empty. If no, we should
|
|
||||||
* combine cached iov and the current iov.
|
|
||||||
*/
|
|
||||||
s->total_req_len += len;
|
|
||||||
if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
|
|
||||||
i = reconstruct_iov(s, iov, 0, &pad_offset);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (acc_mode) {
|
if (acc_mode) {
|
||||||
|
@ -278,7 +246,6 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
|
||||||
qcrypto_hash_free(s->hash_ctx);
|
qcrypto_hash_free(s->hash_ctx);
|
||||||
|
|
||||||
s->hash_ctx = NULL;
|
s->hash_ctx = NULL;
|
||||||
s->iov_count = 0;
|
|
||||||
s->total_req_len = 0;
|
s->total_req_len = 0;
|
||||||
}
|
}
|
||||||
} else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf,
|
} else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf,
|
||||||
|
@ -437,7 +404,6 @@ static void aspeed_hace_reset(DeviceState *dev)
|
||||||
}
|
}
|
||||||
|
|
||||||
memset(s->regs, 0, sizeof(s->regs));
|
memset(s->regs, 0, sizeof(s->regs));
|
||||||
s->iov_count = 0;
|
|
||||||
s->total_req_len = 0;
|
s->total_req_len = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -469,12 +435,11 @@ static const Property aspeed_hace_properties[] = {
|
||||||
|
|
||||||
static const VMStateDescription vmstate_aspeed_hace = {
|
static const VMStateDescription vmstate_aspeed_hace = {
|
||||||
.name = TYPE_ASPEED_HACE,
|
.name = TYPE_ASPEED_HACE,
|
||||||
.version_id = 1,
|
.version_id = 2,
|
||||||
.minimum_version_id = 1,
|
.minimum_version_id = 2,
|
||||||
.fields = (const VMStateField[]) {
|
.fields = (const VMStateField[]) {
|
||||||
VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
|
VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
|
||||||
VMSTATE_UINT32(total_req_len, AspeedHACEState),
|
VMSTATE_UINT32(total_req_len, AspeedHACEState),
|
||||||
VMSTATE_UINT32(iov_count, AspeedHACEState),
|
|
||||||
VMSTATE_END_OF_LIST(),
|
VMSTATE_END_OF_LIST(),
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
|
@ -31,10 +31,8 @@ struct AspeedHACEState {
|
||||||
MemoryRegion iomem;
|
MemoryRegion iomem;
|
||||||
qemu_irq irq;
|
qemu_irq irq;
|
||||||
|
|
||||||
struct iovec iov_cache[ASPEED_HACE_MAX_SG];
|
|
||||||
uint32_t regs[ASPEED_HACE_NR_REGS];
|
uint32_t regs[ASPEED_HACE_NR_REGS];
|
||||||
uint32_t total_req_len;
|
uint32_t total_req_len;
|
||||||
uint32_t iov_count;
|
|
||||||
|
|
||||||
MemoryRegion *dram_mr;
|
MemoryRegion *dram_mr;
|
||||||
AddressSpace dram_as;
|
AddressSpace dram_as;
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue