Mirror of https://github.com/Motorhead1991/qemu.git
physmem: Factor out body of flatview_read/write_continue() loop
This code will be reused for the address_space_cached accessors shortly.
Also reduce the scope of the result variable now that we aren't calling
this directly in the loop.

Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/r/20240307153710.30907-4-Jonathan.Cameron@huawei.com
Signed-off-by: Peter Xu <peterx@redhat.com>
This commit is contained in:
parent bcfd8ba4f5
commit e7927d33cf

1 changed file with 99 additions and 70 deletions:

system/physmem.c  (169 changed lines: +99, -70)
@@ -2681,6 +2681,56 @@ static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs,
     return false;
 }
 
+static MemTxResult flatview_write_continue_step(MemTxAttrs attrs,
+                                                const uint8_t *buf,
+                                                hwaddr len, hwaddr mr_addr,
+                                                hwaddr *l, MemoryRegion *mr)
+{
+    if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) {
+        return MEMTX_ACCESS_ERROR;
+    }
+
+    if (!memory_access_is_direct(mr, true)) {
+        uint64_t val;
+        MemTxResult result;
+        bool release_lock = prepare_mmio_access(mr);
+
+        *l = memory_access_size(mr, *l, mr_addr);
+        /*
+         * XXX: could force current_cpu to NULL to avoid
+         * potential bugs
+         */
+
+        /*
+         * Assure Coverity (and ourselves) that we are not going to OVERRUN
+         * the buffer by following ldn_he_p().
+         */
+#ifdef QEMU_STATIC_ANALYSIS
+        assert((*l == 1 && len >= 1) ||
+               (*l == 2 && len >= 2) ||
+               (*l == 4 && len >= 4) ||
+               (*l == 8 && len >= 8));
+#endif
+        val = ldn_he_p(buf, *l);
+        result = memory_region_dispatch_write(mr, mr_addr, val,
+                                              size_memop(*l), attrs);
+        if (release_lock) {
+            bql_unlock();
+        }
+
+        return result;
+    } else {
+        /* RAM case */
+        uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l,
+                                               false);
+
+        memmove(ram_ptr, buf, *l);
+        invalidate_and_set_dirty(mr, mr_addr, *l);
+
+        return MEMTX_OK;
+    }
+}
+
 /* Called within RCU critical section. */
 static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
                                            MemTxAttrs attrs,
@@ -2692,44 +2742,8 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
     const uint8_t *buf = ptr;
 
     for (;;) {
-        if (!flatview_access_allowed(mr, attrs, mr_addr, l)) {
-            result |= MEMTX_ACCESS_ERROR;
-            /* Keep going. */
-        } else if (!memory_access_is_direct(mr, true)) {
-            uint64_t val;
-            bool release_lock = prepare_mmio_access(mr);
-
-            l = memory_access_size(mr, l, mr_addr);
-            /* XXX: could force current_cpu to NULL to avoid
-               potential bugs */
-
-            /*
-             * Assure Coverity (and ourselves) that we are not going to OVERRUN
-             * the buffer by following ldn_he_p().
-             */
-#ifdef QEMU_STATIC_ANALYSIS
-            assert((l == 1 && len >= 1) ||
-                   (l == 2 && len >= 2) ||
-                   (l == 4 && len >= 4) ||
-                   (l == 8 && len >= 8));
-#endif
-            val = ldn_he_p(buf, l);
-            result |= memory_region_dispatch_write(mr, mr_addr, val,
-                                                   size_memop(l), attrs);
-            if (release_lock) {
-                bql_unlock();
-            }
-        } else {
-            /* RAM case */
-
-            uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, &l,
-                                                   false);
-
-            memmove(ram_ptr, buf, l);
-            invalidate_and_set_dirty(mr, mr_addr, l);
-        }
+        result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l,
+                                               mr);
 
         len -= l;
         buf += l;
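The step helper's contract drives the loop above: *l goes in as the size of the current chunk and comes back clamped to however many bytes were actually handled, and the caller advances by that amount. Below is a minimal sketch of the post-patch write loop; the loop tail (address bookkeeping and re-translation) is an assumption about the unchanged code that the hunk above truncates, not part of the diff itself:

    for (;;) {
        /* One step: access check, then MMIO dispatch or direct RAM copy.
         * On return, *l holds the number of bytes actually consumed. */
        result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l,
                                               mr);

        /* Advance by what the step consumed. */
        len -= l;
        buf += l;
        addr += l;          /* assumed: unchanged tail of the loop */

        if (!len) {
            break;
        }

        /* assumed: re-translate the next chunk, as the existing loop does */
        l = len;
        mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs);
    }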
@@ -2763,6 +2777,52 @@ static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
                                   mr_addr, l, mr);
 }
 
+static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf,
+                                               hwaddr len, hwaddr mr_addr,
+                                               hwaddr *l,
+                                               MemoryRegion *mr)
+{
+    if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) {
+        return MEMTX_ACCESS_ERROR;
+    }
+
+    if (!memory_access_is_direct(mr, false)) {
+        /* I/O case */
+        uint64_t val;
+        MemTxResult result;
+        bool release_lock = prepare_mmio_access(mr);
+
+        *l = memory_access_size(mr, *l, mr_addr);
+        result = memory_region_dispatch_read(mr, mr_addr, &val, size_memop(*l),
+                                             attrs);
+
+        /*
+         * Assure Coverity (and ourselves) that we are not going to OVERRUN
+         * the buffer by following stn_he_p().
+         */
+#ifdef QEMU_STATIC_ANALYSIS
+        assert((*l == 1 && len >= 1) ||
+               (*l == 2 && len >= 2) ||
+               (*l == 4 && len >= 4) ||
+               (*l == 8 && len >= 8));
+#endif
+        stn_he_p(buf, *l, val);
+
+        if (release_lock) {
+            bql_unlock();
+        }
+        return result;
+    } else {
+        /* RAM case */
+        uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l,
+                                               false);
+
+        memcpy(buf, ram_ptr, *l);
+
+        return MEMTX_OK;
+    }
+}
+
 /* Called within RCU critical section. */
 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                    MemTxAttrs attrs, void *ptr,
@@ -2774,38 +2834,7 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
 
     fuzz_dma_read_cb(addr, len, mr);
     for (;;) {
-        if (!flatview_access_allowed(mr, attrs, mr_addr, l)) {
-            result |= MEMTX_ACCESS_ERROR;
-            /* Keep going. */
-        } else if (!memory_access_is_direct(mr, false)) {
-            /* I/O case */
-            uint64_t val;
-            bool release_lock = prepare_mmio_access(mr);
-
-            l = memory_access_size(mr, l, mr_addr);
-            result |= memory_region_dispatch_read(mr, mr_addr, &val,
-                                                  size_memop(l), attrs);
-
-            /*
-             * Assure Coverity (and ourselves) that we are not going to OVERRUN
-             * the buffer by following stn_he_p().
-             */
-#ifdef QEMU_STATIC_ANALYSIS
-            assert((l == 1 && len >= 1) ||
-                   (l == 2 && len >= 2) ||
-                   (l == 4 && len >= 4) ||
-                   (l == 8 && len >= 8));
-#endif
-            stn_he_p(buf, l, val);
-            if (release_lock) {
-                bql_unlock();
-            }
-        } else {
-            /* RAM case */
-            uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, &l,
-                                                   false);
-            memcpy(buf, ram_ptr, l);
-        }
+        result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr);
 
         len -= l;
         buf += l;
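The commit message's stated payoff is reuse by the address_space_cached accessors: because a MemoryRegionCache covers a single MemoryRegion, such a caller can drive the same step helpers without re-translating on every iteration. The sketch below only illustrates that shape; the function name and exact form are assumptions, not part of this commit:

/* Hypothetical cached-write path reusing the new helper; illustrative only. */
static MemTxResult address_space_write_continue_cached(MemTxAttrs attrs,
                                                        const void *ptr,
                                                        hwaddr len,
                                                        hwaddr mr_addr,
                                                        hwaddr l,
                                                        MemoryRegion *mr)
{
    MemTxResult result = MEMTX_OK;
    const uint8_t *buf = ptr;

    for (;;) {
        /* Same per-chunk step as flatview_write_continue() above. */
        result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l,
                                               mr);
        len -= l;
        buf += l;
        mr_addr += l;

        if (!len) {
            break;
        }

        /* No flatview_translate() here: the cache spans one MemoryRegion. */
        l = len;
    }

    return result;
}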