accel/tcg: Do not issue misaligned i/o
In the single-page case we were issuing misaligned i/o to
the memory subsystem, which does not handle it properly.
Split such accesses via do_{ld,st}_mmio_*.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1800
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent 190aba803f
commit f7eaf9d702

1 changed file with 72 additions and 46 deletions:

accel/tcg/cputlb.c
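The fix replaces the io_readx()/io_writex() calls with do_{ld,st}_mmio_* helpers in each hunk below. As a rough picture of the idea, a minimal sketch, assuming a hypothetical one-byte device-read callback (the real do_ld_mmio_beN works from the TLB entry and issues the widest aligned chunks the region supports, not strictly bytes):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for a device read callback returning one byte. */
typedef uint8_t (*mmio_read_byte_fn)(uint64_t addr);

/*
 * Assemble a size-byte value most-significant byte first (big-endian),
 * so every access that reaches the device is a single aligned byte and
 * no misaligned i/o is ever issued.
 */
static uint64_t ld_mmio_beN_sketch(mmio_read_byte_fn read_byte,
                                   uint64_t addr, size_t size)
{
    uint64_t ret = 0;

    for (size_t i = 0; i < size; i++) {
        ret = (ret << 8) | read_byte(addr + i);
    }
    return ret;
}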
@@ -2370,16 +2370,20 @@ static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
 static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
                         MMUAccessType type, MemOp memop, uintptr_t ra)
 {
-    uint64_t ret;
+    uint16_t ret;
 
     if (unlikely(p->flags & TLB_MMIO)) {
-        return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop);
-    }
-
-    /* Perform the load host endian, then swap if necessary. */
-    ret = load_atom_2(env, ra, p->haddr, memop);
-    if (memop & MO_BSWAP) {
-        ret = bswap16(ret);
+        QEMU_IOTHREAD_LOCK_GUARD();
+        ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 2, mmu_idx, type, ra);
+        if ((memop & MO_BSWAP) == MO_LE) {
+            ret = bswap16(ret);
+        }
+    } else {
+        /* Perform the load host endian, then swap if necessary. */
+        ret = load_atom_2(env, ra, p->haddr, memop);
+        if (memop & MO_BSWAP) {
+            ret = bswap16(ret);
+        }
     }
     return ret;
 }
@@ -2390,13 +2394,17 @@ static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
     uint32_t ret;
 
     if (unlikely(p->flags & TLB_MMIO)) {
-        return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop);
-    }
-
-    /* Perform the load host endian. */
-    ret = load_atom_4(env, ra, p->haddr, memop);
-    if (memop & MO_BSWAP) {
-        ret = bswap32(ret);
+        QEMU_IOTHREAD_LOCK_GUARD();
+        ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 4, mmu_idx, type, ra);
+        if ((memop & MO_BSWAP) == MO_LE) {
+            ret = bswap32(ret);
+        }
+    } else {
+        /* Perform the load host endian. */
+        ret = load_atom_4(env, ra, p->haddr, memop);
+        if (memop & MO_BSWAP) {
+            ret = bswap32(ret);
+        }
     }
     return ret;
 }
@@ -2407,13 +2415,17 @@ static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
     uint64_t ret;
 
     if (unlikely(p->flags & TLB_MMIO)) {
-        return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop);
-    }
-
-    /* Perform the load host endian. */
-    ret = load_atom_8(env, ra, p->haddr, memop);
-    if (memop & MO_BSWAP) {
-        ret = bswap64(ret);
+        QEMU_IOTHREAD_LOCK_GUARD();
+        ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 8, mmu_idx, type, ra);
+        if ((memop & MO_BSWAP) == MO_LE) {
+            ret = bswap64(ret);
+        }
+    } else {
+        /* Perform the load host endian. */
+        ret = load_atom_8(env, ra, p->haddr, memop);
+        if (memop & MO_BSWAP) {
+            ret = bswap64(ret);
+        }
     }
     return ret;
 }
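The do_ld_2/do_ld_4/do_ld_8 hunks above share one shape: the helper returns the bytes assembled big-endian, which is already correct for an MO_BE access, so exactly the little-endian case ((memop & MO_BSWAP) == MO_LE) needs one swap. A self-contained illustration, with bswap16_demo standing in for QEMU's bswap16():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for QEMU's bswap16(). */
static inline uint16_t bswap16_demo(uint16_t x)
{
    return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
    /* Device bytes at increasing addresses: 0x12 then 0x34.
     * Big-endian assembly yields 0x1234. */
    uint16_t ret = (0x12 << 8) | 0x34;

    printf("MO_BE guest load sees 0x%04x\n", ret);                /* 0x1234 */
    printf("MO_LE guest load sees 0x%04x\n", bswap16_demo(ret));  /* 0x3412 */
    return 0;
}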
@@ -2561,20 +2573,22 @@ static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
     if (likely(!crosspage)) {
-        /* Perform the load host endian. */
         if (unlikely(l.page[0].flags & TLB_MMIO)) {
             QEMU_IOTHREAD_LOCK_GUARD();
-            a = io_readx(env, l.page[0].full, l.mmu_idx, addr,
-                         ra, MMU_DATA_LOAD, MO_64);
-            b = io_readx(env, l.page[0].full, l.mmu_idx, addr + 8,
-                         ra, MMU_DATA_LOAD, MO_64);
-            ret = int128_make128(HOST_BIG_ENDIAN ? b : a,
-                                 HOST_BIG_ENDIAN ? a : b);
+            a = do_ld_mmio_beN(env, l.page[0].full, 0, addr, 8,
+                               l.mmu_idx, MMU_DATA_LOAD, ra);
+            b = do_ld_mmio_beN(env, l.page[0].full, 0, addr + 8, 8,
+                               l.mmu_idx, MMU_DATA_LOAD, ra);
+            ret = int128_make128(b, a);
+            if ((l.memop & MO_BSWAP) == MO_LE) {
+                ret = bswap128(ret);
+            }
         } else {
+            /* Perform the load host endian. */
             ret = load_atom_16(env, ra, l.page[0].haddr, l.memop);
-        }
-        if (l.memop & MO_BSWAP) {
-            ret = bswap128(ret);
+            if (l.memop & MO_BSWAP) {
+                ret = bswap128(ret);
+            }
         }
         return ret;
     }
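In the 16-byte load above, a holds bytes [addr, addr + 8) and b holds bytes [addr + 8, addr + 16), each assembled big-endian. Because int128_make128() takes (lo, hi), int128_make128(b, a) places a in the high half, yielding the big-endian 128-bit value, and a single bswap128() then covers the little-endian case. A sketch using the GCC/Clang unsigned __int128 extension in place of QEMU's Int128:

#include <stdint.h>

/*
 * Compose the big-endian 128-bit value from two big-endian 8-byte
 * halves: 'a' from the lower address goes in the high half, matching
 * int128_make128(b, a) whose parameters are (lo, hi).
 */
static unsigned __int128 make_be128_sketch(uint64_t a, uint64_t b)
{
    return ((unsigned __int128)a << 64) | b;
}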
@@ -2874,7 +2888,11 @@ static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
                     int mmu_idx, MemOp memop, uintptr_t ra)
 {
     if (unlikely(p->flags & TLB_MMIO)) {
-        io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop);
+        if ((memop & MO_BSWAP) != MO_LE) {
+            val = bswap16(val);
+        }
+        QEMU_IOTHREAD_LOCK_GUARD();
+        do_st_mmio_leN(env, p->full, val, p->addr, 2, mmu_idx, ra);
     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
         /* nothing */
     } else {
@@ -2890,7 +2908,11 @@ static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
                     int mmu_idx, MemOp memop, uintptr_t ra)
 {
     if (unlikely(p->flags & TLB_MMIO)) {
-        io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop);
+        if ((memop & MO_BSWAP) != MO_LE) {
+            val = bswap32(val);
+        }
+        QEMU_IOTHREAD_LOCK_GUARD();
+        do_st_mmio_leN(env, p->full, val, p->addr, 4, mmu_idx, ra);
     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
         /* nothing */
     } else {
@@ -2906,7 +2928,11 @@ static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
                     int mmu_idx, MemOp memop, uintptr_t ra)
 {
     if (unlikely(p->flags & TLB_MMIO)) {
-        io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop);
+        if ((memop & MO_BSWAP) != MO_LE) {
+            val = bswap64(val);
+        }
+        QEMU_IOTHREAD_LOCK_GUARD();
+        do_st_mmio_leN(env, p->full, val, p->addr, 8, mmu_idx, ra);
     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
         /* nothing */
     } else {
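The store hunks are the mirror image: do_st_mmio_leN writes the least-significant byte at the lowest address, so an MO_LE store passes val through unchanged and any other ordering is byteswapped first. A minimal sketch, again assuming a hypothetical one-byte device-write callback:

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for a device write callback taking one byte. */
typedef void (*mmio_write_byte_fn)(uint64_t addr, uint8_t data);

/*
 * Emit a size-byte value least-significant byte first (little-endian),
 * one aligned byte access per device call.
 */
static void st_mmio_leN_sketch(mmio_write_byte_fn write_byte,
                               uint64_t addr, uint64_t val, size_t size)
{
    for (size_t i = 0; i < size; i++) {
        write_byte(addr + i, (uint8_t)(val >> (i * 8)));
    }
}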
@@ -3029,22 +3055,22 @@ static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
-        /* Swap to host endian if necessary, then store. */
-        if (l.memop & MO_BSWAP) {
-            val = bswap128(val);
-        }
         if (unlikely(l.page[0].flags & TLB_MMIO)) {
-            QEMU_IOTHREAD_LOCK_GUARD();
-            if (HOST_BIG_ENDIAN) {
-                b = int128_getlo(val), a = int128_gethi(val);
-            } else {
-                a = int128_getlo(val), b = int128_gethi(val);
+            if ((l.memop & MO_BSWAP) != MO_LE) {
+                val = bswap128(val);
             }
-            io_writex(env, l.page[0].full, l.mmu_idx, a, addr, ra, MO_64);
-            io_writex(env, l.page[0].full, l.mmu_idx, b, addr + 8, ra, MO_64);
+            a = int128_getlo(val);
+            b = int128_gethi(val);
+            QEMU_IOTHREAD_LOCK_GUARD();
+            do_st_mmio_leN(env, l.page[0].full, a, addr, 8, l.mmu_idx, ra);
+            do_st_mmio_leN(env, l.page[0].full, b, addr + 8, 8, l.mmu_idx, ra);
         } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
             /* nothing */
         } else {
+            /* Swap to host endian if necessary, then store. */
+            if (l.memop & MO_BSWAP) {
+                val = bswap128(val);
+            }
             store_atom_16(env, ra, l.page[0].haddr, l.memop, val);
         }
         return;
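For the 16-byte store above, the whole Int128 is swapped up front when the access is not little-endian, then split into two aligned 8-byte little-endian stores, the low half at addr and the high half at addr + 8. A sketch of the split, using unsigned __int128 for QEMU's int128_getlo()/int128_gethi():

#include <stdint.h>

/* Split a 128-bit value as int128_getlo()/int128_gethi() do:
 * the low half is stored at addr, the high half at addr + 8. */
static void split128_sketch(unsigned __int128 val, uint64_t *lo, uint64_t *hi)
{
    *lo = (uint64_t)val;
    *hi = (uint64_t)(val >> 64);
}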