Merge tag 'hw-cpus-20240119' of https://github.com/philmd/qemu into staging

HW core patch queue

. Deprecate unmaintained SH-4 models (Samuel)
. HPET: Convert DPRINTF calls to trace events (Daniel)
. Implement buffered block writes in Intel PFlash (Gerd)
. Ignore ELF loadable segments with zero size (Bin)
. ESP/NCR53C9x: PCI DMA fixes (Mark)
. PIIX: Simplify Xen PCI IRQ routing (Bernhard)
. Restrict CPU 'start-powered-off' property to sysemu (Phil)

. target/alpha: Only build sys_helper.c on system emulation (Phil)
. target/xtensa: Use generic instruction breakpoint API & add test (Max)
. Restrict icount to system emulation (Phil)
. Do not set CPUState TCG-specific flags in non-TCG accels (Phil)
. Cleanup TCG tb_invalidate API (Phil)
. Correct LoongArch/KVM include path (Bibo)
. Do not ignore throttle errors in crypto backends (Phil)

. MAINTAINERS updates (Raphael, Zhao)

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEE+qvnXhKRciHc/Wuy4+MsLN6twN4FAmWqXbkACgkQ4+MsLN6t
# wN6VVBAAkP/Bs2JfQYobPZVV868wceM97KeUJMXP2YWf6dSLpHRCQN5KtuJcACM9
# y3k3R7nMeVJSGmzl/1gF1G9JhjoCLoVLX/ejeBppv4Wq//9sEdggaQfdCwkhWw2o
# IK/gPjTZpimE7Er4hPlxmuhSRuM1MX4duKFRRfuZpE7XY14Y7/Hk12VIG7LooO0x
# 2Sl8CaU0DN7CWmRVDoUkwVx7JBy28UVarRDsgpBim7oKmjjBFnCJkH6B6NJXEiYr
# z1BmIcHa87S09kG1ek+y8aZpG9iPC7nUWjPIQyJGhnfrnBuO7hQHwCLIjHHp5QBR
# BoMr8YQNTI34/M/D8pBfg96LrGDjkQOfwRyRddkMP/jJcNPMAPMNGbfVaIrfij1e
# T+jFF4gQenOvy1XKCY3Uk/a11P3tIRFBEeOlzzQg4Aje9W2MhUNwK2HTlRfBbrRr
# V30R764FDmHlsyOu6/E3jqp4GVCgryF1bglPOBjVEU5uytbQTP8jshIpGVnxBbF+
# OpFwtsoDbsousNKVcO5+B0mlHcB9Ru9h11M5/YD/jfLMk95Ga90JGdgYpqQ5tO5Y
# aqQhKfCKbfgKuKhysxpsdWAwHZzVrlSf+UrObF0rl2lMXXfcppjCqNaw4QJ0oedc
# DNBxTPcCE2vWhUzP3A60VH7jLh4nLaqSTrxxQKkbx+Je1ERGrxs=
# =KmQh
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 19 Jan 2024 11:32:09 GMT
# gpg:                using RSA key FAABE75E12917221DCFD6BB2E3E32C2CDEADC0DE
# gpg: Good signature from "Philippe Mathieu-Daudé (F4BUG) <f4bug@amsat.org>" [full]
# Primary key fingerprint: FAAB E75E 1291 7221 DCFD  6BB2 E3E3 2C2C DEAD C0DE

* tag 'hw-cpus-20240119' of https://github.com/philmd/qemu: (36 commits)
  configure: Add linux header compile support for LoongArch
  MAINTAINERS: Update hw/core/cpu.c entry
  MAINTAINERS: Update Raphael Norwitz email
  hw/elf_ops: Ignore loadable segments with zero size
  hw/scsi/esp-pci: set DMA_STAT_BCMBLT when BLAST command issued
  hw/scsi/esp-pci: synchronise setting of DMA_STAT_DONE with ESP completion interrupt
  hw/scsi/esp-pci: generate PCI interrupt from separate ESP and PCI sources
  hw/scsi/esp-pci: use correct address register for PCI DMA transfers
  target/riscv: Rename tcg_cpu_FOO() to include 'riscv'
  target/i386: Rename tcg_cpu_FOO() to include 'x86'
  hw/s390x: Rename cpu_class_init() to include 'sclp'
  hw/core/cpu: Rename cpu_class_init() to include 'common'
  accel: Rename accel_init_ops_interfaces() to include 'system'
  cpus: Restrict 'start-powered-off' property to system emulation
  system/watchpoint: Move TCG specific code to accel/tcg/
  system/replay: Restrict icount to system emulation
  hw/pflash: implement update buffer for block writes
  hw/pflash: use ldn_{be,le}_p and stn_{be,le}_p
  hw/pflash: refactor pflash_data_write()
  hw/i386/pc_piix: Make piix_intx_routing_notifier_xen() more device independent
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

commit 3f2a357b95 (Peter Maydell, 2024-01-19 11:39:38 +00:00)
50 changed files with 603 additions and 520 deletions

@@ -62,7 +62,7 @@ void accel_setup_post(MachineState *ms)
}
/* initialize the arch-independent accel operation interfaces */
void accel_init_ops_interfaces(AccelClass *ac)
void accel_system_init_ops_interfaces(AccelClass *ac)
{
const char *ac_name;
char *ops_name;

@@ -10,6 +10,6 @@
#ifndef ACCEL_SYSTEM_H
#define ACCEL_SYSTEM_H
void accel_init_ops_interfaces(AccelClass *ac);
void accel_system_init_ops_interfaces(AccelClass *ac);
#endif /* ACCEL_SYSTEM_H */

@@ -104,7 +104,7 @@ static void accel_init_cpu_interfaces(AccelClass *ac)
void accel_init_interfaces(AccelClass *ac)
{
#ifndef CONFIG_USER_ONLY
accel_init_ops_interfaces(ac);
accel_system_init_ops_interfaces(ac);
#endif /* !CONFIG_USER_ONLY */
accel_init_cpu_interfaces(ac);

@@ -27,7 +27,6 @@ static void *dummy_cpu_thread_fn(void *arg)
bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true;
current_cpu = cpu;
#ifndef _WIN32

@@ -428,7 +428,6 @@ static void *hvf_cpu_thread_fn(void *arg)
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true;
current_cpu = cpu;
hvf_init_vcpu(cpu);

@@ -36,7 +36,6 @@ static void *kvm_vcpu_thread_fn(void *arg)
bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true;
current_cpu = cpu;
r = kvm_init_vcpu(cpu, &error_fatal);

@@ -49,21 +49,19 @@ static bool icount_sleep = true;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/*
* 0 = Do not count executed instructions.
* 1 = Fixed conversion of insn to ns via "shift" option
* 2 = Runtime adaptive algorithm to compute shift
*/
int use_icount;
/* Do not count executed instructions */
ICountMode use_icount = ICOUNT_DISABLED;
static void icount_enable_precise(void)
{
use_icount = 1;
/* Fixed conversion of insn to ns via "shift" option */
use_icount = ICOUNT_PRECISE;
}
static void icount_enable_adaptive(void)
{
use_icount = 2;
/* Runtime adaptive algorithm to compute shift */
use_icount = ICOUNT_ADAPTATIVE;
}
/*
@@ -256,7 +254,7 @@ static void icount_warp_rt(void)
int64_t warp_delta;
warp_delta = clock - timers_state.vm_clock_warp_start;
if (icount_enabled() == 2) {
if (icount_enabled() == ICOUNT_ADAPTATIVE) {
/*
* In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too far
* ahead of real time (it might already be ahead so careful not
@@ -419,7 +417,7 @@ void icount_account_warp_timer(void)
icount_warp_rt();
}
void icount_configure(QemuOpts *opts, Error **errp)
bool icount_configure(QemuOpts *opts, Error **errp)
{
const char *option = qemu_opt_get(opts, "shift");
bool sleep = qemu_opt_get_bool(opts, "sleep", true);
@@ -429,27 +427,28 @@ void icount_configure(QemuOpts *opts, Error **errp)
if (!option) {
if (qemu_opt_get(opts, "align") != NULL) {
error_setg(errp, "Please specify shift option when using align");
return false;
}
return;
return true;
}
if (align && !sleep) {
error_setg(errp, "align=on and sleep=off are incompatible");
return;
return false;
}
if (strcmp(option, "auto") != 0) {
if (qemu_strtol(option, NULL, 0, &time_shift) < 0
|| time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) {
error_setg(errp, "icount: Invalid shift value");
return;
return false;
}
} else if (icount_align_option) {
error_setg(errp, "shift=auto and align=on are incompatible");
return;
return false;
} else if (!icount_sleep) {
error_setg(errp, "shift=auto and sleep=off are incompatible");
return;
return false;
}
icount_sleep = sleep;
@@ -463,7 +462,7 @@ void icount_configure(QemuOpts *opts, Error **errp)
if (time_shift >= 0) {
timers_state.icount_time_shift = time_shift;
icount_enable_precise();
return;
return true;
}
icount_enable_adaptive();
@@ -491,11 +490,14 @@ void icount_configure(QemuOpts *opts, Error **errp)
timer_mod(timers_state.icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 10);
return true;
}
void icount_notify_exit(void)
{
if (icount_enabled() && current_cpu) {
assert(icount_enabled());
if (current_cpu) {
qemu_cpu_kick(current_cpu);
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
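
The hunk above changes icount_configure() from void to bool, so callers can
act on the return value instead of probing the Error object afterwards. A
minimal sketch of a caller under the new contract (the handler name and the
qemu_opts_foreach() wiring are illustrative assumptions, not part of this
diff):

static int do_configure_icount(void *opaque, QemuOpts *opts, Error **errp)
{
    /* qemu_opts_foreach() stops on a non-zero return, so invert success. */
    return !icount_configure(opts, errp);
}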

@@ -24,6 +24,7 @@ specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
'cputlb.c',
'watchpoint.c',
))
system_ss.add(when: ['CONFIG_TCG'], if_true: files(

@@ -1021,7 +1021,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
* Called with mmap_lock held for user-mode emulation
* NOTE: this function must not be called while a TB is running.
*/
void tb_invalidate_phys_page(tb_page_addr_t addr)
static void tb_invalidate_phys_page(tb_page_addr_t addr)
{
tb_page_addr_t start, last;
@@ -1160,28 +1160,6 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
#endif
}
/*
* Invalidate all TBs which intersect with the target physical
* address page @addr.
*/
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
struct page_collection *pages;
tb_page_addr_t start, last;
PageDesc *p;
p = page_find(addr >> TARGET_PAGE_BITS);
if (p == NULL) {
return;
}
start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK;
pages = page_collection_lock(start, last);
tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
page_collection_unlock(pages);
}
/*
* Invalidate all TBs which intersect with the target physical address range
* [start;last]. NOTE: start and end may refer to *different* physical pages.

accel/tcg/watchpoint.c (new file, 143 lines)

@@ -0,0 +1,143 @@
/*
* CPU watchpoints
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "exec/exec-all.h"
#include "exec/translate-all.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "hw/core/tcg-cpu-ops.h"
#include "hw/core/cpu.h"
/*
* Return true if this watchpoint address matches the specified
* access (ie the address range covered by the watchpoint overlaps
* partially or completely with the address range covered by the
* access).
*/
static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
vaddr addr, vaddr len)
{
/*
* We know the lengths are non-zero, but a little caution is
* required to avoid errors in the case where the range ends
* exactly at the top of the address space and so addr + len
* wraps round to zero.
*/
vaddr wpend = wp->vaddr + wp->len - 1;
vaddr addrend = addr + len - 1;
return !(addr > wpend || wp->vaddr > addrend);
}
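/*
 * Worked example (illustrative, not part of the file above): with a
 * 64-bit vaddr, a watchpoint at wp->vaddr = 0xFFFFFFFFFFFFFFF8 with
 * wp->len = 8 has wpend = 0xFFFFFFFFFFFFFFFF. Computing wp->vaddr +
 * wp->len instead would wrap to 0 and make the overlap test reject
 * accesses at the very top of the address space; using len - 1 keeps
 * both range ends representable, so the comparison above stays correct.
 */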
/* Return flags for watchpoints that match addr + prot. */
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
{
CPUWatchpoint *wp;
int ret = 0;
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (watchpoint_address_matches(wp, addr, len)) {
ret |= wp->flags;
}
}
return ret;
}
/* Generate a debug exception if a watchpoint has been hit. */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUWatchpoint *wp;
assert(tcg_enabled());
if (cpu->watchpoint_hit) {
/*
* We re-entered the check after replacing the TB.
* Now raise the debug interrupt so that it will
* trigger after the current instruction.
*/
bql_lock();
cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
bql_unlock();
return;
}
if (cc->tcg_ops->adjust_watchpoint_address) {
/* this is currently used only by ARM BE32 */
addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
}
assert((flags & ~BP_MEM_ACCESS) == 0);
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
int hit_flags = wp->flags & flags;
if (hit_flags && watchpoint_address_matches(wp, addr, len)) {
if (replay_running_debug()) {
/*
* replay_breakpoint reads icount.
* Force recompile to succeed, because icount may
* be read only at the end of the block.
*/
if (!cpu->neg.can_do_io) {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
cpu_loop_exit_restore(cpu, ra);
}
/*
* Don't process the watchpoints when we are
* in a reverse debugging operation.
*/
replay_breakpoint();
return;
}
wp->flags |= hit_flags << BP_HIT_SHIFT;
wp->hitaddr = MAX(addr, wp->vaddr);
wp->hitattrs = attrs;
if (wp->flags & BP_CPU
&& cc->tcg_ops->debug_check_watchpoint
&& !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
wp->flags &= ~BP_WATCHPOINT_HIT;
continue;
}
cpu->watchpoint_hit = wp;
mmap_lock();
/* This call also restores vCPU state */
tb_check_watchpoint(cpu, ra);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
mmap_unlock();
cpu_loop_exit(cpu);
} else {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
} else {
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
}
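
For orientation, a sketch (not code from this commit) of how these helpers
are meant to be consulted from the softmmu memory-access path before an
access is performed. The function name and the surrounding plumbing are
assumptions; only cpu_watchpoint_address_matches(), cpu_check_watchpoint()
and the BP_MEM_WRITE flag come from the tree.

/* Hypothetical store path, shown only to illustrate the intended usage. */
static void store_with_watchpoint_check(CPUState *cpu, vaddr addr,
                                        int size, MemTxAttrs attrs,
                                        uintptr_t retaddr)
{
    /* Does any write watchpoint overlap [addr, addr + size)? */
    if (cpu_watchpoint_address_matches(cpu, addr, size) & BP_MEM_WRITE) {
        /* May raise EXCP_DEBUG and not return; otherwise falls through. */
        cpu_check_watchpoint(cpu, addr, size, attrs, BP_MEM_WRITE, retaddr);
    }
    /* ... perform the actual store ... */
}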