Merge tag 'xenfv-2' of git://git.infradead.org/users/dwmw2/qemu into staging

Enable PV backends with Xen/KVM emulation

This is phase 2, following on from the basic platform support which was
already merged.

 • Add a simple single-tenant internal XenStore implementation
 • Indirect Xen gnttab/evtchn/foreignmem/xenstore through operations table
 • Provide emulated back ends for Xen operations
 • Header cleanups to allow PV back ends to build without Xen itself
 • Enable PV back ends in emulated mode
 • Documentation update

Tested-by: Paul Durrant <paul@xen.org>
... on real Xen (master branch, 4.18) with a Debian guest.

# -----BEGIN PGP SIGNATURE-----
#
# iQJGBAABCgAwFiEEMUsIrNDeSBEzpfKGm+mA/QrAFUQFAmQHu3wSHGR3bXdAYW1h
# em9uLmNvLnVrAAoJEJvpgP0KwBVE5LYP/0VodDsQdP7Z4L+/IzgBSgEec7qmyQFB
# KlBZS/PmvCZKb0DHLI3GhXIyzD+/fnLtGSRl0rYObnKP7im+MpEDGmn97f6nIITk
# AzkdsVhNEBQFXCkLgQ9y8kTrTmsod9O4sqn0+naa2TX4FPcRN0MaNmpuLEubvaRS
# +JuyHmwy9ZeeAnsU31uJ0nx4F1hW9IDaatNoDeFcFnKCXQp36rtdZUViMowUJvwu
# Q+Xyg6dybusznaoiXd485tTPrTt+FK/wEARse3q2gRh9QblLu0r5BFb0rOfhYCTQ
# jw+5lBsOX+UlffmB9IDakRpVe4RKhvvRQSkRvYkPCshsqud9zMGhaquKg1vKBgca
# I31XSN0LCcon/ahHGtmVAxyZUpWdEnfzO1TbTNpz9oacROklgVgEYdw5Vwca71VD
# SURl6uCt9Jb9WmsR4twus4i4qDjQIDOtOF0hcxpl7HGktkxlGxUVI4qVLXARtVCS
# OTB6N0LlhJ2woj2wYK5BRTiOj03T2MkJEWaYhDdIrQREKWe2Sn4xTOH5kGbQQnOr
# km93odjBZFRHsAUnzXHXW3+yHjMefH7KrHePbmvsO4foGF77bBxosuC2ehFfvNJ0
# VM/H04NDtPYCBwdAr545PSN/q+WzEPQaquLZ0UuTBuPpMMOYd+Ff8YvQWJPyCM18
# 1mq9v6Xe9RQZ
# =JGLX
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 07 Mar 2023 22:32:28 GMT
# gpg:                using RSA key 314B08ACD0DE481133A5F2869BE980FD0AC01544
# gpg:                issuer "dwmw@amazon.co.uk"
# gpg: Good signature from "David Woodhouse <dwmw@amazon.co.uk>" [unknown]
# gpg:                 aka "David Woodhouse <dwmw@amazon.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 314B 08AC D0DE 4811 33A5  F286 9BE9 80FD 0AC0 1544

* tag 'xenfv-2' of git://git.infradead.org/users/dwmw2/qemu: (27 commits)
  docs: Update Xen-on-KVM documentation for PV disk support
  MAINTAINERS: Add entry for Xen on KVM emulation
  i386/xen: Initialize Xen backends from pc_basic_device_init() for emulation
  hw/xen: Implement soft reset for emulated gnttab
  hw/xen: Map guest XENSTORE_PFN grant in emulated Xenstore
  hw/xen: Add emulated implementation of XenStore operations
  hw/xen: Add emulated implementation of grant table operations
  hw/xen: Hook up emulated implementation for event channel operations
  hw/xen: Only advertise ring-page-order for xen-block if gnttab supports it
  hw/xen: Avoid crash when backend watch fires too early
  hw/xen: Build PV backend drivers for CONFIG_XEN_BUS
  hw/xen: Rename xen_common.h to xen_native.h
  hw/xen: Use XEN_PAGE_SIZE in PV backend drivers
  hw/xen: Move xenstore_store_pv_console_info to xen_console.c
  hw/xen: Add xenstore operations to allow redirection to internal emulation
  hw/xen: Add foreignmem operations to allow redirection to internal emulation
  hw/xen: Pass grant ref to gnttab unmap operation
  hw/xen: Add gnttab operations to allow redirection to internal emulation
  hw/xen: Add evtchn operations to allow redirection to internal emulation
  hw/xen: Create initial XenStore nodes
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 15002921e8 (Peter Maydell, 2023-03-09 13:22:05 +00:00)
53 changed files with 5811 additions and 934 deletions

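The heart of the series is the indirection layer declared in include/hw/xen/xen_backend_ops.h below: PV backend code calls qemu_xen_* wrappers instead of libxenevtchn/libxengnttab/libxenstore directly, and whichever implementation (real Xen or the internal emulation) registered its operations table services the call. A minimal caller-side sketch, using only the wrappers declared below; the function name, node path and error handling are illustrative, not part of this series:

/*
 * Sketch only: a PV backend using the qemu_xen_* wrappers from
 * xen_backend_ops.h. Everything except the wrappers themselves is
 * hypothetical.
 */
#include "qemu/osdep.h"
#include "hw/xen/xen_backend_ops.h"

static int example_backend_connected(struct qemu_xs_handle *xsh,
                                     xenevtchn_handle *eh,
                                     evtchn_port_t port)
{
    /* Write the XenBus state node: XenbusStateConnected == "4" */
    if (!qemu_xen_xs_write(xsh, XBT_NULL, "backend/qdisk/0/state", "4", 1)) {
        return -EIO;
    }
    /* Kick the frontend; routed to real Xen or the emulation alike */
    return qemu_xen_evtchn_notify(eh, port);
}
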
File: include/hw/xen/xen-bus-helper.h

@@ -8,40 +8,40 @@
#ifndef HW_XEN_BUS_HELPER_H
#define HW_XEN_BUS_HELPER_H
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_backend_ops.h"
const char *xs_strstate(enum xenbus_state state);
void xs_node_create(struct xs_handle *xsh, xs_transaction_t tid,
const char *node, struct xs_permissions perms[],
unsigned int nr_perms, Error **errp);
void xs_node_destroy(struct xs_handle *xsh, xs_transaction_t tid,
void xs_node_create(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, unsigned int owner, unsigned int domid,
unsigned int perms, Error **errp);
void xs_node_destroy(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, Error **errp);
/* Write to node/key unless node is empty, in which case write to key */
void xs_node_vprintf(struct xs_handle *xsh, xs_transaction_t tid,
void xs_node_vprintf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, va_list ap)
G_GNUC_PRINTF(6, 0);
void xs_node_printf(struct xs_handle *xsh, xs_transaction_t tid,
void xs_node_printf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, ...)
G_GNUC_PRINTF(6, 7);
/* Read from node/key unless node is empty, in which case read from key */
int xs_node_vscanf(struct xs_handle *xsh, xs_transaction_t tid,
int xs_node_vscanf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, va_list ap)
G_GNUC_SCANF(6, 0);
int xs_node_scanf(struct xs_handle *xsh, xs_transaction_t tid,
int xs_node_scanf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, ...)
G_GNUC_SCANF(6, 7);
/* Watch node/key unless node is empty, in which case watch key */
void xs_node_watch(struct xs_handle *xsh, const char *node, const char *key,
char *token, Error **errp);
void xs_node_unwatch(struct xs_handle *xsh, const char *node, const char *key,
const char *token, Error **errp);
struct qemu_xs_watch *xs_node_watch(struct qemu_xs_handle *h, const char *node,
const char *key, xs_watch_fn fn,
void *opaque, Error **errp);
void xs_node_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w);
#endif /* HW_XEN_BUS_HELPER_H */
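
The watch helpers above change shape: instead of a caller-managed token string, xs_node_watch() now takes an xs_watch_fn callback plus an opaque pointer and returns a struct qemu_xs_watch handle, which is later handed back to xs_node_unwatch(). A hedged sketch of the new usage; the MyDev type and my_dev_* functions are invented for illustration:

/* Sketch, assuming the declarations above; MyDev, my_dev_backend_changed,
 * my_dev_realize and my_dev_unrealize are illustrative names only. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/xen/xen-bus-helper.h"

typedef struct MyDev {
    char *backend_path;
    struct qemu_xs_watch *state_watch;
} MyDev;

static void my_dev_backend_changed(void *opaque, const char *path)
{
    MyDev *dev = opaque;
    /* Re-read the node that fired, e.g. with xs_node_scanf(). */
    (void)dev;
}

static void my_dev_realize(MyDev *dev, struct qemu_xs_handle *xsh, Error **errp)
{
    dev->state_watch = xs_node_watch(xsh, dev->backend_path, "state",
                                     my_dev_backend_changed, dev, errp);
}

static void my_dev_unrealize(MyDev *dev, struct qemu_xs_handle *xsh)
{
    if (dev->state_watch) {
        xs_node_unwatch(xsh, dev->state_watch);
        dev->state_watch = NULL;
    }
}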

File: include/hw/xen/xen-bus.h

@@ -8,31 +8,25 @@
#ifndef HW_XEN_BUS_H
#define HW_XEN_BUS_H
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_backend_ops.h"
#include "hw/sysbus.h"
#include "qemu/notify.h"
#include "qom/object.h"
typedef void (*XenWatchHandler)(void *opaque);
typedef struct XenWatchList XenWatchList;
typedef struct XenWatch XenWatch;
typedef struct XenEventChannel XenEventChannel;
struct XenDevice {
DeviceState qdev;
domid_t frontend_id;
char *name;
struct xs_handle *xsh;
XenWatchList *watch_list;
struct qemu_xs_handle *xsh;
char *backend_path, *frontend_path;
enum xenbus_state backend_state, frontend_state;
Notifier exit;
XenWatch *backend_state_watch, *frontend_state_watch;
struct qemu_xs_watch *backend_state_watch, *frontend_state_watch;
bool backend_online;
XenWatch *backend_online_watch;
struct qemu_xs_watch *backend_online_watch;
xengnttab_handle *xgth;
bool feature_grant_copy;
bool inactive;
QLIST_HEAD(, XenEventChannel) event_channels;
QLIST_ENTRY(XenDevice) list;
@@ -64,10 +58,9 @@ OBJECT_DECLARE_TYPE(XenDevice, XenDeviceClass, XEN_DEVICE)
struct XenBus {
BusState qbus;
domid_t backend_id;
struct xs_handle *xsh;
XenWatchList *watch_list;
struct qemu_xs_handle *xsh;
unsigned int backend_types;
XenWatch **backend_watch;
struct qemu_xs_watch **backend_watch;
QLIST_HEAD(, XenDevice) inactive_devices;
};
@@ -102,7 +95,7 @@ void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs,
unsigned int nr_refs, int prot,
Error **errp);
void xen_device_unmap_grant_refs(XenDevice *xendev, void *map,
void xen_device_unmap_grant_refs(XenDevice *xendev, void *map, uint32_t *refs,
unsigned int nr_refs, Error **errp);
typedef struct XenDeviceGrantCopySegment {

File: include/hw/xen/xen-legacy-backend.h

@@ -1,7 +1,7 @@
#ifndef HW_XEN_LEGACY_BACKEND_H
#define HW_XEN_LEGACY_BACKEND_H
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_backend_ops.h"
#include "hw/xen/xen_pvdev.h"
#include "net/net.h"
#include "qom/object.h"
@@ -15,7 +15,7 @@ DECLARE_INSTANCE_CHECKER(XenLegacyDevice, XENBACKEND,
TYPE_XENBACKEND)
/* variables */
extern struct xs_handle *xenstore;
extern struct qemu_xs_handle *xenstore;
extern const char *xen_protocol;
extern DeviceState *xen_sysdev;
extern BusState *xen_sysbus;
@@ -30,9 +30,6 @@ int xenstore_write_be_int64(struct XenLegacyDevice *xendev, const char *node,
char *xenstore_read_be_str(struct XenLegacyDevice *xendev, const char *node);
int xenstore_read_be_int(struct XenLegacyDevice *xendev, const char *node,
int *ival);
void xenstore_update_fe(char *watch, struct XenLegacyDevice *xendev);
void xenstore_update_be(char *watch, char *type, int dom,
struct XenDevOps *ops);
char *xenstore_read_fe_str(struct XenLegacyDevice *xendev, const char *node);
int xenstore_read_fe_int(struct XenLegacyDevice *xendev, const char *node,
int *ival);
@@ -51,18 +48,7 @@ void xen_be_set_max_grant_refs(struct XenLegacyDevice *xendev,
void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs,
unsigned int nr_refs, int prot);
void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
unsigned int nr_refs);
typedef struct XenGrantCopySegment {
union {
void *virt;
struct {
uint32_t ref;
off_t offset;
} foreign;
} source, dest;
size_t len;
} XenGrantCopySegment;
uint32_t *refs, unsigned int nr_refs);
int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev,
bool to_domain, XenGrantCopySegment segs[],
@@ -75,9 +61,9 @@ static inline void *xen_be_map_grant_ref(struct XenLegacyDevice *xendev,
}
static inline void xen_be_unmap_grant_ref(struct XenLegacyDevice *xendev,
void *ptr)
void *ptr, uint32_t ref)
{
return xen_be_unmap_grant_refs(xendev, ptr, 1);
return xen_be_unmap_grant_refs(xendev, ptr, &ref, 1);
}
/* actual backend drivers */
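
One behavioural change in the legacy helpers above: unmapping grant refs now takes the original ref array as well as the mapped pointer, so callers must keep the refs around for the lifetime of the mapping (the emulated grant table code needs them to release the right grants). A sketch with invented caller names, using only the helpers declared above:

/* Sketch only: xen_be_map_grant_refs()/xen_be_unmap_grant_refs() come from
 * xen-legacy-backend.h; the surrounding function is illustrative. */
#include "qemu/osdep.h"
#include <sys/mman.h>
#include "hw/xen/xen-legacy-backend.h"

static void example_ring_attach_detach(struct XenLegacyDevice *xendev,
                                       uint32_t *refs, unsigned int nr)
{
    void *ring = xen_be_map_grant_refs(xendev, refs, nr,
                                       PROT_READ | PROT_WRITE);
    if (!ring) {
        return;
    }
    /* ... process the shared ring ... */
    xen_be_unmap_grant_refs(xendev, ring, refs, nr);   /* refs now required */
}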

File: include/hw/xen/xen.h

@@ -8,15 +8,21 @@
#define QEMU_HW_XEN_H
/*
* As a temporary measure while the headers are being untangled, define
* __XEN_TOOLS__ here before any Xen headers are included. Otherwise, if
* the Xen toolstack library headers are later included, they will find
* some of the "internal" definitions missing and the build will fail. In
* later commits, we'll end up with a rule that the native libraries have
* to be included first, which will ensure that the libraries get the
* version of Xen libraries that they expect.
* C files using Xen toolstack libraries will have included those headers
* already via xen_native.h, and having __XEN_TOOLS__ defined will have
* automatically set __XEN_INTERFACE_VERSION__ to the latest supported
* by the *system* Xen headers which were transitively included.
*
* C files which are part of the internal emulation, and which did not
* include xen_native.h, may need this defined so that the Xen headers
* imported to include/hw/xen/interface/ will expose the appropriate API
* version.
*
* This is why there's a rule that xen_native.h must be included first.
*/
#define __XEN_TOOLS__ 1
#ifndef __XEN_INTERFACE_VERSION__
#define __XEN_INTERFACE_VERSION__ 0x00040e00
#endif
#include "exec/cpu-common.h"
@@ -39,8 +45,6 @@ int xen_is_pirq_msi(uint32_t msi_data);
qemu_irq *xen_interrupt_controller_init(void);
void xenstore_store_pv_console_info(int i, Chardev *chr);
void xen_register_framebuffer(struct MemoryRegion *mr);
#endif /* QEMU_HW_XEN_H */
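
The comment in xen.h above encodes the new include-ordering rule: code that uses the real Xen toolstack libraries must include xen_native.h before any other Xen header, so that __XEN_INTERFACE_VERSION__ is set by the system headers rather than by the fallback definition added here. A sketch of what that looks like in a native-only source file (the file name is illustrative):

/* hw/xen/example-native-file.c (illustrative name): native Xen code only. */
#include "qemu/osdep.h"
#include "hw/xen/xen_native.h"         /* first: pulls in <xenctrl.h> etc. */
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen-bus.h"

/* Emulation-only code never includes xen_native.h and instead relies on
 * the __XEN_INTERFACE_VERSION__ fallback in xen.h above. */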

File: include/hw/xen/xen_backend_ops.h (new file)

@@ -0,0 +1,408 @@
/*
* QEMU Xen backend support
*
* Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Authors: David Woodhouse <dwmw2@infradead.org>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef QEMU_XEN_BACKEND_OPS_H
#define QEMU_XEN_BACKEND_OPS_H
#include "hw/xen/xen.h"
#include "hw/xen/interface/xen.h"
#include "hw/xen/interface/io/xenbus.h"
/*
* For the time being, these operations map fairly closely to the API of
* the actual Xen libraries, e.g. libxenevtchn. As we complete the migration
* from XenLegacyDevice back ends to the new XenDevice model, they may
* evolve to slightly higher-level APIs.
*
* The internal emulations do not emulate the Xen APIs entirely faithfully;
* only enough to be used by the Xen backend devices. For example, only one
* event channel can be bound to each handle, since that's sufficient for
* the device support (only the true Xen HVM backend uses more). And the
* behaviour of unmask() and pending() is different too because the device
* backends don't care.
*/
typedef struct xenevtchn_handle xenevtchn_handle;
typedef int xenevtchn_port_or_error_t;
typedef uint32_t evtchn_port_t;
typedef uint16_t domid_t;
typedef uint32_t grant_ref_t;
#define XEN_PAGE_SHIFT 12
#define XEN_PAGE_SIZE (1UL << XEN_PAGE_SHIFT)
#define XEN_PAGE_MASK (~(XEN_PAGE_SIZE - 1))
#ifndef xen_rmb
#define xen_rmb() smp_rmb()
#endif
#ifndef xen_wmb
#define xen_wmb() smp_wmb()
#endif
#ifndef xen_mb
#define xen_mb() smp_mb()
#endif
struct evtchn_backend_ops {
xenevtchn_handle *(*open)(void);
int (*bind_interdomain)(xenevtchn_handle *xc, uint32_t domid,
evtchn_port_t guest_port);
int (*unbind)(xenevtchn_handle *xc, evtchn_port_t port);
int (*close)(struct xenevtchn_handle *xc);
int (*get_fd)(struct xenevtchn_handle *xc);
int (*notify)(struct xenevtchn_handle *xc, evtchn_port_t port);
int (*unmask)(struct xenevtchn_handle *xc, evtchn_port_t port);
int (*pending)(struct xenevtchn_handle *xc);
};
extern struct evtchn_backend_ops *xen_evtchn_ops;
static inline xenevtchn_handle *qemu_xen_evtchn_open(void)
{
if (!xen_evtchn_ops) {
return NULL;
}
return xen_evtchn_ops->open();
}
static inline int qemu_xen_evtchn_bind_interdomain(xenevtchn_handle *xc,
uint32_t domid,
evtchn_port_t guest_port)
{
if (!xen_evtchn_ops) {
return -ENOSYS;
}
return xen_evtchn_ops->bind_interdomain(xc, domid, guest_port);
}
static inline int qemu_xen_evtchn_unbind(xenevtchn_handle *xc,
evtchn_port_t port)
{
if (!xen_evtchn_ops) {
return -ENOSYS;
}
return xen_evtchn_ops->unbind(xc, port);
}
static inline int qemu_xen_evtchn_close(xenevtchn_handle *xc)
{
if (!xen_evtchn_ops) {
return -ENOSYS;
}
return xen_evtchn_ops->close(xc);
}
static inline int qemu_xen_evtchn_fd(xenevtchn_handle *xc)
{
if (!xen_evtchn_ops) {
return -ENOSYS;
}
return xen_evtchn_ops->get_fd(xc);
}
static inline int qemu_xen_evtchn_notify(xenevtchn_handle *xc,
evtchn_port_t port)
{
if (!xen_evtchn_ops) {
return -ENOSYS;
}
return xen_evtchn_ops->notify(xc, port);
}
static inline int qemu_xen_evtchn_unmask(xenevtchn_handle *xc,
evtchn_port_t port)
{
if (!xen_evtchn_ops) {
return -ENOSYS;
}
return xen_evtchn_ops->unmask(xc, port);
}
static inline int qemu_xen_evtchn_pending(xenevtchn_handle *xc)
{
if (!xen_evtchn_ops) {
return -ENOSYS;
}
return xen_evtchn_ops->pending(xc);
}
typedef struct xengntdev_handle xengnttab_handle;
typedef struct XenGrantCopySegment {
union {
void *virt;
struct {
uint32_t ref;
off_t offset;
} foreign;
} source, dest;
size_t len;
} XenGrantCopySegment;
#define XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE (1U << 0)
struct gnttab_backend_ops {
uint32_t features;
xengnttab_handle *(*open)(void);
int (*close)(xengnttab_handle *xgt);
int (*grant_copy)(xengnttab_handle *xgt, bool to_domain, uint32_t domid,
XenGrantCopySegment *segs, uint32_t nr_segs,
Error **errp);
int (*set_max_grants)(xengnttab_handle *xgt, uint32_t nr_grants);
void *(*map_refs)(xengnttab_handle *xgt, uint32_t count, uint32_t domid,
uint32_t *refs, int prot);
int (*unmap)(xengnttab_handle *xgt, void *start_address, uint32_t *refs,
uint32_t count);
};
extern struct gnttab_backend_ops *xen_gnttab_ops;
static inline bool qemu_xen_gnttab_can_map_multi(void)
{
return xen_gnttab_ops &&
!!(xen_gnttab_ops->features & XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE);
}
static inline xengnttab_handle *qemu_xen_gnttab_open(void)
{
if (!xen_gnttab_ops) {
return NULL;
}
return xen_gnttab_ops->open();
}
static inline int qemu_xen_gnttab_close(xengnttab_handle *xgt)
{
if (!xen_gnttab_ops) {
return -ENOSYS;
}
return xen_gnttab_ops->close(xgt);
}
static inline int qemu_xen_gnttab_grant_copy(xengnttab_handle *xgt,
bool to_domain, uint32_t domid,
XenGrantCopySegment *segs,
uint32_t nr_segs, Error **errp)
{
if (!xen_gnttab_ops) {
return -ENOSYS;
}
return xen_gnttab_ops->grant_copy(xgt, to_domain, domid, segs, nr_segs,
errp);
}
static inline int qemu_xen_gnttab_set_max_grants(xengnttab_handle *xgt,
uint32_t nr_grants)
{
if (!xen_gnttab_ops) {
return -ENOSYS;
}
return xen_gnttab_ops->set_max_grants(xgt, nr_grants);
}
static inline void *qemu_xen_gnttab_map_refs(xengnttab_handle *xgt,
uint32_t count, uint32_t domid,
uint32_t *refs, int prot)
{
if (!xen_gnttab_ops) {
return NULL;
}
return xen_gnttab_ops->map_refs(xgt, count, domid, refs, prot);
}
static inline int qemu_xen_gnttab_unmap(xengnttab_handle *xgt,
void *start_address, uint32_t *refs,
uint32_t count)
{
if (!xen_gnttab_ops) {
return -ENOSYS;
}
return xen_gnttab_ops->unmap(xgt, start_address, refs, count);
}
struct foreignmem_backend_ops {
void *(*map)(uint32_t dom, void *addr, int prot, size_t pages,
xen_pfn_t *pfns, int *errs);
int (*unmap)(void *addr, size_t pages);
};
extern struct foreignmem_backend_ops *xen_foreignmem_ops;
static inline void *qemu_xen_foreignmem_map(uint32_t dom, void *addr, int prot,
size_t pages, xen_pfn_t *pfns,
int *errs)
{
if (!xen_foreignmem_ops) {
return NULL;
}
return xen_foreignmem_ops->map(dom, addr, prot, pages, pfns, errs);
}
static inline int qemu_xen_foreignmem_unmap(void *addr, size_t pages)
{
if (!xen_foreignmem_ops) {
return -ENOSYS;
}
return xen_foreignmem_ops->unmap(addr, pages);
}
typedef void (*xs_watch_fn)(void *opaque, const char *path);
struct qemu_xs_handle;
struct qemu_xs_watch;
typedef uint32_t xs_transaction_t;
#define XBT_NULL 0
#define XS_PERM_NONE 0x00
#define XS_PERM_READ 0x01
#define XS_PERM_WRITE 0x02
struct xenstore_backend_ops {
struct qemu_xs_handle *(*open)(void);
void (*close)(struct qemu_xs_handle *h);
char *(*get_domain_path)(struct qemu_xs_handle *h, unsigned int domid);
char **(*directory)(struct qemu_xs_handle *h, xs_transaction_t t,
const char *path, unsigned int *num);
void *(*read)(struct qemu_xs_handle *h, xs_transaction_t t,
const char *path, unsigned int *len);
bool (*write)(struct qemu_xs_handle *h, xs_transaction_t t,
const char *path, const void *data, unsigned int len);
bool (*create)(struct qemu_xs_handle *h, xs_transaction_t t,
unsigned int owner, unsigned int domid,
unsigned int perms, const char *path);
bool (*destroy)(struct qemu_xs_handle *h, xs_transaction_t t,
const char *path);
struct qemu_xs_watch *(*watch)(struct qemu_xs_handle *h, const char *path,
xs_watch_fn fn, void *opaque);
void (*unwatch)(struct qemu_xs_handle *h, struct qemu_xs_watch *w);
xs_transaction_t (*transaction_start)(struct qemu_xs_handle *h);
bool (*transaction_end)(struct qemu_xs_handle *h, xs_transaction_t t,
bool abort);
};
extern struct xenstore_backend_ops *xen_xenstore_ops;
static inline struct qemu_xs_handle *qemu_xen_xs_open(void)
{
if (!xen_xenstore_ops) {
return NULL;
}
return xen_xenstore_ops->open();
}
static inline void qemu_xen_xs_close(struct qemu_xs_handle *h)
{
if (!xen_xenstore_ops) {
return;
}
xen_xenstore_ops->close(h);
}
static inline char *qemu_xen_xs_get_domain_path(struct qemu_xs_handle *h,
unsigned int domid)
{
if (!xen_xenstore_ops) {
return NULL;
}
return xen_xenstore_ops->get_domain_path(h, domid);
}
static inline char **qemu_xen_xs_directory(struct qemu_xs_handle *h,
xs_transaction_t t, const char *path,
unsigned int *num)
{
if (!xen_xenstore_ops) {
return NULL;
}
return xen_xenstore_ops->directory(h, t, path, num);
}
static inline void *qemu_xen_xs_read(struct qemu_xs_handle *h,
xs_transaction_t t, const char *path,
unsigned int *len)
{
if (!xen_xenstore_ops) {
return NULL;
}
return xen_xenstore_ops->read(h, t, path, len);
}
static inline bool qemu_xen_xs_write(struct qemu_xs_handle *h,
xs_transaction_t t, const char *path,
const void *data, unsigned int len)
{
if (!xen_xenstore_ops) {
return false;
}
return xen_xenstore_ops->write(h, t, path, data, len);
}
static inline bool qemu_xen_xs_create(struct qemu_xs_handle *h,
xs_transaction_t t, unsigned int owner,
unsigned int domid, unsigned int perms,
const char *path)
{
if (!xen_xenstore_ops) {
return false;
}
return xen_xenstore_ops->create(h, t, owner, domid, perms, path);
}
static inline bool qemu_xen_xs_destroy(struct qemu_xs_handle *h,
xs_transaction_t t, const char *path)
{
if (!xen_xenstore_ops) {
return false;
}
return xen_xenstore_ops->destroy(h, t, path);
}
static inline struct qemu_xs_watch *qemu_xen_xs_watch(struct qemu_xs_handle *h,
const char *path,
xs_watch_fn fn,
void *opaque)
{
if (!xen_xenstore_ops) {
return NULL;
}
return xen_xenstore_ops->watch(h, path, fn, opaque);
}
static inline void qemu_xen_xs_unwatch(struct qemu_xs_handle *h,
struct qemu_xs_watch *w)
{
if (!xen_xenstore_ops) {
return;
}
xen_xenstore_ops->unwatch(h, w);
}
static inline xs_transaction_t qemu_xen_xs_transaction_start(struct qemu_xs_handle *h)
{
if (!xen_xenstore_ops) {
return XBT_NULL;
}
return xen_xenstore_ops->transaction_start(h);
}
static inline bool qemu_xen_xs_transaction_end(struct qemu_xs_handle *h,
xs_transaction_t t, bool abort)
{
if (!xen_xenstore_ops) {
return false;
}
return xen_xenstore_ops->transaction_end(h, t, abort);
}
void setup_xen_backend_ops(void);
#endif /* QEMU_XEN_BACKEND_OPS_H */
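
setup_xen_backend_ops(), declared at the end of the header, is where the global operations pointers above get populated. A sketch of the shape this takes for the real-Xen case; the libxen*_backend_ops instance names are assumptions (the actual definitions live in hw/xen C files added elsewhere in this series):

/* Sketch only: the ops tables are defined elsewhere in the series;
 * the extern instance names below are assumed, not taken from this diff. */
#include "qemu/osdep.h"
#include "hw/xen/xen_backend_ops.h"

extern struct evtchn_backend_ops     libxenevtchn_backend_ops;
extern struct gnttab_backend_ops     libxengnttab_backend_ops;
extern struct foreignmem_backend_ops libxenforeignmem_backend_ops;
extern struct xenstore_backend_ops   libxenstore_backend_ops;

void setup_xen_backend_ops(void)
{
#ifdef CONFIG_XEN
    /* Real Xen: route the qemu_xen_* wrappers to the toolstack libraries. */
    xen_evtchn_ops     = &libxenevtchn_backend_ops;
    xen_gnttab_ops     = &libxengnttab_backend_ops;
    xen_foreignmem_ops = &libxenforeignmem_backend_ops;
    xen_xenstore_ops   = &libxenstore_backend_ops;
#endif
    /*
     * In emulated Xen-on-KVM mode the internal implementations install
     * their own ops tables instead, when the Xen emulation is set up.
     */
}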

File: include/hw/xen/xen_native.h (renamed from xen_common.h)

@@ -1,5 +1,9 @@
#ifndef QEMU_HW_XEN_COMMON_H
#define QEMU_HW_XEN_COMMON_H
#ifndef QEMU_HW_XEN_NATIVE_H
#define QEMU_HW_XEN_NATIVE_H
#ifdef __XEN_INTERFACE_VERSION__
#error In Xen native files, include xen_native.h before other Xen headers
#endif
/*
* If we have new enough libxenctrl then we do not want/need these compat
@@ -12,7 +16,6 @@
#include <xenctrl.h>
#include <xenstore.h>
#include "hw/xen/interface/io/xenbus.h"
#include "hw/xen/xen.h"
#include "hw/pci/pci_device.h"
@@ -28,49 +31,12 @@ extern xc_interface *xen_xc;
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
typedef xc_interface xenforeignmemory_handle;
typedef xc_evtchn xenevtchn_handle;
typedef xc_gnttab xengnttab_handle;
typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;
#define xenevtchn_open(l, f) xc_evtchn_open(l, f);
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)
static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
int prot, size_t pages,
const xen_pfn_t arr[/*pages*/],
int err[/*pages*/])
{
if (err)
return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
else
return xc_map_foreign_pages(h, dom, prot, arr, pages);
}
#define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
#include <xenevtchn.h>
#include <xengnttab.h>
#include <xenforeignmemory.h>
#endif
@@ -660,31 +626,4 @@ static inline int xen_set_ioreq_server_state(domid_t dom,
#endif
/* Xen before 4.8 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
struct xengnttab_grant_copy_segment {
union xengnttab_copy_ptr {
void *virt;
struct {
uint32_t ref;
uint16_t offset;
uint16_t domid;
} foreign;
} source, dest;
uint16_t len;
uint16_t flags;
int16_t status;
};
typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;
static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
xengnttab_grant_copy_segment_t *segs)
{
return -ENOSYS;
}
#endif
#endif /* QEMU_HW_XEN_COMMON_H */
#endif /* QEMU_HW_XEN_NATIVE_H */

File: include/hw/xen/xen_pvdev.h

@@ -1,7 +1,9 @@
#ifndef QEMU_HW_XEN_PVDEV_H
#define QEMU_HW_XEN_PVDEV_H
#include "hw/xen/xen_common.h"
#include "hw/qdev-core.h"
#include "hw/xen/xen_backend_ops.h"
/* ------------------------------------------------------------- */
#define XEN_BUFSIZE 1024
@@ -38,6 +40,7 @@ struct XenLegacyDevice {
char name[64];
int debug;
struct qemu_xs_watch *watch;
enum xenbus_state be_state;
enum xenbus_state fe_state;
int online;
@@ -63,7 +66,6 @@ int xenstore_write_int64(const char *base, const char *node, int64_t ival);
char *xenstore_read_str(const char *base, const char *node);
int xenstore_read_int(const char *base, const char *node, int *ival);
int xenstore_read_uint64(const char *base, const char *node, uint64_t *uval);
void xenstore_update(void *unused);
const char *xenbus_strstate(enum xenbus_state state);