Xen 2017/04/21 + fix

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJY/5EdAAoJEIlPj0hw4a6QHj8P/1D3iZq8vKyLvnkioYC00ao8
 dhuCFV1Dk0dl/QXvDXxNAHFqmHp2PzHeHF2V19bTbUh9QnY3WH9aSsMQ7YVPDzLb
 6ZezbxXd1l8+IOrWh+ch+x6jiUpRAeHGjhSpFxQEmH5THBmPtLHBFhAeeGpVq6CT
 MPFlN0mr1snyMMbK2aKCVTHgh5Wip83/t/kijIq4RmPwXdJbwxx55PL8jxC/NZHq
 /NbAZzAT149fmItfBNnsRTfNoDs7fSmgM9VelYsnJNHs6qkYYfewxys4vudePG8n
 ZcjCXMv9vdCSTfXfYY9nxhfGCgkkRSvBWrzARRIUPxTXGAmEU52LJrccpG9AEFRZ
 Kgz1JYr0y+u/g+RsCJvAglvmciawXgZH8GIR4sFl2iv4u6cx8PxNRgjTdt7YX4Je
 V7+scmOLB0U5wkI7PUcPV1v2fwKJHFfMdyik257otfMCJiTCnWGJfCZGR4JAdy3C
 NHuG4eIj2XLjZ7IQIo3mg+EfdCWdfVMKtXj0MAFsP6Kcr6sY/ef5sx/wB61k3NU1
 Y4ctI/LAOONsjlC2yzJ4aZR4LgyFcVcwx8FKOK7niCcCgVLYGWpyiHU3kFKYlmKM
 FKIlGPPOK8WuRV9NUGDI9A8XENyFXGyiR2xGCotfgHj+FSSqRzhKH4ecfgNimJVY
 wjTzZmBa68pLHdXaJ+SV
 =OfeB
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/sstabellini/tags/xen-20170421-v2-tag' into staging

Xen 2017/04/21 + fix

# gpg: Signature made Tue 25 Apr 2017 19:10:37 BST
# gpg:                using RSA key 0x894F8F4870E1AE90
# gpg: Good signature from "Stefano Stabellini <stefano.stabellini@eu.citrix.com>"
# gpg:                 aka "Stefano Stabellini <sstabellini@kernel.org>"
# Primary key fingerprint: D04E 33AB A51F 67BA 07D3  0AEA 894F 8F48 70E1 AE90

* remotes/sstabellini/tags/xen-20170421-v2-tag: (21 commits)
  move xen-mapcache.c to hw/i386/xen/
  move xen-hvm.c to hw/i386/xen/
  move xen-common.c to hw/xen/
  add xen-9p-backend to MAINTAINERS under Xen
  xen/9pfs: build and register Xen 9pfs backend
  xen/9pfs: send responses back to the frontend
  xen/9pfs: implement in/out_iov_from_pdu and vmarshal/vunmarshal
  xen/9pfs: receive requests from the frontend
  xen/9pfs: connect to the frontend
  xen/9pfs: introduce Xen 9pfs backend
  9p: introduce a type for the 9p header
  xen: import ring.h from xen
  configure: use pkg-config for obtaining xen version
  xen: additionally restrict xenforeignmemory operations
  xen: use libxendevice model to restrict operations
  xen: use 5 digit xen versions
  xen: use libxendevicemodel when available
  configure: detect presence of libxendevicemodel
  xen: create wrappers for all other uses of xc_hvm_XXX() functions
  xen: rename xen_modified_memory() to xen_hvm_modified_memory()
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2017-04-26 10:22:31 +01:00
commit 52e94ea5de
33 changed files with 1433 additions and 191 deletions

hw/9pfs/9p.h

@@ -119,6 +119,12 @@ static inline char *rpath(FsContext *ctx, const char *path)
 typedef struct V9fsPDU V9fsPDU;
 struct V9fsState;
 
+typedef struct {
+    uint32_t size_le;
+    uint8_t id;
+    uint16_t tag_le;
+} QEMU_PACKED P9MsgHeader;
+
 struct V9fsPDU
 {
     uint32_t size;
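
The new P9MsgHeader mirrors the 7-byte 9P wire header, size[4] type[1] tag[2], which is little-endian on the wire (hence the _le suffixes and QEMU_PACKED). As an editor's illustration only, not part of the commit, the standalone sketch below decodes such a header from a raw byte buffer into host-endian values; the struct and helper names are made up.

    #include <stdint.h>
    #include <stdio.h>

    /* Decoded form of the 9P wire header: size[4] type[1] tag[2]. */
    struct p9_header {
        uint32_t size;
        uint8_t id;
        uint16_t tag;
    };

    /* Assemble the multi-byte fields explicitly so the result is
     * host-endian no matter which machine this runs on. */
    static struct p9_header p9_header_decode(const uint8_t *buf)
    {
        struct p9_header h;

        h.size = (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
                 ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
        h.id = buf[4];
        h.tag = (uint16_t)(buf[5] | (buf[6] << 8));
        return h;
    }

    int main(void)
    {
        /* size = 19, type = 100 (Tversion), tag = 0xffff (NOTAG) */
        const uint8_t wire[7] = { 0x13, 0x00, 0x00, 0x00, 0x64, 0xff, 0xff };
        struct p9_header h = p9_header_decode(wire);

        printf("size=%u id=%u tag=0x%04x\n",
               (unsigned)h.size, (unsigned)h.id, (unsigned)h.tag);
        return 0;
    }

QEMU does the equivalent conversion with its le*_to_cpu() helpers on the packed fields, as seen in xen_9pfs_receive() further down in this diff.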

hw/9pfs/Makefile.objs

@@ -5,5 +5,6 @@ common-obj-y += coth.o cofs.o codir.o cofile.o
 common-obj-y += coxattr.o 9p-synth.o
 common-obj-$(CONFIG_OPEN_BY_HANDLE) += 9p-handle.o
 common-obj-y += 9p-proxy.o
+common-obj-$(CONFIG_XEN) += xen-9p-backend.o
 
 obj-y += virtio-9p-device.o

hw/9pfs/virtio-9p-device.c

@@ -46,11 +46,7 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
     VirtQueueElement *elem;
 
     while ((pdu = pdu_alloc(s))) {
-        struct {
-            uint32_t size_le;
-            uint8_t id;
-            uint16_t tag_le;
-        } QEMU_PACKED out;
+        P9MsgHeader out;
 
         elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
         if (!elem) {

hw/9pfs/xen-9p-backend.c (new file, 440 lines)

@@ -0,0 +1,440 @@
/*
 * Xen 9p backend
 *
 * Copyright Aporeto 2017
 *
 * Authors:
 *  Stefano Stabellini <stefano@aporeto.com>
 *
 */

#include "qemu/osdep.h"

#include "hw/hw.h"
#include "hw/9pfs/9p.h"
#include "hw/xen/xen_backend.h"
#include "hw/9pfs/xen-9pfs.h"
#include "qemu/config-file.h"
#include "fsdev/qemu-fsdev.h"

#define VERSIONS "1"
#define MAX_RINGS 8
#define MAX_RING_ORDER 8

typedef struct Xen9pfsRing {
    struct Xen9pfsDev *priv;

    int ref;
    xenevtchn_handle *evtchndev;
    int evtchn;
    int local_port;
    int ring_order;
    struct xen_9pfs_data_intf *intf;
    unsigned char *data;
    struct xen_9pfs_data ring;

    struct iovec *sg;
    QEMUBH *bh;

    /* local copies, so that we can read/write PDU data directly from
     * the ring */
    RING_IDX out_cons, out_size, in_cons;
    bool inprogress;
} Xen9pfsRing;

typedef struct Xen9pfsDev {
    struct XenDevice xendev;  /* must be first */
    V9fsState state;
    char *path;
    char *security_model;
    char *tag;
    char *id;

    int num_rings;
    Xen9pfsRing *rings;
} Xen9pfsDev;

static void xen_9pfs_in_sg(Xen9pfsRing *ring,
                           struct iovec *in_sg,
                           int *num,
                           uint32_t idx,
                           uint32_t size)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->in_cons;
    prod = ring->intf->in_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_prod < masked_cons) {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = masked_cons - masked_prod;
        *num = 1;
    } else {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod;
        in_sg[1].iov_base = ring->ring.in;
        in_sg[1].iov_len = masked_cons;
        *num = 2;
    }
}

static void xen_9pfs_out_sg(Xen9pfsRing *ring,
                            struct iovec *out_sg,
                            int *num,
                            uint32_t idx)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_cons < masked_prod) {
        out_sg[0].iov_base = ring->ring.out + masked_cons;
        out_sg[0].iov_len = ring->out_size;
        *num = 1;
    } else {
        if (ring->out_size >
            (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) -
                                masked_cons;
            out_sg[1].iov_base = ring->ring.out;
            out_sg[1].iov_len = ring->out_size -
                                (XEN_FLEX_RING_SIZE(ring->ring_order) -
                                 masked_cons);
            *num = 2;
        } else {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = ring->out_size;
            *num = 1;
        }
    }
}

static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
                                     size_t offset,
                                     const char *fmt,
                                     va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec in_sg[2];
    int num;

    xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                   in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));
    return v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
}

static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
                                       size_t offset,
                                       const char *fmt,
                                       va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec out_sg[2];
    int num;

    xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                    out_sg, &num, pdu->idx);
    return v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
}

static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
                                           struct iovec **piov,
                                           unsigned int *pniov)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;

    g_free(ring->sg);

    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
    xen_9pfs_out_sg(ring, ring->sg, &num, pdu->idx);
    *piov = ring->sg;
    *pniov = num;
}

static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                          struct iovec **piov,
                                          unsigned int *pniov,
                                          size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;

    g_free(ring->sg);

    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
    *piov = ring->sg;
    *pniov = num;
}

static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
{
    RING_IDX prod;
    Xen9pfsDev *priv = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings];

    g_free(ring->sg);
    ring->sg = NULL;

    ring->intf->out_cons = ring->out_cons;
    xen_wmb();

    prod = ring->intf->in_prod;
    xen_rmb();
    ring->intf->in_prod = prod + pdu->size;
    xen_wmb();

    ring->inprogress = false;
    xenevtchn_notify(ring->evtchndev, ring->local_port);

    qemu_bh_schedule(ring->bh);
}

static const struct V9fsTransport xen_9p_transport = {
    .pdu_vmarshal = xen_9pfs_pdu_vmarshal,
    .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal,
    .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu,
    .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu,
    .push_and_notify = xen_9pfs_push_and_notify,
};

static int xen_9pfs_init(struct XenDevice *xendev)
{
    return 0;
}

static int xen_9pfs_receive(Xen9pfsRing *ring)
{
    P9MsgHeader h;
    RING_IDX cons, prod, masked_prod, masked_cons;
    V9fsPDU *pdu;

    if (ring->inprogress) {
        return 0;
    }

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();

    if (xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order)) <
        sizeof(h)) {
        return 0;
    }
    ring->inprogress = true;

    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h),
                         masked_prod, &masked_cons,
                         XEN_FLEX_RING_SIZE(ring->ring_order));

    /* cannot fail, because we only handle one request per ring at a time */
    pdu = pdu_alloc(&ring->priv->state);
    pdu->size = le32_to_cpu(h.size_le);
    pdu->id = h.id;
    pdu->tag = le32_to_cpu(h.tag_le);
    ring->out_size = le32_to_cpu(h.size_le);
    ring->out_cons = cons + le32_to_cpu(h.size_le);

    qemu_co_queue_init(&pdu->complete);
    pdu_submit(pdu);

    return 0;
}

static void xen_9pfs_bh(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    xen_9pfs_receive(ring);
}

static void xen_9pfs_evtchn_event(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    evtchn_port_t port;

    port = xenevtchn_pending(ring->evtchndev);
    xenevtchn_unmask(ring->evtchndev, port);

    qemu_bh_schedule(ring->bh);
}

static int xen_9pfs_free(struct XenDevice *xendev)
{
    int i;
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);

    g_free(xen_9pdev->id);
    g_free(xen_9pdev->tag);
    g_free(xen_9pdev->path);
    g_free(xen_9pdev->security_model);

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].data != NULL) {
            xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
                            xen_9pdev->rings[i].data,
                            (1 << xen_9pdev->rings[i].ring_order));
        }
        if (xen_9pdev->rings[i].intf != NULL) {
            xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
                            xen_9pdev->rings[i].intf,
                            1);
        }
        if (xen_9pdev->rings[i].evtchndev > 0) {
            qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                                NULL, NULL, NULL);
            xenevtchn_unbind(xen_9pdev->rings[i].evtchndev,
                             xen_9pdev->rings[i].local_port);
        }
        if (xen_9pdev->rings[i].bh != NULL) {
            qemu_bh_delete(xen_9pdev->rings[i].bh);
        }
    }
    g_free(xen_9pdev->rings);
    return 0;
}

static int xen_9pfs_connect(struct XenDevice *xendev)
{
    int i;
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    V9fsState *s = &xen_9pdev->state;
    QemuOpts *fsdev;

    if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
                             &xen_9pdev->num_rings) == -1 ||
        xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
        return -1;
    }

    xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(Xen9pfsRing));
    for (i = 0; i < xen_9pdev->num_rings; i++) {
        char *str;
        int ring_order;

        xen_9pdev->rings[i].priv = xen_9pdev;
        xen_9pdev->rings[i].evtchn = -1;
        xen_9pdev->rings[i].local_port = -1;

        str = g_strdup_printf("ring-ref%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].ref) == -1) {
            goto out;
        }
        g_free(str);
        str = g_strdup_printf("event-channel-%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].evtchn) == -1) {
            goto out;
        }
        g_free(str);

        xen_9pdev->rings[i].intf = xengnttab_map_grant_ref(
                xen_9pdev->xendev.gnttabdev,
                xen_9pdev->xendev.dom,
                xen_9pdev->rings[i].ref,
                PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].intf) {
            goto out;
        }
        ring_order = xen_9pdev->rings[i].intf->ring_order;
        if (ring_order > MAX_RING_ORDER) {
            goto out;
        }
        xen_9pdev->rings[i].ring_order = ring_order;
        xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs(
                xen_9pdev->xendev.gnttabdev,
                (1 << ring_order),
                xen_9pdev->xendev.dom,
                xen_9pdev->rings[i].intf->ref,
                PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].data) {
            goto out;
        }
        xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data;
        xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data +
                                       XEN_FLEX_RING_SIZE(ring_order);

        xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
        xen_9pdev->rings[i].out_cons = 0;
        xen_9pdev->rings[i].out_size = 0;
        xen_9pdev->rings[i].inprogress = false;

        xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
        if (xen_9pdev->rings[i].evtchndev == NULL) {
            goto out;
        }
        fcntl(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), F_SETFD, FD_CLOEXEC);
        xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
                                            (xen_9pdev->rings[i].evtchndev,
                                             xendev->dom,
                                             xen_9pdev->rings[i].evtchn);
        if (xen_9pdev->rings[i].local_port == -1) {
            xen_pv_printf(xendev, 0,
                          "xenevtchn_bind_interdomain failed port=%d\n",
                          xen_9pdev->rings[i].evtchn);
            goto out;
        }
        xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
        qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                            xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
    }

    xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model");
    xen_9pdev->path = xenstore_read_be_str(xendev, "path");
    xen_9pdev->id = s->fsconf.fsdev_id =
        g_strdup_printf("xen9p%d", xendev->dev);
    xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, "tag");
    v9fs_register_transport(s, &xen_9p_transport);
    fsdev = qemu_opts_create(qemu_find_opts("fsdev"),
                             s->fsconf.tag,
                             1, NULL);
    qemu_opt_set(fsdev, "fsdriver", "local", NULL);
    qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL);
    qemu_opt_set(fsdev, "security_model", xen_9pdev->security_model, NULL);
    qemu_opts_set_id(fsdev, s->fsconf.fsdev_id);
    qemu_fsdev_add(fsdev);
    v9fs_device_realize_common(s, NULL);

    return 0;

out:
    xen_9pfs_free(xendev);
    return -1;
}

static void xen_9pfs_alloc(struct XenDevice *xendev)
{
    xenstore_write_be_str(xendev, "versions", VERSIONS);
    xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
    xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
}

static void xen_9pfs_disconnect(struct XenDevice *xendev)
{
    /* Dynamic hotplug of PV filesystems at runtime is not supported. */
}

struct XenDevOps xen_9pfs_ops = {
    .size       = sizeof(Xen9pfsDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = xen_9pfs_alloc,
    .init       = xen_9pfs_init,
    .initialise = xen_9pfs_connect,
    .disconnect = xen_9pfs_disconnect,
    .free       = xen_9pfs_free,
};
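
The scatter-gather helpers xen_9pfs_in_sg() and xen_9pfs_out_sg() above reduce the free-running ring indices with xen_9pfs_mask() and split any span that crosses the end of the ring buffer into at most two iovecs. The following standalone sketch, an editor's illustration rather than code from the commit, shows the same wrap-around split for a small power-of-two ring; every name in it is made up.

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/uio.h>

    #define RING_SIZE 16 /* power of two, standing in for XEN_FLEX_RING_SIZE() */

    /* Reduce a free-running ring index to an offset inside the buffer,
     * as masking does for a power-of-two ring size. */
    static uint32_t ring_mask(uint32_t idx, uint32_t size)
    {
        return idx & (size - 1);
    }

    /* Describe 'len' bytes starting at ring index 'idx' as one or two
     * iovecs over 'buf', splitting at the wrap-around point when needed. */
    static int ring_to_iov(uint8_t *buf, uint32_t idx, uint32_t len,
                           struct iovec sg[2])
    {
        uint32_t off = ring_mask(idx, RING_SIZE);

        if (len <= RING_SIZE - off) {
            sg[0].iov_base = buf + off;
            sg[0].iov_len = len;
            return 1;
        }
        sg[0].iov_base = buf + off;
        sg[0].iov_len = RING_SIZE - off;
        sg[1].iov_base = buf;
        sg[1].iov_len = len - (RING_SIZE - off);
        return 2;
    }

    int main(void)
    {
        uint8_t buf[RING_SIZE];
        struct iovec sg[2];

        /* Index 30 masks to offset 14, so 6 bytes split into 2 + 4. */
        int n = ring_to_iov(buf, 30, 6, sg);

        printf("iovecs=%d first=%zu second=%zu\n",
               n, sg[0].iov_len, n == 2 ? sg[1].iov_len : (size_t)0);
        return 0;
    }

The real helpers differ in that the producer and consumer indices live in the shared intf page and are only used after xen_rmb(), but the masking and two-iovec split have the same shape.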

hw/9pfs/xen-9pfs.h (new file, 21 lines)

@@ -0,0 +1,21 @@
/*
 * Xen 9p backend
 *
 * Copyright Aporeto 2017
 *
 * Authors:
 *  Stefano Stabellini <stefano@aporeto.com>
 *
 * This work is licensed under the terms of the GNU GPL version 2 or
 * later. See the COPYING file in the top-level directory.
 *
 */

#include <xen/io/protocols.h>
#include "hw/xen/io/ring.h"

/*
 * Do not merge into xen-9p-backend.c: clang doesn't allow unused static
 * inline functions in c files.
 */
DEFINE_XEN_FLEX_RING_AND_INTF(xen_9pfs);
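
The comment above records a build constraint rather than a style preference: QEMU treats warnings as errors, and clang reports an unused static inline function when it is defined directly in a .c file, while the same definition pulled in from an included header is left alone. That is why the helpers generated by DEFINE_XEN_FLEX_RING_AND_INTF() live in this header even though xen-9p-backend.c is their only user. A minimal illustration of the constraint, using a made-up file name and assuming an invocation along the lines of "clang -Wunused-function -Werror -c demo.c":

    /* demo.c (hypothetical): the unused static inline below is diagnosed by
     * clang because it is written directly in the .c file; moved into an
     * included header, it would compile silently. */
    static inline int demo_unused_helper(int x)
    {
        return x + 1;
    }

    int main(void)
    {
        return 0;
    }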