virtio-blk: handle virtio_blk_handle_request() errors
All these errors are caused by a buggy guest: QEMU should not exit.

With this patch, if virtio_blk_handle_request() detects a buggy request, it marks the device as broken and returns an error to the caller so that the caller can take appropriate action.

In the case of virtio_blk_handle_vq(), we detach the request from the virtqueue, free its allocated memory and stop popping new requests.

We don't need to worry about multireq, since virtio_blk_handle_request() errors out early and mrb.num_reqs == 0.

In the case of virtio_blk_dma_restart_bh(), we need to detach and free all queued requests as well.

Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
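In code terms, the caller-side handling described above boils down to the following pattern (a condensed sketch of the virtio_blk_handle_vq() change from the diff below, not a complete function):

    while ((req = virtio_blk_get_request(s, vq))) {
        if (virtio_blk_handle_request(req, &mrb)) {
            /* The request was malformed and the device has already been
             * marked broken via virtio_error(): detach the element, free
             * the request and stop popping new ones. */
            virtqueue_detach_element(req->vq, &req->elem, 0);
            virtio_blk_free_request(req);
            break;
        }
    }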
commit 20ea686a0c
parent d3d74d6fe0
1 changed file with 28 additions and 10 deletions
@@ -468,30 +468,32 @@ static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
     return true;
 }
 
-static void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
+static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
 {
     uint32_t type;
     struct iovec *in_iov = req->elem.in_sg;
     struct iovec *iov = req->elem.out_sg;
     unsigned in_num = req->elem.in_num;
     unsigned out_num = req->elem.out_num;
+    VirtIOBlock *s = req->dev;
+    VirtIODevice *vdev = VIRTIO_DEVICE(s);
 
     if (req->elem.out_num < 1 || req->elem.in_num < 1) {
-        error_report("virtio-blk missing headers");
-        exit(1);
+        virtio_error(vdev, "virtio-blk missing headers");
+        return -1;
     }
 
     if (unlikely(iov_to_buf(iov, out_num, 0, &req->out,
                             sizeof(req->out)) != sizeof(req->out))) {
-        error_report("virtio-blk request outhdr too short");
-        exit(1);
+        virtio_error(vdev, "virtio-blk request outhdr too short");
+        return -1;
     }
 
     iov_discard_front(&iov, &out_num, sizeof(req->out));
 
     if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
-        error_report("virtio-blk request inhdr too short");
-        exit(1);
+        virtio_error(vdev, "virtio-blk request inhdr too short");
+        return -1;
     }
 
     /* We always touch the last byte, so just see how big in_iov is. */
@@ -529,7 +531,7 @@ static void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
         block_acct_invalid(blk_get_stats(req->dev->blk),
                            is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
         virtio_blk_free_request(req);
-        return;
+        return 0;
     }
 
     block_acct_start(blk_get_stats(req->dev->blk),
@@ -576,6 +578,7 @@ static void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
         virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
         virtio_blk_free_request(req);
     }
+    return 0;
 }
 
 void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
@@ -586,7 +589,11 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
     blk_io_plug(s->blk);
 
     while ((req = virtio_blk_get_request(s, vq))) {
-        virtio_blk_handle_request(req, &mrb);
+        if (virtio_blk_handle_request(req, &mrb)) {
+            virtqueue_detach_element(req->vq, &req->elem, 0);
+            virtio_blk_free_request(req);
+            break;
+        }
     }
 
     if (mrb.num_reqs) {
@@ -625,7 +632,18 @@ static void virtio_blk_dma_restart_bh(void *opaque)
 
     while (req) {
         VirtIOBlockReq *next = req->next;
-        virtio_blk_handle_request(req, &mrb);
+        if (virtio_blk_handle_request(req, &mrb)) {
+            /* Device is now broken and won't do any processing until it gets
+             * reset. Already queued requests will be lost: let's purge them.
+             */
+            while (req) {
+                next = req->next;
+                virtqueue_detach_element(req->vq, &req->elem, 0);
+                virtio_blk_free_request(req);
+                req = next;
+            }
+            break;
+        }
         req = next;
     }
 
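For context on what "marking the device as broken" means here: virtio_error(), used in place of error_report()/exit() above, reports the error and puts the device into a broken state so that it stops processing requests until it is reset. A simplified sketch of that behaviour (an approximation for illustration, not the exact code in QEMU's virtio core):

    void virtio_error(VirtIODevice *vdev, const char *fmt, ...)
    {
        va_list ap;

        va_start(ap, fmt);
        error_vreport(fmt, ap);              /* log the guest bug */
        va_end(ap);

        vdev->broken = true;                 /* stop processing virtqueues */

        if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
            /* let the guest know the device needs a reset */
            virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
            virtio_notify_config(vdev);
        }
    }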
|