Block patches

Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging

Block patches

# gpg: Signature made Fri 12 Sep 2014 16:09:43 BST using RSA key ID C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"

* remotes/kevin/tags/for-upstream: (22 commits)
  qcow2: Add falloc and full preallocation option
  raw-posix: Add falloc and full preallocation option
  qapi: introduce PreallocMode and new PreallocModes full and falloc.
  block: don't convert file size to sector size
  block: round up file size to nearest sector
  iotests: Send the correct fd in socket_scm_helper
  blockdev: Refuse to drive_del something added with blockdev-add
  block: extend BLOCK_IO_ERROR with reason string
  dataplane: fix virtio_blk_data_plane_create() op blocker error path
  qemu-iotests: Run 025 for Archipelago block driver
  block/archipelago: Implement bdrv_truncate()
  block: Make the block accounting functions operate on BlockAcctStats
  block: rename BlockAcctType members to start with BLOCK_ instead of BDRV_
  block: Extract the block accounting code
  block: Extract the BlockAcctStats structure
  IDE: MMIO IDE device control should be little endian
  thread-pool: Drop unnecessary includes
  xen: Drop redundant bdrv_close() from pci_piix3_xen_ide_unplug()
  xen_disk: Plug memory leak on error path
  qemu-io: Clean up openfile() after commit 2e40134
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell, 2014-09-15 17:35:21 +01:00, commit f2bcdc8de0
53 changed files with 709 additions and 284 deletions
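
Most of the hunks below follow one pattern from the accounting series: bdrv_acct_start()/bdrv_acct_done() calls on a BlockDriverState are replaced by block_acct_start()/block_acct_done() on the BlockAcctStats object returned by bdrv_get_stats(), and the BDRV_ACCT_* constants become BLOCK_ACCT_*. A minimal sketch of the new call pattern, assuming the helpers are declared in "block/accounting.h" and the cookie type is BlockAcctCookie; the request structure and function names are hypothetical:

#include "block/accounting.h"
#include "block/block.h"

/* Hypothetical per-request state carrying the accounting cookie. */
typedef struct MyRequest {
    BlockAcctCookie acct;
} MyRequest;

static void my_read_done(BlockDriverState *bs, MyRequest *req)
{
    /* Completion path: close the accounting entry opened at submit time. */
    block_acct_done(bdrv_get_stats(bs), &req->acct);
}

static void my_submit_read(BlockDriverState *bs, MyRequest *req, int64_t bytes)
{
    /* Previously: bdrv_acct_start(bs, &req->acct, bytes, BDRV_ACCT_READ);
     * now the stats object is looked up explicitly. */
    block_acct_start(bdrv_get_stats(bs), &req->acct, bytes, BLOCK_ACCT_READ);
    /* ... issue the read and arrange for my_read_done() on completion ... */
}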


@@ -164,8 +164,8 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
* block jobs that can conflict.
*/
if (bdrv_op_is_blocked(blk->conf.bs, BLOCK_OP_TYPE_DATAPLANE, &local_err)) {
error_report("cannot start dataplane thread: %s",
error_get_pretty(local_err));
error_setg(errp, "cannot start dataplane thread: %s",
error_get_pretty(local_err));
error_free(local_err);
return;
}
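
The hunk above is the dataplane error-path fix: instead of printing the failure with error_report(), it is handed back to the caller through the function's Error **errp parameter. A minimal sketch of that propagation pattern, assuming QEMU's "qapi/error.h" API; the function and callee names are made up for illustration:

#include "qapi/error.h"

static void my_plane_create(void *opaque, Error **errp)
{
    Error *local_err = NULL;

    my_check_op_blockers(opaque, &local_err);   /* hypothetical callee */
    if (local_err) {
        /* Wrap the inner message and let the caller decide what to do. */
        error_setg(errp, "cannot start dataplane thread: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        return;
    }
    /* ... continue with setup ... */
}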


@@ -197,7 +197,7 @@ static void nvme_rw_cb(void *opaque, int ret)
NvmeCtrl *n = sq->ctrl;
NvmeCQueue *cq = n->cq[sq->cqid];
bdrv_acct_done(n->conf.bs, &req->acct);
block_acct_done(bdrv_get_stats(n->conf.bs), &req->acct);
if (!ret) {
req->status = NVME_SUCCESS;
} else {
@@ -232,7 +232,7 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
assert((nlb << data_shift) == req->qsg.size);
dma_acct_start(n->conf.bs, &req->acct, &req->qsg, is_write ?
BDRV_ACCT_WRITE : BDRV_ACCT_READ);
BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
req->aiocb = is_write ?
dma_bdrv_write(n->conf.bs, &req->qsg, aio_slba, nvme_rw_cb, req) :
dma_bdrv_read(n->conf.bs, &req->qsg, aio_slba, nvme_rw_cb, req);


@@ -74,7 +74,7 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
s->rq = req;
} else if (action == BLOCK_ERROR_ACTION_REPORT) {
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
bdrv_acct_done(s->bs, &req->acct);
block_acct_done(bdrv_get_stats(s->bs), &req->acct);
virtio_blk_free_request(req);
}
@@ -96,7 +96,7 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
}
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
bdrv_acct_done(req->dev->bs, &req->acct);
block_acct_done(bdrv_get_stats(req->dev->bs), &req->acct);
virtio_blk_free_request(req);
}
@@ -111,7 +111,7 @@ static void virtio_blk_flush_complete(void *opaque, int ret)
}
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
bdrv_acct_done(req->dev->bs, &req->acct);
block_acct_done(bdrv_get_stats(req->dev->bs), &req->acct);
virtio_blk_free_request(req);
}
@@ -279,7 +279,8 @@ void virtio_submit_multiwrite(BlockDriverState *bs, MultiReqBuffer *mrb)
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);
block_acct_start(bdrv_get_stats(req->dev->bs), &req->acct, 0,
BLOCK_ACCT_FLUSH);
/*
* Make sure all outstanding writes are posted to the backing device.
@@ -322,7 +323,8 @@ static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb)
return;
}
bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_WRITE);
block_acct_start(bdrv_get_stats(req->dev->bs), &req->acct, req->qiov.size,
BLOCK_ACCT_WRITE);
if (mrb->num_writes == 32) {
virtio_submit_multiwrite(req->dev->bs, mrb);
@@ -353,7 +355,8 @@ static void virtio_blk_handle_read(VirtIOBlockReq *req)
return;
}
bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(req->dev->bs), &req->acct, req->qiov.size,
BLOCK_ACCT_READ);
bdrv_aio_readv(req->dev->bs, sector, &req->qiov,
req->qiov.size / BDRV_SECTOR_SIZE,
virtio_blk_rw_complete, req);


@@ -493,7 +493,7 @@ static void qemu_aio_complete(void *opaque, int ret)
break;
}
case BLKIF_OP_READ:
bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
block_acct_done(bdrv_get_stats(ioreq->blkdev->bs), &ioreq->acct);
break;
case BLKIF_OP_DISCARD:
default:
@@ -518,7 +518,8 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
switch (ioreq->req.operation) {
case BLKIF_OP_READ:
bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(blkdev->bs), &ioreq->acct,
ioreq->v.size, BLOCK_ACCT_READ);
ioreq->aio_inflight++;
bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
&ioreq->v, ioreq->v.size / BLOCK_SIZE,
@@ -530,7 +531,8 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
break;
}
bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
block_acct_start(bdrv_get_stats(blkdev->bs), &ioreq->acct,
ioreq->v.size, BLOCK_ACCT_WRITE);
ioreq->aio_inflight++;
bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
&ioreq->v, ioreq->v.size / BLOCK_SIZE,
@@ -852,28 +854,25 @@ static int blk_connect(struct XenDevice *xendev)
blkdev->dinfo = drive_get(IF_XEN, 0, index);
if (!blkdev->dinfo) {
Error *local_err = NULL;
BlockDriver *drv;
/* setup via xenbus -> create new block driver instance */
xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
blkdev->bs = bdrv_new(blkdev->dev, &local_err);
if (local_err) {
blkdev->bs = NULL;
}
if (blkdev->bs) {
BlockDriver *drv = bdrv_find_whitelisted_format(blkdev->fileproto,
readonly);
if (bdrv_open(&blkdev->bs, blkdev->filename, NULL, NULL, qflags,
drv, &local_err) != 0)
{
xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
error_get_pretty(local_err));
error_free(local_err);
bdrv_unref(blkdev->bs);
blkdev->bs = NULL;
}
}
blkdev->bs = bdrv_new(blkdev->dev, NULL);
if (!blkdev->bs) {
return -1;
}
drv = bdrv_find_whitelisted_format(blkdev->fileproto, readonly);
if (bdrv_open(&blkdev->bs, blkdev->filename, NULL, NULL, qflags,
drv, &local_err) != 0) {
xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
error_get_pretty(local_err));
error_free(local_err);
bdrv_unref(blkdev->bs);
blkdev->bs = NULL;
return -1;
}
} else {
/* setup via qemu cmdline -> already setup for us */
xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");


@@ -809,7 +809,8 @@ static void ncq_cb(void *opaque, int ret)
DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n",
ncq_tfs->tag);
bdrv_acct_done(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct);
block_acct_done(bdrv_get_stats(ncq_tfs->drive->port.ifs[0].bs),
&ncq_tfs->acct);
qemu_sglist_destroy(&ncq_tfs->sglist);
ncq_tfs->used = 0;
}
@@ -860,7 +861,7 @@ static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis,
ncq_tfs->tag, ncq_tfs->lba);
dma_acct_start(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct,
&ncq_tfs->sglist, BDRV_ACCT_READ);
&ncq_tfs->sglist, BLOCK_ACCT_READ);
ncq_tfs->aiocb = dma_bdrv_read(ncq_tfs->drive->port.ifs[0].bs,
&ncq_tfs->sglist, ncq_tfs->lba,
ncq_cb, ncq_tfs);
@@ -873,7 +874,7 @@ static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis,
ncq_tfs->tag, ncq_tfs->lba);
dma_acct_start(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct,
&ncq_tfs->sglist, BDRV_ACCT_WRITE);
&ncq_tfs->sglist, BLOCK_ACCT_WRITE);
ncq_tfs->aiocb = dma_bdrv_write(ncq_tfs->drive->port.ifs[0].bs,
&ncq_tfs->sglist, ncq_tfs->lba,
ncq_cb, ncq_tfs);


@@ -110,14 +110,16 @@ static int cd_read_sector(IDEState *s, int lba, uint8_t *buf, int sector_size)
switch(sector_size) {
case 2048:
bdrv_acct_start(s->bs, &s->acct, 4 * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(s->bs), &s->acct,
4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
ret = bdrv_read(s->bs, (int64_t)lba << 2, buf, 4);
bdrv_acct_done(s->bs, &s->acct);
block_acct_done(bdrv_get_stats(s->bs), &s->acct);
break;
case 2352:
bdrv_acct_start(s->bs, &s->acct, 4 * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(s->bs), &s->acct,
4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
ret = bdrv_read(s->bs, (int64_t)lba << 2, buf + 16, 4);
bdrv_acct_done(s->bs, &s->acct);
block_acct_done(bdrv_get_stats(s->bs), &s->acct);
if (ret < 0)
return ret;
cd_data_to_raw(buf, lba);
@@ -253,7 +255,8 @@ static void ide_atapi_cmd_reply(IDEState *s, int size, int max_size)
s->io_buffer_index = 0;
if (s->atapi_dma) {
bdrv_acct_start(s->bs, &s->acct, size, BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(s->bs), &s->acct, size,
BLOCK_ACCT_READ);
s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
ide_start_dma(s, ide_atapi_cmd_read_dma_cb);
} else {
@@ -354,7 +357,7 @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
return;
eot:
bdrv_acct_done(s->bs, &s->acct);
block_acct_done(bdrv_get_stats(s->bs), &s->acct);
ide_set_inactive(s, false);
}
@@ -369,7 +372,8 @@ static void ide_atapi_cmd_read_dma(IDEState *s, int lba, int nb_sectors,
s->io_buffer_size = 0;
s->cd_sector_size = sector_size;
bdrv_acct_start(s->bs, &s->acct, s->packet_transfer_size, BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(s->bs), &s->acct, s->packet_transfer_size,
BLOCK_ACCT_READ);
/* XXX: check if BUSY_STAT should be set */
s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;


@@ -568,7 +568,7 @@ static void ide_sector_read_cb(void *opaque, int ret)
s->pio_aiocb = NULL;
s->status &= ~BUSY_STAT;
bdrv_acct_done(s->bs, &s->acct);
block_acct_done(bdrv_get_stats(s->bs), &s->acct);
if (ret != 0) {
if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
IDE_RETRY_READ)) {
@@ -624,7 +624,8 @@ void ide_sector_read(IDEState *s)
s->iov.iov_len = n * BDRV_SECTOR_SIZE;
qemu_iovec_init_external(&s->qiov, &s->iov, 1);
bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(s->bs), &s->acct,
n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
s->pio_aiocb = bdrv_aio_readv(s->bs, sector_num, &s->qiov, n,
ide_sector_read_cb, s);
}
@@ -756,7 +757,7 @@ void ide_dma_cb(void *opaque, int ret)
eot:
if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
bdrv_acct_done(s->bs, &s->acct);
block_acct_done(bdrv_get_stats(s->bs), &s->acct);
}
ide_set_inactive(s, stay_active);
}
@@ -770,12 +771,12 @@ static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
switch (dma_cmd) {
case IDE_DMA_READ:
bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(s->bs), &s->acct,
s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
break;
case IDE_DMA_WRITE:
bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
BDRV_ACCT_WRITE);
block_acct_start(bdrv_get_stats(s->bs), &s->acct,
s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
break;
default:
break;
@@ -802,7 +803,7 @@ static void ide_sector_write_cb(void *opaque, int ret)
IDEState *s = opaque;
int n;
bdrv_acct_done(s->bs, &s->acct);
block_acct_done(bdrv_get_stats(s->bs), &s->acct);
s->pio_aiocb = NULL;
s->status &= ~BUSY_STAT;
@@ -869,7 +870,8 @@ void ide_sector_write(IDEState *s)
s->iov.iov_len = n * BDRV_SECTOR_SIZE;
qemu_iovec_init_external(&s->qiov, &s->iov, 1);
bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(s->bs), &s->acct,
n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
s->pio_aiocb = bdrv_aio_writev(s->bs, sector_num, &s->qiov, n,
ide_sector_write_cb, s);
}
@@ -888,7 +890,7 @@ static void ide_flush_cb(void *opaque, int ret)
}
if (s->bs) {
bdrv_acct_done(s->bs, &s->acct);
block_acct_done(bdrv_get_stats(s->bs), &s->acct);
}
s->status = READY_STAT | SEEK_STAT;
ide_cmd_done(s);
@@ -903,7 +905,7 @@ void ide_flush_cache(IDEState *s)
}
s->status |= BUSY_STAT;
bdrv_acct_start(s->bs, &s->acct, 0, BDRV_ACCT_FLUSH);
block_acct_start(bdrv_get_stats(s->bs), &s->acct, 0, BLOCK_ACCT_FLUSH);
s->pio_aiocb = bdrv_aio_flush(s->bs, ide_flush_cb, s);
}


@@ -171,7 +171,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
done:
MACIO_DPRINTF("done DMA\n");
bdrv_acct_done(s->bs, &s->acct);
block_acct_done(bdrv_get_stats(s->bs), &s->acct);
io->dma_end(opaque);
}
@@ -352,7 +352,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
done:
if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
bdrv_acct_done(s->bs, &s->acct);
block_acct_done(bdrv_get_stats(s->bs), &s->acct);
}
io->dma_end(io);
}
@@ -370,8 +370,8 @@ static void pmac_ide_transfer(DBDMA_io *io)
/* Handle non-block ATAPI DMA transfers */
if (s->lba == -1) {
s->io_buffer_size = MIN(io->len, s->packet_transfer_size);
bdrv_acct_start(s->bs, &s->acct, s->io_buffer_size,
BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(s->bs), &s->acct, s->io_buffer_size,
BLOCK_ACCT_READ);
MACIO_DPRINTF("non-block ATAPI DMA transfer size: %d\n",
s->io_buffer_size);
@@ -382,22 +382,25 @@ static void pmac_ide_transfer(DBDMA_io *io)
m->dma_active = false;
MACIO_DPRINTF("end of non-block ATAPI DMA transfer\n");
bdrv_acct_done(s->bs, &s->acct);
block_acct_done(bdrv_get_stats(s->bs), &s->acct);
io->dma_end(io);
return;
}
bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(s->bs), &s->acct, io->len,
BLOCK_ACCT_READ);
pmac_ide_atapi_transfer_cb(io, 0);
return;
}
switch (s->dma_cmd) {
case IDE_DMA_READ:
bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(s->bs), &s->acct, io->len,
BLOCK_ACCT_READ);
break;
case IDE_DMA_WRITE:
bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_WRITE);
block_acct_start(bdrv_get_stats(s->bs), &s->acct, io->len,
BLOCK_ACCT_WRITE);
break;
default:
break;


@@ -82,7 +82,7 @@ static void mmio_ide_write(void *opaque, hwaddr addr,
static const MemoryRegionOps mmio_ide_ops = {
.read = mmio_ide_read,
.write = mmio_ide_write,
.endianness = DEVICE_NATIVE_ENDIAN,
.endianness = DEVICE_LITTLE_ENDIAN,
};
static uint64_t mmio_ide_status_read(void *opaque, hwaddr addr,
@@ -102,7 +102,7 @@ static void mmio_ide_cmd_write(void *opaque, hwaddr addr,
static const MemoryRegionOps mmio_ide_cs_ops = {
.read = mmio_ide_status_read,
.write = mmio_ide_cmd_write,
.endianness = DEVICE_NATIVE_ENDIAN,
.endianness = DEVICE_LITTLE_ENDIAN,
};
static const VMStateDescription vmstate_ide_mmio = {
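
The two endianness hunks above change the MMIO IDE register regions from DEVICE_NATIVE_ENDIAN to DEVICE_LITTLE_ENDIAN: with the former the register values follow the guest CPU's byte order, while the latter declares the registers little-endian so the memory core byte-swaps accesses for big-endian guests. A minimal sketch of declaring and mapping such a region, assuming the standard memory_region_init_io() API; everything except the MemoryRegionOps fields is hypothetical:

#include "exec/memory.h"

static uint64_t my_ide_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;   /* real code would decode addr and read the register */
}

static void my_ide_mmio_write(void *opaque, hwaddr addr,
                              uint64_t val, unsigned size)
{
    /* real code would decode addr and update the register */
}

/* Little-endian registers: the memory core handles any byte swapping,
 * so the callbacks always see little-endian values. */
static const MemoryRegionOps my_ide_mmio_ops = {
    .read = my_ide_mmio_read,
    .write = my_ide_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void my_ide_mmio_map(MemoryRegion *parent, hwaddr base, void *state)
{
    static MemoryRegion mr;
    memory_region_init_io(&mr, NULL, &my_ide_mmio_ops, state, "my-ide-mmio", 16);
    memory_region_add_subregion(parent, base, &mr);
}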


@@ -182,7 +182,6 @@ int pci_piix3_xen_ide_unplug(DeviceState *dev)
if (ds) {
bdrv_detach_dev(di->bdrv, ds);
}
bdrv_close(di->bdrv);
pci_ide->bus[di->bus].ifs[di->unit].bs = NULL;
drive_del(di);
}


@@ -183,7 +183,7 @@ static void scsi_aio_complete(void *opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
bdrv_acct_done(s->qdev.conf.bs, &r->acct);
block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct);
if (r->req.io_canceled) {
goto done;
}
@@ -237,7 +237,8 @@ static void scsi_write_do_fua(SCSIDiskReq *r)
}
if (scsi_is_cmd_fua(&r->req.cmd)) {
bdrv_acct_start(s->qdev.conf.bs, &r->acct, 0, BDRV_ACCT_FLUSH);
block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct, 0,
BLOCK_ACCT_FLUSH);
r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_aio_complete, r);
return;
}
@@ -257,7 +258,7 @@ static void scsi_dma_complete_noio(void *opaque, int ret)
if (r->req.aiocb != NULL) {
r->req.aiocb = NULL;
bdrv_acct_done(s->qdev.conf.bs, &r->acct);
block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct);
}
if (r->req.io_canceled) {
goto done;
@@ -300,7 +301,7 @@ static void scsi_read_complete(void * opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
bdrv_acct_done(s->qdev.conf.bs, &r->acct);
block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct);
if (r->req.io_canceled) {
goto done;
}
@@ -333,7 +334,7 @@ static void scsi_do_read(void *opaque, int ret)
if (r->req.aiocb != NULL) {
r->req.aiocb = NULL;
bdrv_acct_done(s->qdev.conf.bs, &r->acct);
block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct);
}
if (r->req.io_canceled) {
goto done;
@@ -349,13 +350,14 @@ static void scsi_do_read(void *opaque, int ret)
scsi_req_ref(&r->req);
if (r->req.sg) {
dma_acct_start(s->qdev.conf.bs, &r->acct, r->req.sg, BDRV_ACCT_READ);
dma_acct_start(s->qdev.conf.bs, &r->acct, r->req.sg, BLOCK_ACCT_READ);
r->req.resid -= r->req.sg->size;
r->req.aiocb = dma_bdrv_read(s->qdev.conf.bs, r->req.sg, r->sector,
scsi_dma_complete, r);
} else {
n = scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
bdrv_acct_start(s->qdev.conf.bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct,
n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
r->req.aiocb = bdrv_aio_readv(s->qdev.conf.bs, r->sector, &r->qiov, n,
scsi_read_complete, r);
}
@@ -399,7 +401,8 @@ static void scsi_read_data(SCSIRequest *req)
first = !r->started;
r->started = true;
if (first && scsi_is_cmd_fua(&r->req.cmd)) {
bdrv_acct_start(s->qdev.conf.bs, &r->acct, 0, BDRV_ACCT_FLUSH);
block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct, 0,
BLOCK_ACCT_FLUSH);
r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_do_read, r);
} else {
scsi_do_read(r, 0);
@@ -453,7 +456,7 @@ static void scsi_write_complete(void * opaque, int ret)
if (r->req.aiocb != NULL) {
r->req.aiocb = NULL;
bdrv_acct_done(s->qdev.conf.bs, &r->acct);
block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct);
}
if (r->req.io_canceled) {
goto done;
@@ -522,13 +525,14 @@ static void scsi_write_data(SCSIRequest *req)
}
if (r->req.sg) {
dma_acct_start(s->qdev.conf.bs, &r->acct, r->req.sg, BDRV_ACCT_WRITE);
dma_acct_start(s->qdev.conf.bs, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
r->req.resid -= r->req.sg->size;
r->req.aiocb = dma_bdrv_write(s->qdev.conf.bs, r->req.sg, r->sector,
scsi_dma_complete, r);
} else {
n = r->qiov.size / 512;
bdrv_acct_start(s->qdev.conf.bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_WRITE);
block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct,
n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
r->req.aiocb = bdrv_aio_writev(s->qdev.conf.bs, r->sector, &r->qiov, n,
scsi_write_complete, r);
}
@@ -1496,7 +1500,8 @@ static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
if (!bdrv_enable_write_cache(s->qdev.conf.bs)) {
/* The request is used as the AIO opaque value, so add a ref. */
scsi_req_ref(&r->req);
bdrv_acct_start(s->qdev.conf.bs, &r->acct, 0, BDRV_ACCT_FLUSH);
block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct, 0,
BLOCK_ACCT_FLUSH);
r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_aio_complete, r);
return;
}
@@ -1647,7 +1652,7 @@ static void scsi_write_same_complete(void *opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
bdrv_acct_done(s->qdev.conf.bs, &r->acct);
block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct);
if (r->req.io_canceled) {
goto done;
}
@@ -1662,7 +1667,8 @@ static void scsi_write_same_complete(void *opaque, int ret)
data->sector += data->iov.iov_len / 512;
data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
if (data->iov.iov_len) {
bdrv_acct_start(s->qdev.conf.bs, &r->acct, data->iov.iov_len, BDRV_ACCT_WRITE);
block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct,
data->iov.iov_len, BLOCK_ACCT_WRITE);
r->req.aiocb = bdrv_aio_writev(s->qdev.conf.bs, data->sector,
&data->qiov, data->iov.iov_len / 512,
scsi_write_same_complete, data);
@@ -1708,8 +1714,9 @@ static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
/* The request is used as the AIO opaque value, so add a ref. */
scsi_req_ref(&r->req);
bdrv_acct_start(s->qdev.conf.bs, &r->acct, nb_sectors * s->qdev.blocksize,
BDRV_ACCT_WRITE);
block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct,
nb_sectors * s->qdev.blocksize,
BLOCK_ACCT_WRITE);
r->req.aiocb = bdrv_aio_write_zeroes(s->qdev.conf.bs,
r->req.cmd.lba * (s->qdev.blocksize / 512),
nb_sectors * (s->qdev.blocksize / 512),
@@ -1730,7 +1737,8 @@ static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
}
scsi_req_ref(&r->req);
bdrv_acct_start(s->qdev.conf.bs, &r->acct, data->iov.iov_len, BDRV_ACCT_WRITE);
block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct,
data->iov.iov_len, BLOCK_ACCT_WRITE);
r->req.aiocb = bdrv_aio_writev(s->qdev.conf.bs, data->sector,
&data->qiov, data->iov.iov_len / 512,
scsi_write_same_complete, data);
@@ -1994,7 +2002,8 @@ static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
case SYNCHRONIZE_CACHE:
/* The request is used as the AIO opaque value, so add a ref. */
scsi_req_ref(&r->req);
bdrv_acct_start(s->qdev.conf.bs, &r->acct, 0, BDRV_ACCT_FLUSH);
block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct, 0,
BLOCK_ACCT_FLUSH);
r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_aio_complete, r);
return 0;
case SEEK_10: