mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-07 17:53:56 -06:00
Add bdrv_aio_multiwrite
One performance problem of qcow2 during initial image growth is sequential writes that are not cluster aligned. In this case, when a first request requires allocating a new cluster but writes only to the first couple of sectors in that cluster, the rest of the cluster is zeroed - just to be overwritten by the following second request that fills up the cluster. Let's try to merge sequential write requests to the same cluster, so we can avoid writing the zero padding to the disk in the first place. As a nice side effect, other formats also take advantage of dealing with fewer and larger requests. Signed-off-by: Kevin Wolf <kwolf@redhat.com> Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
This commit is contained in:
parent
1c3173b9ed
commit
40b4f53967
5 changed files with 230 additions and 0 deletions
25
cutils.c
25
cutils.c
|
@ -151,6 +151,31 @@ void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len)
|
|||
++qiov->niov;
|
||||
}
|
||||
|
||||
/*
|
||||
* Copies iovecs from src to the end dst until src is completely copied or the
|
||||
* total size of the copied iovec reaches size. The size of the last copied
|
||||
* iovec is changed in order to fit the specified total size if it isn't a
|
||||
* perfect fit already.
|
||||
*/
|
||||
void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size)
|
||||
{
|
||||
int i;
|
||||
size_t done;
|
||||
|
||||
assert(dst->nalloc != -1);
|
||||
|
||||
done = 0;
|
||||
for (i = 0; (i < src->niov) && (done != size); i++) {
|
||||
if (done + src->iov[i].iov_len > size) {
|
||||
qemu_iovec_add(dst, src->iov[i].iov_base, size - done);
|
||||
break;
|
||||
} else {
|
||||
qemu_iovec_add(dst, src->iov[i].iov_base, src->iov[i].iov_len);
|
||||
}
|
||||
done += src->iov[i].iov_len;
|
||||
}
|
||||
}
|
||||
|
||||
void qemu_iovec_destroy(QEMUIOVector *qiov)
|
||||
{
|
||||
assert(qiov->nalloc != -1);
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue