migration: rename rate limiting fields in QEMUFile

This renames the following QEMUFile fields

 * bytes_xfer -> rate_limit_used
 * xfer_limit -> rate_limit_max

The intent is to make it clear that 'bytes_xfer' is specifically related
to rate limiting of data and applies to data queued, which need not have
been transferred on the wire yet if a flush hasn't taken place.

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
This commit is contained in:
Daniel P. Berrangé 2022-06-20 12:01:48 +01:00 committed by Dr. David Alan Gilbert
parent 246683c22f
commit c7fc8d323a

View file

@@ -39,8 +39,16 @@ struct QEMUFile {
     const QEMUFileHooks *hooks;
     void *opaque;
-    int64_t bytes_xfer;
-    int64_t xfer_limit;
+    /*
+     * Maximum amount of data in bytes to transfer during one
+     * rate limiting time window
+     */
+    int64_t rate_limit_max;
+    /*
+     * Total amount of data in bytes queued for transfer
+     * during this rate limiting time window
+     */
+    int64_t rate_limit_used;
 
     int64_t pos; /* start of buffer when writing, end of buffer
                     when reading */
@@ -304,7 +312,7 @@ size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
         int ret = f->hooks->save_page(f, f->opaque, block_offset,
                                       offset, size, bytes_sent);
         if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
-            f->bytes_xfer += size;
+            f->rate_limit_used += size;
         }
 
         if (ret != RAM_SAVE_CONTROL_DELAYED &&
@@ -457,7 +465,7 @@ void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, size_t size,
         return;
     }
 
-    f->bytes_xfer += size;
+    f->rate_limit_used += size;
 
     add_to_iovec(f, buf, size, may_free);
 }
@@ -475,7 +483,7 @@ void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size)
             l = size;
         }
         memcpy(f->buf + f->buf_index, buf, l);
-        f->bytes_xfer += l;
+        f->rate_limit_used += l;
         add_buf_to_iovec(f, l);
         if (qemu_file_get_error(f)) {
             break;
@@ -492,7 +500,7 @@ void qemu_put_byte(QEMUFile *f, int v)
     }
 
     f->buf[f->buf_index] = v;
-    f->bytes_xfer++;
+    f->rate_limit_used++;
     add_buf_to_iovec(f, 1);
 }
@@ -674,7 +682,7 @@ int qemu_file_rate_limit(QEMUFile *f)
     if (qemu_file_get_error(f)) {
         return 1;
     }
-    if (f->xfer_limit > 0 && f->bytes_xfer > f->xfer_limit) {
+    if (f->rate_limit_max > 0 && f->rate_limit_used > f->rate_limit_max) {
         return 1;
     }
     return 0;
@@ -682,22 +690,22 @@ int qemu_file_rate_limit(QEMUFile *f)
 
 int64_t qemu_file_get_rate_limit(QEMUFile *f)
 {
-    return f->xfer_limit;
+    return f->rate_limit_max;
 }
 
 void qemu_file_set_rate_limit(QEMUFile *f, int64_t limit)
 {
-    f->xfer_limit = limit;
+    f->rate_limit_max = limit;
 }
 
 void qemu_file_reset_rate_limit(QEMUFile *f)
 {
-    f->bytes_xfer = 0;
+    f->rate_limit_used = 0;
 }
 
 void qemu_file_update_transfer(QEMUFile *f, int64_t len)
 {
-    f->bytes_xfer += len;
+    f->rate_limit_used += len;
 }
 
 void qemu_put_be16(QEMUFile *f, unsigned int v)