mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-10 02:54:58 -06:00
block/dirty-bitmap: switch _next_dirty_area and _next_zero to int64_t
We are going to introduce bdrv_dirty_bitmap_next_dirty so that same variable may be used to store its return value and to be its parameter, so it would be int64_t. Similarly, we are going to refactor hbitmap_next_dirty_area to use hbitmap_next_dirty together with hbitmap_next_zero, therefore we want hbitmap_next_zero parameter type to be int64_t too. So, for convenience update all parameters of *_next_zero and *_next_dirty_area to be int64_t. Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Reviewed-by: Max Reitz <mreitz@redhat.com> Reviewed-by: John Snow <jsnow@redhat.com> Message-id: 20200205112041.6003-6-vsementsov@virtuozzo.com Signed-off-by: John Snow <jsnow@redhat.com>
This commit is contained in:
parent
0c88f1970c
commit
642700fda0
6 changed files with 36 additions and 34 deletions
|
@ -193,7 +193,7 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
|
|||
}
|
||||
}
|
||||
|
||||
int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
|
||||
int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
|
||||
{
|
||||
size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
|
||||
unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
|
||||
|
@ -202,6 +202,8 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
|
|||
uint64_t end_bit, sz;
|
||||
int64_t res;
|
||||
|
||||
assert(start >= 0 && count >= 0);
|
||||
|
||||
if (start >= hb->orig_size || count == 0) {
|
||||
return -1;
|
||||
}
|
||||
|
@ -244,14 +246,15 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
|
|||
return res;
|
||||
}
|
||||
|
||||
bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start,
|
||||
uint64_t *count)
|
||||
bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t *start, int64_t *count)
|
||||
{
|
||||
HBitmapIter hbi;
|
||||
int64_t firt_dirty_off, area_end;
|
||||
uint32_t granularity = 1UL << hb->granularity;
|
||||
uint64_t end;
|
||||
|
||||
assert(*start >= 0 && *count >= 0);
|
||||
|
||||
if (*start >= hb->orig_size || *count == 0) {
|
||||
return false;
|
||||
}
|
||||
|
@ -834,8 +837,8 @@ bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
|
|||
*/
|
||||
static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
|
||||
{
|
||||
uint64_t offset = 0;
|
||||
uint64_t count = src->orig_size;
|
||||
int64_t offset = 0;
|
||||
int64_t count = src->orig_size;
|
||||
|
||||
while (hbitmap_next_dirty_area(src, &offset, &count)) {
|
||||
hbitmap_set(dst, offset, count);
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue