migration: add bitmap for received page

This patch adds the ability to track already received
pages; this is necessary for calculating vCPU block time in
the postcopy migration feature, and for recovery after a
postcopy migration failure.

It is also needed to solve a shared memory issue in
postcopy live migration. Information about received pages
will be transferred to the software virtual bridge
(e.g. OVS-VSWITCHD) to avoid fallocate (unmap) for
already received pages. The fallocate syscall is required
for remapped shared memory, because the remapping itself
blocks ioctl(UFFDIO_COPY): the ioctl would fail with an
EEXIST error, since the struct page still exists after the
remap.
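
As an illustration of the consumer side, a minimal sketch of such a
check (function and parameter names here are hypothetical, not an API
from this patch):

    /* Sketch only, not part of this patch: punch a hole in the shared
     * memory backing file only for pages that were NOT yet received,
     * so a later ioctl(UFFDIO_COPY) does not fail with EEXIST on a
     * still-present struct page. */
    #define _GNU_SOURCE
    #include <fcntl.h>          /* fallocate() */
    #include <sys/types.h>      /* off_t */
    #include <linux/falloc.h>   /* FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE */
    #include <stdbool.h>

    static int drop_page_if_not_received(int fd, off_t offset,
                                         off_t pagesize, bool received)
    {
        if (received) {
            return 0;   /* UFFDIO_COPY already placed it; keep the page */
        }
        /* drop the backing page so the destination can fault it again */
        return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                         offset, pagesize);
    }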

The bitmap is placed in RAMBlock, like the other postcopy/precopy
related bitmaps.
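
For orientation, a sketch of the shape this implies (assumed layout
for illustration, not the literal QEMU declaration): one bit per
target page, indexed by the page's offset within the block.

    /* Assumed shape, not the literal QEMU struct. */
    typedef struct RAMBlockSketch {
        unsigned char *host;         /* host address the block is mapped at */
        unsigned long *receivedmap;  /* 1 bit per target page, set on receive */
    } RAMBlockSketch;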

Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Alexey Perevalov <a.perevalov@samsung.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>

commit f949461489 (parent 727b9d7e49)
Author: Alexey Perevalov, 2017-10-05 14:13:20 +03:00; committed by Juan Quintela
4 changed files with 67 additions and 5 deletions

--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -642,22 +642,28 @@ int postcopy_ram_enable_notify(MigrationIncomingState *mis)
 }
 
 static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr,
-                               void *from_addr, uint64_t pagesize)
+                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
 {
+    int ret;
     if (from_addr) {
         struct uffdio_copy copy_struct;
         copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
         copy_struct.src = (uint64_t)(uintptr_t)from_addr;
         copy_struct.len = pagesize;
         copy_struct.mode = 0;
-        return ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
+        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
     } else {
         struct uffdio_zeropage zero_struct;
         zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
         zero_struct.range.len = pagesize;
         zero_struct.mode = 0;
-        return ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
+        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
     }
+    if (!ret) {
+        ramblock_recv_bitmap_set_range(rb, host_addr,
+                                       pagesize / qemu_target_page_size());
+    }
+    return ret;
 }
 
 /*
@@ -674,7 +680,7 @@ int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
      * which would be slightly cheaper, but we'd have to be careful
      * of the order of updating our page state.
      */
-    if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize)) {
+    if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize, rb)) {
         int e = errno;
         error_report("%s: %s copy host: %p from: %p (size: %zd)",
                      __func__, strerror(e), host, from, pagesize);
@@ -696,7 +702,8 @@ int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
     trace_postcopy_place_page_zero(host);
 
     if (qemu_ram_pagesize(rb) == getpagesize()) {
-        if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, getpagesize())) {
+        if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, getpagesize(),
+                                rb)) {
             int e = errno;
             error_report("%s: %s zero host: %p",
                          __func__, strerror(e), host);
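
The ramblock_recv_bitmap_set_range() helper called above is added by
this patch in one of the other changed files, not shown in this
excerpt. A minimal self-contained sketch of the idea, with assumed
parameter names and an open-coded bitmap in place of QEMU's bitmap
primitives:

    /* Sketch only, not the exact QEMU code: mark nr target pages,
     * starting at host_addr, as received in the block's bitmap. */
    #include <stddef.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static void recv_bitmap_set_range(unsigned long *receivedmap,
                                      const unsigned char *block_host,
                                      const unsigned char *host_addr,
                                      size_t nr, unsigned page_bits)
    {
        /* bit index of the first page within this block */
        size_t first = (size_t)(host_addr - block_host) >> page_bits;
        for (size_t i = 0; i < nr; i++) {
            size_t bit = first + i;
            receivedmap[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
        }
    }

The pagesize / qemu_target_page_size() expression in the diff supplies
the nr here: a huge page placed by a single UFFDIO_COPY spans many
target pages, so the whole run of bits is set in one call.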