Pull request trivial patches 20200919
-----BEGIN PGP SIGNATURE-----

iQJGBAABCAAwFiEEzS913cjjpNwuT1Fz8ww4vT8vvjwFAl9mUVcSHGxhdXJlbnRA
dml2aWVyLmV1AAoJEPMMOL0/L748c5IP/2Jh7HuM5LpGuhca81zCnUxIHnnfXLpR
YXbRsD/q4VrCe9WxFZeyul1zcCpV4BnLNqsWA2PH44at+vcvCuXLU9vVzar1SMTh
pAwuXc4qGkV4zttLzzYwkimQLxHl1Cy7RtoLJB7GjLj0A/VBvD7Z2cO2KSF4EOzU
KQAHcIm8WYWjZy8lx5ZrCvq5KkPHMK+XvVxD+v/gXVWzU23wFMVJwhzi2PXqetRe
RnAFA8tF3xlvXTJmeqqN277Otv6WLnANe1rjr/w4j5tUINaaiAX/gWkrwcFZprjo
1p0E3o8ztrtql7B8DWH+xWLeFUpq3Qd9Ztp4ujFmpWQysbCZ6BWFocAz+v4Dd0F3
luJP0e8X5hQAzJiu9aucOKpnUHaieWamo5J+5pWezTGB0wNYgnhRDp2LAefadV+I
WmDjIWtZZ3Je48qT0bGzh+p8ZSqGQx/a5xx6eXr7MdlNhiWIV/evqotU2MoLnO7d
QhQevHlk7nxayk3laVA4nTwJRdtEN8zfbuAB+gMZZvR11yBNrBm6q7oMNhkuP0QV
glcta70RE7Nfa4TZaFzEzrjiF6V0k0+TtGY0VPB/0xjtCepiwOuoVbEjSe4arJ7Z
1LkGY45Rdaas8yqWwZGAjbFWTkke85v+S8g2lCj/HihgfPf585uRZVPhJ9sIGc9w
JcWyaIFsgHh8
=MxMx
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/vivier2/tags/trivial-branch-for-5.2-pull-request' into staging

Pull request trivial patches 20200919

# gpg: Signature made Sat 19 Sep 2020 19:43:35 BST
# gpg:                using RSA key CD2F75DDC8E3A4DC2E4F5173F30C38BD3F2FBE3C
# gpg:                issuer "laurent@vivier.eu"
# gpg: Good signature from "Laurent Vivier <lvivier@redhat.com>" [full]
# gpg:                 aka "Laurent Vivier <laurent@vivier.eu>" [full]
# gpg:                 aka "Laurent Vivier (Red Hat) <lvivier@redhat.com>" [full]
# Primary key fingerprint: CD2F 75DD C8E3 A4DC 2E4F 5173 F30C 38BD 3F2F BE3C

* remotes/vivier2/tags/trivial-branch-for-5.2-pull-request:
  contrib/: fix some comment spelling errors
  qapi/: fix some comment spelling errors
  disas/: fix some comment spelling errors
  linux-user/: fix some comment spelling errors
  util/: fix some comment spelling errors
  scripts/: fix some comment spelling errors
  docs/: fix some comment spelling errors
  migration/: fix some comment spelling errors
  qemu/: fix some comment spelling errors
  scripts/git.orderfile: Display meson files along with buildsys ones
  hw/timer/hpet: Fix debug format strings
  hw/timer/hpet: Remove unused functions hpet_ram_readb, hpet_ram_readw
  meson: remove empty else and duplicated gio deps
  manual: escape backslashes in "parsed-literal" blocks
  ui/spice-input: Remove superfluous forward declaration
  hw/ppc/ppc4xx_pci: Replace magic value by the PCI_NUM_PINS definition
  hw/gpio/max7310: Remove impossible check

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 834b9273d5
69 changed files with 163 additions and 179 deletions
@@ -46,7 +46,7 @@ void failover_request_active(Error **errp)
 {
     if (failover_set_state(FAILOVER_STATUS_NONE,
         FAILOVER_STATUS_REQUIRE) != FAILOVER_STATUS_NONE) {
-        error_setg(errp, "COLO failover is already actived");
+        error_setg(errp, "COLO failover is already activated");
         return;
     }
     failover_bh = qemu_bh_new(colo_failover_bh, NULL);
@@ -632,7 +632,7 @@ out:
     /*
      * It is safe to unregister notifier after failover finished.
      * Besides, colo_delay_timer and colo_checkpoint_sem can't be
-     * released befor unregister notifier, or there will be use-after-free
+     * released before unregister notifier, or there will be use-after-free
      * error.
      */
     colo_compare_unregister_notifier(&packets_compare_notifier);
@@ -731,7 +731,7 @@ static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
         qemu_sem_post(&p->sem_sync);
         /*
          * Although multifd_send_thread is not created, but main migration
-         * thread neet to judge whether it is running, so we need to mark
+         * thread needs to judge whether it is running, so we need to mark
          * its status.
          */
         p->quit = true;
@@ -1042,7 +1042,7 @@ bool multifd_recv_all_channels_created(void)
 
 /*
  * Try to receive all multifd channels to get ready for the migration.
- * - Return true and do not set @errp when correctly receving all channels;
+ * - Return true and do not set @errp when correctly receiving all channels;
  * - Return false and do not set @errp when correctly receiving the current one;
  * - Return false and set @errp when failing to receive the current channel.
  */
@@ -237,7 +237,7 @@ release_ufd:
  * request_ufd_features: this function should be called only once on a newly
  * opened ufd, subsequent calls will lead to error.
  *
- * Returns: true on succes
+ * Returns: true on success
  *
  * @ufd: fd obtained from userfaultfd syscall
  * @features: bit mask see UFFD_API_FEATURES
@@ -807,7 +807,7 @@ static void mark_postcopy_blocktime_end(uintptr_t addr)
 
     low_time_offset = get_low_time_offset(dc);
     /* lookup cpu, to clear it,
-     * that algorithm looks straighforward, but it's not
+     * that algorithm looks straightforward, but it's not
      * optimal, more optimal algorithm is keeping tree or hash
      * where key is address value is a list of */
     for (i = 0; i < smp_cpus; i++) {
@@ -161,7 +161,7 @@ struct PostCopyFD {
  */
 void postcopy_register_shared_ufd(struct PostCopyFD *pcfd);
 void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd);
-/* Call each of the shared 'waker's registerd telling them of
+/* Call each of the shared 'waker's registered telling them of
  * availability of a block.
  */
 int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset);
@@ -256,7 +256,7 @@ int64_t ramblock_recv_bitmap_send(QEMUFile *file,
     /*
      * Always use little endian when sending the bitmap. This is
      * required that when source and destination VMs are not using the
-     * same endianess. (Note: big endian won't work.)
+     * same endianness. (Note: big endian won't work.)
      */
     bitmap_to_le(le_bitmap, block->receivedmap, nbits);
 
@@ -275,7 +275,7 @@ int64_t ramblock_recv_bitmap_send(QEMUFile *file,
     qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
     /*
      * Mark as an end, in case the middle part is screwed up due to
-     * some "misterious" reason.
+     * some "mysterious" reason.
     */
     qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
     qemu_fflush(file);
@@ -718,7 +718,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
     /*
      * Reaching here means the page has hit the xbzrle cache, no matter what
      * encoding result it is (normal encoding, overflow or skipping the page),
-     * count the page as encoded. This is used to caculate the encoding rate.
+     * count the page as encoded. This is used to calculate the encoding rate.
      *
      * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
      * 2nd page turns out to be skipped (i.e. no new bytes written to the
@@ -3705,7 +3705,7 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
 
     /*
      * Note: see comments in ramblock_recv_bitmap_send() on why we
-     * need the endianess convertion, and the paddings.
+     * need the endianness conversion, and the paddings.
      */
     local_size = ROUND_UP(local_size, 8);
 
@@ -3743,7 +3743,7 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
     }
 
     /*
-     * Endianess convertion. We are during postcopy (though paused).
+     * Endianness conversion. We are during postcopy (though paused).
      * The dirty bitmap won't change. We can directly modify it.
      */
     bitmap_from_le(block->bmap, le_bitmap, nbits);
@@ -1509,7 +1509,7 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma)
     } else {
         /* This is the source side, we're in a separate thread
          * or destination prior to migration_fd_process_incoming()
-         * after postcopy, the destination also in a seprate thread.
+         * after postcopy, the destination also in a separate thread.
          * we can't yield; so we have to poll the fd.
          * But we need to be able to handle 'cancel' or an error
          * without hanging forever.
@@ -2266,7 +2266,7 @@ static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma,
  * chunk, then start a new chunk and flush() the old chunk.
  * 3. To keep the hardware busy, we also group chunks into batches
  *    and only require that a batch gets acknowledged in the completion
- *    qeueue instead of each individual chunk.
+ *    queue instead of each individual chunk.
  */
 static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
                            uint64_t block_offset, uint64_t offset,
@@ -3148,7 +3148,7 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
     if (size > 0) {
         /*
          * Add this page to the current 'chunk'. If the chunk
-         * is full, or the page doen't belong to the current chunk,
+         * is full, or the page doesn't belong to the current chunk,
          * an actual RDMA write will occur and a new chunk will be formed.
          */
         ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
@@ -4101,7 +4101,7 @@ void rdma_start_outgoing_migration(void *opaque,
         goto err;
     }
 
-    /* RDMA postcopy need a seprate queue pair for return path */
+    /* RDMA postcopy need a separate queue pair for return path */
     if (migrate_postcopy()) {
         rdma_return_path = qemu_rdma_data_init(host_port, errp);
 
@@ -2795,7 +2795,7 @@ void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
 
     if (!has_live) {
         /* live default to true so old version of Xen tool stack can have a
-         * successfull live migration */
+         * successful live migration */
         live = true;
     }
 
@@ -2818,7 +2818,7 @@ void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
      * "xen-save-devices-state" and in case of migration failure, libxl
      * would call "cont".
      * So call bdrv_inactivate_all (release locks) here to let the other
-     * side of the migration take controle of the images.
+     * side of the migration take control of the images.
      */
     if (live && !saved_vm_running) {
         ret = bdrv_inactivate_all();