rcu: Use automatic rcu_read unlock in core memory/exec code
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20191007143642.301445-6-dgilbert@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
This commit is contained in:
parent 987ab2a549
commit 694ea274d9

3 changed files with 125 additions and 158 deletions
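The change applies QEMU's automatically released RCU read-lock guards (from include/qemu/rcu.h, introduced earlier in this series) in place of manual rcu_read_lock()/rcu_read_unlock() pairs, so the lock is dropped on every exit path, including break and early return. A minimal sketch of the before/after shape; some_rcu_ptr and consume() are illustrative names, not part of this patch:

    /* Before: every exit path must remember to unlock. */
    rcu_read_lock();
    p = atomic_rcu_read(&some_rcu_ptr);    /* illustrative name */
    if (!p) {
        rcu_read_unlock();                 /* easy to miss on early exits */
        return;
    }
    consume(p);
    rcu_read_unlock();

    /* After: the guard unlocks when the block is left, on any path. */
    WITH_RCU_READ_LOCK_GUARD() {
        p = atomic_rcu_read(&some_rcu_ptr);
        if (p) {
            consume(p);
        }
    }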
include/exec/ram_addr.h

@@ -193,30 +193,29 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
 
-    rcu_read_lock();
+    WITH_RCU_READ_LOCK_GUARD() {
+        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
 
-    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
-
-    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-    base = page - offset;
-    while (page < end) {
-        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
-        unsigned long num = next - base;
-        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
-        if (found < num) {
-            dirty = true;
-            break;
-        }
+        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+        base = page - offset;
+        while (page < end) {
+            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+            unsigned long num = next - base;
+            unsigned long found = find_next_bit(blocks->blocks[idx],
+                                                num, offset);
+            if (found < num) {
+                dirty = true;
+                break;
+            }
 
-        page = next;
-        idx++;
-        offset = 0;
-        base += DIRTY_MEMORY_BLOCK_SIZE;
-    }
+            page = next;
+            idx++;
+            offset = 0;
+            base += DIRTY_MEMORY_BLOCK_SIZE;
+        }
+    }
 
-    rcu_read_unlock();
-
     return dirty;
 }
 
@@ -234,7 +233,7 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
 
     blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
 
@@ -256,8 +255,6 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
         base += DIRTY_MEMORY_BLOCK_SIZE;
     }
 
-    rcu_read_unlock();
-
     return dirty;
 }
 
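cpu_physical_memory_all_dirty (above) and cpu_physical_memory_set_dirty_flag (below) use the second variant: RCU_READ_LOCK_GUARD() declares a guard that holds the read lock from the declaration point until the enclosing scope, typically the whole function, is left, so the body keeps its original indentation. A sketch of the idea; lookup_blocks() is a hypothetical example, not from this patch:

    static bool lookup_blocks(unsigned client)
    {
        RCU_READ_LOCK_GUARD();    /* released automatically on return */
        return atomic_rcu_read(&ram_list.dirty_memory[client]) != NULL;
    }

As of this series the guard is a g_autoptr-style variable whose cleanup handler calls rcu_read_unlock(), so there is no unlock call left to forget.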
@@ -309,13 +306,11 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
     idx = page / DIRTY_MEMORY_BLOCK_SIZE;
     offset = page % DIRTY_MEMORY_BLOCK_SIZE;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
 
     blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
 
     set_bit_atomic(offset, blocks->blocks[idx]);
-
-    rcu_read_unlock();
 }
 
 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
@@ -334,39 +329,37 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
 
-    rcu_read_lock();
+    WITH_RCU_READ_LOCK_GUARD() {
+        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
+        }
 
-    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
-        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
-    }
-
-    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-    base = page - offset;
-    while (page < end) {
-        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+        base = page - offset;
+        while (page < end) {
+            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
 
-        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
-                              offset, next - page);
-        }
-        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
-                              offset, next - page);
-        }
-        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
-                              offset, next - page);
-        }
+            if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
+                                  offset, next - page);
+            }
+            if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
+                                  offset, next - page);
+            }
+            if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
+                                  offset, next - page);
+            }
 
-        page = next;
-        idx++;
-        offset = 0;
-        base += DIRTY_MEMORY_BLOCK_SIZE;
+            page = next;
+            idx++;
+            offset = 0;
+            base += DIRTY_MEMORY_BLOCK_SIZE;
+        }
     }
 
-    rcu_read_unlock();
-
     xen_hvm_modified_memory(start, length);
 }
 
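Note the block form is deliberate here: the guard's scope ends before xen_hvm_modified_memory(start, length), so the Xen notification still runs outside the RCU critical section, exactly where the removed rcu_read_unlock() used to sit.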
@@ -396,36 +389,35 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
         offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                           DIRTY_MEMORY_BLOCK_SIZE);
 
-        rcu_read_lock();
+        WITH_RCU_READ_LOCK_GUARD() {
+            for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+                blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+            }
 
-        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
-            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
-        }
+            for (k = 0; k < nr; k++) {
+                if (bitmap[k]) {
+                    unsigned long temp = leul_to_cpu(bitmap[k]);
 
-        for (k = 0; k < nr; k++) {
-            if (bitmap[k]) {
-                unsigned long temp = leul_to_cpu(bitmap[k]);
+                    atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
 
-                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
+                    if (global_dirty_log) {
+                        atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+                                  temp);
+                    }
 
-                if (global_dirty_log) {
-                    atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
-                              temp);
-                }
+                    if (tcg_enabled()) {
+                        atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
+                                  temp);
+                    }
+                }
 
-                if (tcg_enabled()) {
-                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
+                if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+                    offset = 0;
+                    idx++;
                 }
             }
-
-            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
-                offset = 0;
-                idx++;
-            }
         }
 
-        rcu_read_unlock();
-
         xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
     } else {
         uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;