block/nvme: Extract nvme_poll_queue()
As we want to do per-queue polling, extract the nvme_poll_queue()
method which operates on a single queue.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-Id: <20200821195359.1285345-15-philmd@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
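For context (not part of this patch): in block/nvme.c of this era, completion polling is driven by the AioContext poll callback registered with aio_set_event_notifier(), which fans out over every queue pair. A simplified sketch of that caller follows; the exact body and field names may differ from the tree at this commit.

    /* Simplified sketch of the existing poll entry point in block/nvme.c. */
    static bool nvme_poll_cb(void *opaque)
    {
        EventNotifier *e = opaque;
        BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);

        /* Any completed request on any queue counts as progress. */
        return nvme_poll_queues(s);
    }

Extracting nvme_poll_queue() keeps this fan-out behaviour unchanged while making the single-queue case reusable.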
This commit is contained in:
parent 0a28b02ef9
commit 7a1fb2ef40
1 changed file with 27 additions and 17 deletions
block/nvme.c | 44 +++++++++++++++++++++++++++-----------------
@@ -590,31 +590,41 @@ out:
     qemu_vfree(id);
 }
 
+static bool nvme_poll_queue(NVMeQueuePair *q)
+{
+    bool progress = false;
+
+    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
+    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
+
+    /*
+     * Do an early check for completions. q->lock isn't needed because
+     * nvme_process_completion() only runs in the event loop thread and
+     * cannot race with itself.
+     */
+    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
+        return false;
+    }
+
+    qemu_mutex_lock(&q->lock);
+    while (nvme_process_completion(q)) {
+        /* Keep polling */
+        progress = true;
+    }
+    qemu_mutex_unlock(&q->lock);
+
+    return progress;
+}
+
 static bool nvme_poll_queues(BDRVNVMeState *s)
 {
     bool progress = false;
     int i;
 
     for (i = 0; i < s->nr_queues; i++) {
-        NVMeQueuePair *q = s->queues[i];
-        const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
-        NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
-
-        /*
-         * Do an early check for completions. q->lock isn't needed because
-         * nvme_process_completion() only runs in the event loop thread and
-         * cannot race with itself.
-         */
-        if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
-            continue;
-        }
-
-        qemu_mutex_lock(&q->lock);
-        while (nvme_process_completion(q)) {
-            /* Keep polling */
+        if (nvme_poll_queue(s->queues[i])) {
             progress = true;
         }
-        qemu_mutex_unlock(&q->lock);
     }
     return progress;
 }
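A hypothetical illustration of where this refactoring can lead (not in this commit; the handler name and the use of the queue pair as the opaque pointer are assumptions): once a single queue can be polled in isolation, each queue pair could register its own poll handler instead of every poll iterating over all queues.

    /*
     * Hypothetical per-queue poll handler built on the extracted helper
     * (illustration only, not part of this commit).
     */
    static bool nvme_poll_queue_cb(void *opaque)
    {
        NVMeQueuePair *q = opaque;

        return nvme_poll_queue(q);
    }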