qemu/nbd/client-connection.c
Nir Soffer e9f4550b74 nbd: Set unix socket send buffer on macOS
On macOS we need to increase the unix socket buffer size on both the
client and the server to get good performance. We set the socket buffers
on macOS after connecting or after accepting a client connection.

Testing shows that setting the socket receive buffer size (SO_RCVBUF) has
no effect on performance, so we set only the send buffer size (SO_SNDBUF).
macOS seems to behave like Linux here, but this behavior is not documented.

Testing shows that the optimal buffer size is between 512 KiB and 4 MiB,
depending on the test case. The differences are very small, so I chose 2 MiB.
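
The change itself is a call to a new helper, nbd_set_socket_send_buffer(),
made after connecting (see nbd_connect() below). As a rough sketch of the
idea only (the real helper operates on a QIOChannelSocket and is not part
of this file), the plain POSIX equivalent is:

    #include <sys/socket.h>
    #include <stdio.h>

    /*
     * Illustrative sketch, not the actual QEMU helper: enlarge the send
     * buffer of a connected unix socket to the 2 MiB chosen above.
     */
    static void set_unix_socket_send_buffer(int fd)
    {
        int size = 2 * 1024 * 1024; /* 2 MiB */

        if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &size, sizeof(size)) < 0) {
            perror("setsockopt(SO_SNDBUF)");
        }
    }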

I tested reading from qemu-nbd and writing to qemu-nbd with qemu-img, and
computing a blkhash with nbdcopy and blksum.

To focus on NBD communication and get less noisy results, I tested reading
from and writing to the null-co driver. I added a read-pattern option to
the null-co driver that returns data filled with 0xff:

    NULL="json:{'driver': 'raw', 'file': {'driver': 'null-co', 'size': '10g', 'read-pattern': 255}}"

For testing different buffer sizes, I added an environment variable that
sets the socket buffer size.
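
A hypothetical version of that test hook might look like this (the
variable name NBD_SOCKET_SEND_BUFFER_SIZE is invented for illustration;
the actual test patch is not part of this commit):

    #include <stdlib.h>

    /*
     * Hypothetical test hook, not part of this commit: override the send
     * buffer size from the environment.
     */
    static int send_buffer_size(int default_size)
    {
        const char *value = getenv("NBD_SOCKET_SEND_BUFFER_SIZE");

        return value ? atoi(value) : default_size;
    }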

Reading from qemu-nbd via qemu-img convert. In this test a buffer size of
2 MiB is optimal (12.6 times faster than the default).

    qemu-nbd -r -t -e 0 -f raw -k /tmp/nbd.sock "$NULL" &
    qemu-img convert -f raw -O raw -W -n "nbd+unix:///?socket=/tmp/nbd.sock" "$NULL"

| buffer size | time (s) | user (s) | system (s) |
|-------------|----------|----------|------------|
|     default |  13.361 |   2.653 |   5.702 |
|       65536 |   2.283 |   0.204 |   1.318 |
|      131072 |   1.673 |   0.062 |   1.008 |
|      262144 |   1.592 |   0.053 |   0.952 |
|      524288 |   1.496 |   0.049 |   0.887 |
|     1048576 |   1.234 |   0.047 |   0.738 |
|     2097152 |   1.060 |   0.080 |   0.602 |
|     4194304 |   1.061 |   0.076 |   0.604 |

Writing to qemu-nbd via qemu-img convert. In this test a buffer size of
2 MiB is optimal (9.2 times faster than the default).

    qemu-nbd -t -e 0 -f raw -k /tmp/nbd.sock "$NULL" &
    qemu-img convert -f raw -O raw -W -n "$NULL" "nbd+unix:///?socket=/tmp/nbd.sock"

| buffer size | time (s) | user (s) | system (s) |
|-------------|----------|----------|------------|
|     default |   8.063 |   2.522 |   4.184 |
|       65536 |   1.472 |   0.430 |   0.867 |
|      131072 |   1.071 |   0.297 |   0.654 |
|      262144 |   1.012 |   0.239 |   0.587 |
|      524288 |   0.970 |   0.201 |   0.514 |
|     1048576 |   0.895 |   0.184 |   0.454 |
|     2097152 |   0.877 |   0.174 |   0.440 |
|     4194304 |   0.944 |   0.231 |   0.535 |

Computing a blkhash with nbdcopy, using 4 NBD connections and a 256 KiB
request size. In this test a buffer size of 4 MiB is optimal (5.1 times
faster than the default).

    qemu-nbd -r -t -e 0 -f raw -k /tmp/nbd.sock "$NULL" &
    nbdcopy --blkhash "nbd+unix:///?socket=/tmp/nbd.sock" null:

| buffer size | time (s) | user (s) | system (s) |
|-------------|----------|----------|------------|
|     default |   8.624 |   5.727 |   6.507 |
|       65536 |   2.563 |   4.760 |   2.498 |
|      131072 |   1.903 |   4.559 |   2.093 |
|      262144 |   1.759 |   4.513 |   1.935 |
|      524288 |   1.729 |   4.489 |   1.924 |
|     1048576 |   1.696 |   4.479 |   1.884 |
|     2097152 |   1.710 |   4.480 |   1.763 |
|     4194304 |   1.687 |   4.479 |   1.712 |

Computing a blkhash with blksum, using 1 NBD connection and a 256 KiB
read size. In this test a buffer size of 512 KiB is optimal (10.3 times
faster than the default).

    qemu-nbd -r -t -e 0 -f raw -k /tmp/nbd.sock "$NULL" &
    blksum "nbd+unix:///?socket=/tmp/nbd.sock"

| buffer size | time (s) | user (s) | system (s) |
|-------------|----------|----------|------------|
|     default |  13.085 |   5.664 |   6.461 |
|       65536 |   3.299 |   5.106 |   2.515 |
|      131072 |   2.396 |   4.989 |   2.069 |
|      262144 |   1.607 |   4.724 |   1.555 |
|      524288 |   1.271 |   4.528 |   1.224 |
|     1048576 |   1.294 |   4.565 |   1.333 |
|     2097152 |   1.299 |   4.569 |   1.344 |
|     4194304 |   1.291 |   4.559 |   1.327 |

Signed-off-by: Nir Soffer <nirsof@gmail.com>
Message-ID: <20250517201154.88456-3-nirsof@gmail.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
2025-05-29 16:37:15 -05:00

/*
 * QEMU Block driver for NBD
 *
 * Copyright (c) 2021 Virtuozzo International GmbH.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/nbd.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/clone-visitor.h"
#include "qemu/coroutine.h"
#include "nbd/nbd-internal.h"

struct NBDClientConnection {
    /* Initialization constants, never change */
    SocketAddress *saddr; /* address to connect to */
    QCryptoTLSCreds *tlscreds;
    char *tlshostname;
    NBDExportInfo initial_info;
    bool do_negotiation;
    bool do_retry;

    QemuMutex mutex;

    NBDExportInfo updated_info;
    /*
     * @sioc represents a successful result. While the thread is running,
     * @sioc is used only by the thread and is not protected by the mutex.
     * When the thread is not running, @sioc is stolen by
     * nbd_co_establish_connection() under the mutex.
     */
    QIOChannelSocket *sioc;
    QIOChannel *ioc;
    /*
     * @err represents the previous attempt. It may be copied by
     * nbd_co_establish_connection() when it reports failure.
     */
    Error *err;

    /* All further fields are accessed only under the mutex */
    bool running; /* thread is running now */
    bool detached; /* thread is detached and should clean up the state */

    /*
     * wait_co: if non-NULL, which coroutine to wake in
     * nbd_co_establish_connection() after yield()
     */
    Coroutine *wait_co;
};

/*
 * The function isn't protected by any mutex; only call it when the client
 * connection attempt has not yet started.
 */
void nbd_client_connection_enable_retry(NBDClientConnection *conn)
{
    conn->do_retry = true;
}

NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr,
                                               bool do_negotiation,
                                               const char *export_name,
                                               const char *x_dirty_bitmap,
                                               QCryptoTLSCreds *tlscreds,
                                               const char *tlshostname)
{
    NBDClientConnection *conn = g_new(NBDClientConnection, 1);

    object_ref(OBJECT(tlscreds));
    *conn = (NBDClientConnection) {
        .saddr = QAPI_CLONE(SocketAddress, saddr),
        .tlscreds = tlscreds,
        .tlshostname = g_strdup(tlshostname),
        .do_negotiation = do_negotiation,
        .initial_info.request_sizes = true,
        .initial_info.mode = NBD_MODE_EXTENDED,
        .initial_info.base_allocation = true,
        .initial_info.x_dirty_bitmap = g_strdup(x_dirty_bitmap),
        .initial_info.name = g_strdup(export_name ?: "")
    };

    qemu_mutex_init(&conn->mutex);

    return conn;
}

static void nbd_client_connection_do_free(NBDClientConnection *conn)
{
    if (conn->sioc) {
        qio_channel_close(QIO_CHANNEL(conn->sioc), NULL);
        object_unref(OBJECT(conn->sioc));
    }
    error_free(conn->err);
    qapi_free_SocketAddress(conn->saddr);
    g_free(conn->tlshostname);
    object_unref(OBJECT(conn->tlscreds));
    g_free(conn->initial_info.x_dirty_bitmap);
    g_free(conn->initial_info.name);
    g_free(conn);
}

/*
 * Connect to @addr and do NBD negotiation if @info is not NULL. If @tlscreds
 * is given, @outioc is returned. @outioc is provided only on success. The
 * call may be cancelled from another thread by simply calling
 * qio_channel_shutdown() on @sioc.
 */
static int nbd_connect(QIOChannelSocket *sioc, SocketAddress *addr,
                       NBDExportInfo *info, QCryptoTLSCreds *tlscreds,
                       const char *tlshostname,
                       QIOChannel **outioc, Error **errp)
{
    int ret;

    if (outioc) {
        *outioc = NULL;
    }

    ret = qio_channel_socket_connect_sync(sioc, addr, errp);
    if (ret < 0) {
        return ret;
    }

    nbd_set_socket_send_buffer(sioc);

    qio_channel_set_delay(QIO_CHANNEL(sioc), false);

    if (!info) {
        return 0;
    }

    ret = nbd_receive_negotiate(QIO_CHANNEL(sioc), tlscreds, tlshostname,
                                outioc, info, errp);
    if (ret < 0) {
        /*
         * nbd_receive_negotiate() may set up a TLS ioc and return it even on
         * the failure path. In this case we should use it instead of the
         * original channel.
         */
        if (outioc && *outioc) {
            qio_channel_close(*outioc, NULL);
            object_unref(OBJECT(*outioc));
            *outioc = NULL;
        } else {
            qio_channel_close(QIO_CHANNEL(sioc), NULL);
        }

        return ret;
    }

    return 0;
}

static void *connect_thread_func(void *opaque)
{
    NBDClientConnection *conn = opaque;
    int ret;
    bool do_free;
    /* Retry with exponential backoff: 1s, 2s, 4s, ... capped at 16s */
    uint64_t timeout = 1;
    uint64_t max_timeout = 16;

    qemu_mutex_lock(&conn->mutex);
    while (!conn->detached) {
        Error *local_err = NULL;

        assert(!conn->sioc);
        conn->sioc = qio_channel_socket_new();

        qemu_mutex_unlock(&conn->mutex);

        conn->updated_info = conn->initial_info;

        ret = nbd_connect(conn->sioc, conn->saddr,
                          conn->do_negotiation ? &conn->updated_info : NULL,
                          conn->tlscreds, conn->tlshostname,
                          &conn->ioc, &local_err);

        /*
         * conn->updated_info will finally be returned to the user. Clear the
         * pointers to our internally allocated strings, which are IN
         * parameters of nbd_receive_negotiate() and therefore nbd_connect().
         * The caller shouldn't be interested in these fields.
         */
        conn->updated_info.x_dirty_bitmap = NULL;
        conn->updated_info.name = NULL;

        qemu_mutex_lock(&conn->mutex);

        error_free(conn->err);
        conn->err = NULL;
        error_propagate(&conn->err, local_err);

        if (ret < 0) {
            object_unref(OBJECT(conn->sioc));
            conn->sioc = NULL;
            if (conn->do_retry && !conn->detached) {
                trace_nbd_connect_thread_sleep(timeout);
                qemu_mutex_unlock(&conn->mutex);

                sleep(timeout);
                if (timeout < max_timeout) {
                    timeout *= 2;
                }

                qemu_mutex_lock(&conn->mutex);
                continue;
            }
        }

        break;
    }

    /* mutex is locked */

    assert(conn->running);
    conn->running = false;
    if (conn->wait_co) {
        aio_co_wake(conn->wait_co);
        conn->wait_co = NULL;
    }
    do_free = conn->detached;

    qemu_mutex_unlock(&conn->mutex);

    if (do_free) {
        nbd_client_connection_do_free(conn);
    }

    return NULL;
}

void nbd_client_connection_release(NBDClientConnection *conn)
{
    bool do_free = false;

    if (!conn) {
        return;
    }

    WITH_QEMU_LOCK_GUARD(&conn->mutex) {
        assert(!conn->detached);
        if (conn->running) {
            conn->detached = true;
        } else {
            do_free = true;
        }
        if (conn->sioc) {
            qio_channel_shutdown(QIO_CHANNEL(conn->sioc),
                                 QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        }
    }

    if (do_free) {
        nbd_client_connection_do_free(conn);
    }
}

/*
 * Get a new connection in the context of @conn:
 *   if the thread is running, wait for its completion;
 *   if the thread has already succeeded in the background and the user has
 *   not yet retrieved the result, return it now;
 *   otherwise the thread is not running, so start a thread and wait for
 *   completion.
 *
 * If @blocking is false, don't wait for the thread; return immediately.
 *
 * If @info is not NULL, also do NBD negotiation after a successful
 * connection. In this case @info is used only as an out parameter and is
 * fully initialized by nbd_co_establish_connection(). The "IN" fields of
 * @info, as well as the fields related only to nbd_receive_export_list(),
 * will be zero (see the description of NBDExportInfo in
 * include/block/nbd.h).
 */
QIOChannel *coroutine_fn
nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
                            bool blocking, Error **errp)
{
    QemuThread thread;

    if (conn->do_negotiation) {
        assert(info);
    }

    WITH_QEMU_LOCK_GUARD(&conn->mutex) {
        /*
         * Don't call nbd_co_establish_connection() from several coroutines
         * in parallel. Only one call at a time is supported.
         */
        assert(!conn->wait_co);

        if (!conn->running) {
            if (conn->sioc) {
                /* Previous attempt finally succeeded in background */
                if (conn->do_negotiation) {
                    memcpy(info, &conn->updated_info, sizeof(*info));
                    if (conn->ioc) {
                        /* TLS channel now has own reference to parent */
                        object_unref(OBJECT(conn->sioc));
                        conn->sioc = NULL;

                        return g_steal_pointer(&conn->ioc);
                    }
                }

                assert(!conn->ioc);

                return QIO_CHANNEL(g_steal_pointer(&conn->sioc));
            }

            conn->running = true;
            qemu_thread_create(&thread, "nbd-connect",
                               connect_thread_func, conn,
                               QEMU_THREAD_DETACHED);
        }

        if (!blocking) {
            if (conn->err) {
                error_propagate(errp, error_copy(conn->err));
            } else {
                error_setg(errp, "No connection at the moment");
            }

            return NULL;
        }

        conn->wait_co = qemu_coroutine_self();
    }

    /*
     * We are going to wait for the connect-thread to finish, but
     * nbd_co_establish_connection_cancel() can interrupt.
     */
    qemu_coroutine_yield();

    WITH_QEMU_LOCK_GUARD(&conn->mutex) {
        if (conn->running) {
            /*
             * The connection attempt was canceled and the coroutine resumed
             * before the connection thread finished its job. Report the
             * attempt as failed, but leave the connection thread running,
             * to reuse it for the next connection attempt.
             */
            if (conn->err) {
                error_propagate(errp, error_copy(conn->err));
            } else {
                /*
                 * The only possible case here is cancellation by the
                 * open_timer during nbd_open(), so the error message is for
                 * that case. If we get more use cases, we can refactor
                 * nbd_co_establish_connection_cancel() to take an additional
                 * cancel_reason parameter, which would then be passed to the
                 * caller of the cancelled nbd_co_establish_connection().
                 */
                error_setg(errp, "Connection attempt cancelled by timeout");
            }

            return NULL;
        } else {
            /* Thread finished. There must be either an error or a sioc */
            assert(!conn->err != !conn->sioc);

            if (conn->err) {
                error_propagate(errp, error_copy(conn->err));
                return NULL;
            }

            if (conn->do_negotiation) {
                memcpy(info, &conn->updated_info, sizeof(*info));
                if (conn->ioc) {
                    /* TLS channel now has own reference to parent */
                    object_unref(OBJECT(conn->sioc));
                    conn->sioc = NULL;

                    return g_steal_pointer(&conn->ioc);
                }
            }

            assert(!conn->ioc);

            return QIO_CHANNEL(g_steal_pointer(&conn->sioc));
        }
    }

    abort(); /* unreachable */
}

/*
 * nbd_co_establish_connection_cancel
 * Cancel nbd_co_establish_connection() asynchronously.
 *
 * Note that this function neither directly stops the thread nor closes the
 * socket; rather, it safely wakes nbd_co_establish_connection(), which is
 * sleeping in yield().
 */
void nbd_co_establish_connection_cancel(NBDClientConnection *conn)
{
    Coroutine *wait_co = NULL;

    WITH_QEMU_LOCK_GUARD(&conn->mutex) {
        wait_co = g_steal_pointer(&conn->wait_co);
    }

    if (wait_co) {
        aio_co_wake(wait_co);
    }
}