vhost-user: fully use new backend/frontend naming

Slave/master nomenclature was replaced with backend/frontend in commit
1fc19b6527 ("vhost-user: Adopt new backend naming").

This patch replaces all remaining uses of master and slave in the
codebase.

Signed-off-by: Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org>
Message-Id: <20230613080849.2115347-1-manos.pitsidianakis@linaro.org>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
commit f8ed3648b5
parent 535a3d9a32
Author: Manos Pitsidianakis, 2023-06-13 11:08:48 +03:00
Committed-by: Michael S. Tsirkin
12 changed files with 74 additions and 72 deletions
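
For orientation before reading the diff: the patch changes identifiers only,
not behavior. A minimal sketch of the two renamed VuDev members (illustrative
only; the real struct in libvhost-user.h carries many more fields):

#include <pthread.h>

/* Sketch of the renamed members, not the full VuDev definition. */
typedef struct VuDevSketch {
    int backend_fd;                /* was slave_fd: socket for
                                    * backend-initiated messages */
    pthread_mutex_t backend_mutex; /* was slave_mutex: serializes requests
                                    * sent on backend_fd */
} VuDevSketch;

The handler vu_set_slave_req_fd() is likewise renamed to
vu_set_backend_req_fd(), as the hunks below show.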

subprojects/libvhost-user/libvhost-user.c

@@ -421,8 +421,8 @@ vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
 }
 
 /*
- * Processes a reply on the slave channel.
- * Entered with slave_mutex held and releases it before exit.
+ * Processes a reply on the backend channel.
+ * Entered with backend_mutex held and releases it before exit.
  * Returns true on success.
  */
 static bool
@@ -436,7 +436,7 @@ vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
         goto out;
     }
 
-    if (!vu_message_read_default(dev, dev->slave_fd, &msg_reply)) {
+    if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
         goto out;
     }
@@ -449,7 +449,7 @@ vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
     result = msg_reply.payload.u64 == 0;
 
 out:
-    pthread_mutex_unlock(&dev->slave_mutex);
+    pthread_mutex_unlock(&dev->backend_mutex);
     return result;
 }
@@ -1393,13 +1393,13 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
         return false;
     }
 
-    pthread_mutex_lock(&dev->slave_mutex);
-    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
-        pthread_mutex_unlock(&dev->slave_mutex);
+    pthread_mutex_lock(&dev->backend_mutex);
+    if (!vu_message_write(dev, dev->backend_fd, &vmsg)) {
+        pthread_mutex_unlock(&dev->backend_mutex);
         return false;
     }
 
-    /* Also unlocks the slave_mutex */
+    /* Also unlocks the backend_mutex */
    return vu_process_message_reply(dev, &vmsg);
 }
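
The four hunks above all touch one locking idiom that is easy to miss: the
sender takes backend_mutex, and on the success path ownership of the lock is
handed to vu_process_message_reply(), which releases it on every exit. A
self-contained sketch of the idiom, with hypothetical names standing in for
vu_set_queue_host_notifier() and vu_process_message_reply():

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t backend_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for vu_process_message_reply(): entered with backend_mutex
 * held, and releases it on every path before returning. */
static bool process_reply_sketch(bool reply_ok)
{
    bool result = reply_ok;
    pthread_mutex_unlock(&backend_mutex);
    return result;
}

/* Stand-in for the caller: unlocks only on its own early-error path;
 * otherwise the reply helper owns the unlock. */
static bool send_request_sketch(bool write_ok, bool reply_ok)
{
    pthread_mutex_lock(&backend_mutex);
    if (!write_ok) {
        pthread_mutex_unlock(&backend_mutex);
        return false;
    }
    /* also unlocks backend_mutex */
    return process_reply_sketch(reply_ok);
}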
@@ -1463,7 +1463,7 @@ vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
      * a device implementation can return it in its callback
      * (get_protocol_features) if it wants to use this for
      * simulation, but it is otherwise not desirable (if even
-     * implemented by the master.)
+     * implemented by the frontend.)
      */
     uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
                         1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
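
The hunk is truncated here, but its visible tail shows how the reply to
GET_PROTOCOL_FEATURES is built: a plain uint64_t bit mask where each
VHOST_USER_PROTOCOL_F_* constant names a bit position. A generic sketch of
testing one such bit (the helper name is a hypothetical stand-in; libvhost-user
has its own internal helper for this):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: test one feature bit in a u64 mask built with
 * expressions like (1ULL << VHOST_USER_PROTOCOL_F_MQ). */
static inline bool mask_has_bit(uint64_t features, unsigned int fbit)
{
    return !!(features & (1ULL << fbit));
}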
@@ -1508,7 +1508,7 @@ vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
          * of the other features are required.
          * Theoretically, one could use only kick messages, or do them without
          * having F_REPLY_ACK, but too many (possibly pending) messages on the
-         * socket will eventually cause the master to hang, to avoid this in
+         * socket will eventually cause the frontend to hang, to avoid this in
          * scenarios where not desired enforce that the settings are in a way
          * that actually enables the simulation case.
          */
@@ -1550,18 +1550,18 @@ vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
 }
 
 static bool
-vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
+vu_set_backend_req_fd(VuDev *dev, VhostUserMsg *vmsg)
 {
     if (vmsg->fd_num != 1) {
-        vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
+        vu_panic(dev, "Invalid backend_req_fd message (%d fd's)", vmsg->fd_num);
         return false;
     }
 
-    if (dev->slave_fd != -1) {
-        close(dev->slave_fd);
+    if (dev->backend_fd != -1) {
+        close(dev->backend_fd);
     }
 
-    dev->slave_fd = vmsg->fds[0];
-    DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);
+    dev->backend_fd = vmsg->fds[0];
+    DPRINT("Got backend_fd: %d\n", vmsg->fds[0]);
 
     return false;
 }
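
The renamed handler receives the backend channel's file descriptor as
SCM_RIGHTS ancillary data on the vhost-user socket; the frontend side that
sends VHOST_USER_SET_BACKEND_REQ_FD is not part of this file. As a minimal,
hedged sketch (not QEMU code), attaching one fd to a message over a Unix
socket looks like this:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Illustrative only: send `len` bytes from `buf` on the connected Unix
 * socket `sock`, attaching `fd` as SCM_RIGHTS ancillary data. */
static ssize_t send_with_fd(int sock, const void *buf, size_t len, int fd)
{
    char control[CMSG_SPACE(sizeof(int))];
    struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
        .msg_controllen = sizeof(control),
    };
    struct cmsghdr *cmsg;

    memset(control, 0, sizeof(control));
    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

    return sendmsg(sock, &msg, 0);
}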
@@ -1577,7 +1577,7 @@ vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
     }
 
     if (ret) {
-        /* resize to zero to indicate an error to master */
+        /* resize to zero to indicate an error to frontend */
         vmsg->size = 0;
     }
@@ -1917,7 +1917,7 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
     case VHOST_USER_SET_VRING_ENABLE:
         return vu_set_vring_enable_exec(dev, vmsg);
     case VHOST_USER_SET_BACKEND_REQ_FD:
-        return vu_set_slave_req_fd(dev, vmsg);
+        return vu_set_backend_req_fd(dev, vmsg);
     case VHOST_USER_GET_CONFIG:
         return vu_get_config(dev, vmsg);
     case VHOST_USER_SET_CONFIG:
@@ -2038,11 +2038,11 @@ vu_deinit(VuDev *dev)
     }
 
     vu_close_log(dev);
-    if (dev->slave_fd != -1) {
-        close(dev->slave_fd);
-        dev->slave_fd = -1;
+    if (dev->backend_fd != -1) {
+        close(dev->backend_fd);
+        dev->backend_fd = -1;
     }
-    pthread_mutex_destroy(&dev->slave_mutex);
+    pthread_mutex_destroy(&dev->backend_mutex);
 
     if (dev->sock != -1) {
         close(dev->sock);
@@ -2080,8 +2080,8 @@ vu_init(VuDev *dev,
     dev->remove_watch = remove_watch;
     dev->iface = iface;
     dev->log_call_fd = -1;
-    pthread_mutex_init(&dev->slave_mutex, NULL);
-    dev->slave_fd = -1;
+    pthread_mutex_init(&dev->backend_mutex, NULL);
+    dev->backend_fd = -1;
     dev->max_queues = max_queues;
 
     dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
@@ -2439,9 +2439,9 @@ static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
             vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
         }
 
-        vu_message_write(dev, dev->slave_fd, &vmsg);
+        vu_message_write(dev, dev->backend_fd, &vmsg);
         if (ack) {
-            vu_message_read_default(dev, dev->slave_fd, &vmsg);
+            vu_message_read_default(dev, dev->backend_fd, &vmsg);
         }
         return;
     }
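
What this hunk is doing, for readers without the full function in front of
them: when in-band notifications have been negotiated, the backend delivers
the vring notification as a message on backend_fd instead of signalling an
eventfd, and when a synchronous ack is wanted it sets
VHOST_USER_NEED_REPLY_MASK and blocks reading the frontend's reply from the
same fd. The same lines again, annotated (comments added; code otherwise as
in the hunk):

if (ack) {
    vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;      /* ask frontend to reply */
}

vu_message_write(dev, dev->backend_fd, &vmsg);     /* notify via the socket,
                                                    * not via an eventfd */
if (ack) {
    vu_message_read_default(dev, dev->backend_fd,
                            &vmsg);                /* block for the ack */
}
return;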
@@ -2468,7 +2468,7 @@ void vu_config_change_msg(VuDev *dev)
         .flags = VHOST_USER_VERSION,
     };
 
-    vu_message_write(dev, dev->slave_fd, &vmsg);
+    vu_message_write(dev, dev->backend_fd, &vmsg);
 }
 
 static inline void