Drop more useless casts from void * to pointer
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
Message-Id: <20221123133811.1398562-1-armbru@redhat.com>
commit 3d558330ad
parent ea3a008d2d
15 changed files with 24 additions and 28 deletions
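The rationale: in ISO C a value of type void * converts implicitly to and from any object pointer type, so casting a void * expression before assigning it to a typed pointer adds nothing. Below is a minimal standalone sketch, not taken from the patch itself (the InsnData-style names are only illustrative), showing that the cast-free form compiles cleanly and behaves identically:

/*
 * Minimal standalone sketch (not part of the patch): void * assigns
 * directly to a typed pointer in C, so the explicit cast is redundant.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    unsigned long l1_dmisses;
} InsnData;

static void on_miss(void *userdata)
{
    InsnData *insn;

    insn = (InsnData *) userdata;   /* legal, but the cast adds nothing */
    insn = userdata;                /* equivalent in C; the form the patch keeps */
    insn->l1_dmisses++;
}

int main(void)
{
    InsnData *d = calloc(1, sizeof(*d));   /* void * from calloc; no cast needed */

    if (!d) {
        return 1;
    }
    on_miss(d);
    printf("misses: %lu\n", d->l1_dmisses);
    free(d);
    return 0;
}

Note that C++ is stricter and does not convert void * to other pointer types implicitly, which is one way this cast style creeps into C code; the files touched here are C, so the casts can simply go.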
@@ -156,7 +156,7 @@ static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
 --p; --tmp; --len;
 if (--offset < 0) {
 offset = p % TARGET_PAGE_SIZE;
-pag = (char *)page[p / TARGET_PAGE_SIZE];
+pag = page[p / TARGET_PAGE_SIZE];
 if (!pag) {
 pag = g_try_malloc0(TARGET_PAGE_SIZE);
 page[p / TARGET_PAGE_SIZE] = pag;

@@ -405,7 +405,7 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
 g_mutex_lock(&l1_dcache_locks[cache_idx]);
 hit_in_l1 = access_cache(l1_dcaches[cache_idx], effective_addr);
 if (!hit_in_l1) {
-insn = (InsnData *) userdata;
+insn = userdata;
 __atomic_fetch_add(&insn->l1_dmisses, 1, __ATOMIC_SEQ_CST);
 l1_dcaches[cache_idx]->misses++;
 }

@@ -419,7 +419,7 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info,

 g_mutex_lock(&l2_ucache_locks[cache_idx]);
 if (!access_cache(l2_ucaches[cache_idx], effective_addr)) {
-insn = (InsnData *) userdata;
+insn = userdata;
 __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
 l2_ucaches[cache_idx]->misses++;
 }

@@ -440,7 +440,7 @@ static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata)
 g_mutex_lock(&l1_icache_locks[cache_idx]);
 hit_in_l1 = access_cache(l1_icaches[cache_idx], insn_addr);
 if (!hit_in_l1) {
-insn = (InsnData *) userdata;
+insn = userdata;
 __atomic_fetch_add(&insn->l1_imisses, 1, __ATOMIC_SEQ_CST);
 l1_icaches[cache_idx]->misses++;
 }

@@ -454,7 +454,7 @@ static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata)

 g_mutex_lock(&l2_ucache_locks[cache_idx]);
 if (!access_cache(l2_ucaches[cache_idx], insn_addr)) {
-insn = (InsnData *) userdata;
+insn = userdata;
 __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
 l2_ucaches[cache_idx]->misses++;
 }

@@ -193,7 +193,7 @@ vub_discard_write_zeroes(VubReq *req, struct iovec *iov, uint32_t iovcnt,

 #if defined(__linux__) && defined(BLKDISCARD) && defined(BLKZEROOUT)
 VubDev *vdev_blk = req->vdev_blk;
-desc = (struct virtio_blk_discard_write_zeroes *)buf;
+desc = buf;
 uint64_t range[2] = { le64toh(desc->sector) << 9,
 le32toh(desc->num_sectors) << 9 };
 if (type == VIRTIO_BLK_T_DISCARD) {

@@ -134,7 +134,7 @@ void qdev_init_clocks(DeviceState *dev, const ClockPortInitArray clocks)
 Clock **clkp;
 /* offset cannot be inside the DeviceState part */
 assert(elem->offset > sizeof(DeviceState));
-clkp = (Clock **)(((void *) dev) + elem->offset);
+clkp = ((void *)dev) + elem->offset;
 if (elem->is_output) {
 *clkp = qdev_init_clock_out(dev, elem->name);
 } else {

@@ -2104,7 +2104,7 @@ static void process_message(VMBus *vmbus)
 goto out;
 }
 msgdata = hv_msg->payload;
-msg = (struct vmbus_message_header *)msgdata;
+msg = msgdata;

 trace_vmbus_process_incoming_message(msg->message_type);

@@ -1429,7 +1429,7 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
 {
 CadenceGEMState *s;
 uint32_t retval;
-s = (CadenceGEMState *)opaque;
+s = opaque;

 offset >>= 2;
 retval = s->regs[offset];

@@ -2472,7 +2472,7 @@ static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc,
 VirtioNetRscChain *chain;
 VirtioNetRscUnit unit;

-chain = (VirtioNetRscChain *)opq;
+chain = opq;
 hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;

 if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)

@@ -4028,14 +4028,14 @@ static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
 nr_zones++;
 }
 }
-header = (NvmeZoneReportHeader *)buf;
+header = buf;
 header->nr_zones = cpu_to_le64(nr_zones);

 buf_p = buf + sizeof(NvmeZoneReportHeader);
 for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) {
 zone = &ns->zone_array[zone_idx];
 if (nvme_zone_matches_filter(zrasf, zone)) {
-z = (NvmeZoneDescr *)buf_p;
+z = buf_p;
 buf_p += sizeof(NvmeZoneDescr);

 z->zt = zone->d.zt;

@@ -269,8 +269,7 @@ static int create_cq_ring(PCIDevice *pci_dev , PvrdmaRing **ring,
 r = g_malloc(sizeof(*r));
 *ring = r;

-r->ring_state = (PvrdmaRingState *)
-rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+r->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);

 if (!r->ring_state) {
 rdma_error_report("Failed to map to CQ ring state");

@@ -405,8 +404,7 @@ static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
 *rings = sr;

 /* Create send ring */
-sr->ring_state = (PvrdmaRingState *)
-rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+sr->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
 if (!sr->ring_state) {
 rdma_error_report("Failed to map to QP ring state");
 goto out_free_sr_mem;

@@ -646,8 +644,7 @@ static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
 r = g_malloc(sizeof(*r));
 *ring = r;

-r->ring_state = (PvrdmaRingState *)
-rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+r->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
 if (!r->ring_state) {
 rdma_error_report("Failed to map tp SRQ ring state");
 goto out_free_ring_mem;

@@ -149,7 +149,7 @@ void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle)

 ring = (PvrdmaRing *)qp->opaque;

-wqe = (struct PvrdmaSqWqe *)pvrdma_ring_next_elem_read(ring);
+wqe = pvrdma_ring_next_elem_read(ring);
 while (wqe) {
 CompHandlerCtx *comp_ctx;

@@ -212,7 +212,7 @@ void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle)

 ring = &((PvrdmaRing *)qp->opaque)[1];

-wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring);
+wqe = pvrdma_ring_next_elem_read(ring);
 while (wqe) {
 CompHandlerCtx *comp_ctx;

@@ -254,7 +254,7 @@ void pvrdma_srq_recv(PVRDMADev *dev, uint32_t srq_handle)

 ring = (PvrdmaRing *)srq->opaque;

-wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring);
+wqe = pvrdma_ring_next_elem_read(ring);
 while (wqe) {
 CompHandlerCtx *comp_ctx;

@@ -775,8 +775,7 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
 output_size = s->config.probe_size + sizeof(tail);
 buf = g_malloc0(output_size);

-ptail = (struct virtio_iommu_req_tail *)
-(buf + s->config.probe_size);
+ptail = buf + s->config.probe_size;
 ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
 break;
 }

@@ -5471,7 +5471,7 @@ static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
 for (i = 0; i < se->nb_fields; i++) {
 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
 assert(*field_types == TYPE_PTRVOID);
-target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
+target_rt_dev_ptr = argptr + src_offsets[i];
 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
 if (*target_rt_dev_ptr != 0) {
 *host_rt_dev_ptr = (unsigned long)lock_user_string(

@@ -388,7 +388,7 @@ static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port,
 MemTxAttrs attrs = { 0 };

 if (!df) {
-ptr = (uint8_t *) buffer;
+ptr = buffer;
 } else {
 ptr = buffer + size * count - size;
 }

@@ -73,11 +73,11 @@ int main(int argc, char *argv[argc])
 ml_printf("stack: %p <- %p\n", info.stack_limit, info.stack_base);

 /* finally can we read/write the heap */
-ptr_to_heap = (uint32_t *) info.heap_base;
+ptr_to_heap = info.heap_base;
 for (i = 0; i < 512; i++) {
 *ptr_to_heap++ = i;
 }
-ptr_to_heap = (uint32_t *) info.heap_base;
+ptr_to_heap = info.heap_base;
 for (i = 0; i < 512; i++) {
 uint32_t tmp = *ptr_to_heap;
 if (tmp != i) {

@@ -271,7 +271,7 @@ static void collect_usable_iova_ranges(QEMUVFIOState *s, void *buf)
 if (!cap->next) {
 return;
 }
-cap = (struct vfio_info_cap_header *)(buf + cap->next);
+cap = buf + cap->next;
 }

 cap_iova_range = (struct vfio_iommu_type1_info_cap_iova_range *)cap;