intel-iommu: introduce vtd_page_walk_info

During the recursive walk of the IOVA page tables, some of the
parameters are constant and never change for the duration of the walk.
Gather them into a single struct so that we do not need to pass each of
those constants down the stack at every level of the recursion.

CC: QEMU Stable <qemu-stable@nongnu.org>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Author:    Peter Xu <peterx@redhat.com>
Date:      2018-05-18 15:25:13 +08:00
Committer: Michael S. Tsirkin <mst@redhat.com>
Parent:    4f8a62a933
Commit:    fe215b0cbb

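The shape of the refactoring is worth seeing in isolation: every
recursion-invariant value moves into one struct, and a single pointer to
it is threaded through the recursive calls. Below is a minimal
standalone sketch of that pattern (hypothetical names, not QEMU code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Recursion-invariant parameters, gathered once per walk. */
    typedef struct {
        int (*hook_fn)(uint64_t addr, void *private);
        void *private;
        int notify_unmap;
        uint8_t aw;
    } walk_info;

    static int print_hook(uint64_t addr, void *private)
    {
        printf("%s: 0x%" PRIx64 "\n", (const char *)private, addr);
        return 0;
    }

    /* Only the per-level state stays in the parameter list; the
     * constants travel down the recursion as a single pointer. */
    static int walk_level(uint64_t addr, uint32_t level, walk_info *info)
    {
        if (level == 0) {
            return info->hook_fn(addr, info->private);
        }
        return walk_level(addr, level - 1, info);
    }

    int main(void)
    {
        walk_info info = {
            .hook_fn = print_hook,
            .private = "leaf",
            .notify_unmap = 1,
            .aw = 48,
        };
        return walk_level(0x1000, 3, &info);
    }

Compared with passing four extra arguments at every level, each frame
now carries one pointer, and a new walk-wide constant added later only
touches the struct and its initializers.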

--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -748,9 +748,27 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
 
 typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);
 
+/**
+ * Constant information used during page walking
+ *
+ * @hook_fn: hook func to be called when detected page
+ * @private: private data to be passed into hook func
+ * @notify_unmap: whether we should notify invalid entries
+ * @aw: maximum address width
+ */
+typedef struct {
+    vtd_page_walk_hook hook_fn;
+    void *private;
+    bool notify_unmap;
+    uint8_t aw;
+} vtd_page_walk_info;
+
 static int vtd_page_walk_one(IOMMUTLBEntry *entry, int level,
-                             vtd_page_walk_hook hook_fn, void *private)
+                             vtd_page_walk_info *info)
 {
+    vtd_page_walk_hook hook_fn = info->hook_fn;
+    void *private = info->private;
+
     assert(hook_fn);
     trace_vtd_page_walk_one(level, entry->iova, entry->translated_addr,
                             entry->addr_mask, entry->perm);
@@ -763,17 +781,13 @@ static int vtd_page_walk_one(IOMMUTLBEntry *entry, int level,
  * @addr: base GPA addr to start the walk
  * @start: IOVA range start address
  * @end: IOVA range end address (start <= addr < end)
- * @hook_fn: hook func to be called when detected page
- * @private: private data to be passed into hook func
  * @read: whether parent level has read permission
  * @write: whether parent level has write permission
- * @notify_unmap: whether we should notify invalid entries
- * @aw: maximum address width
+ * @info: constant information for the page walk
  */
 static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
-                               uint64_t end, vtd_page_walk_hook hook_fn,
-                               void *private, uint32_t level, bool read,
-                               bool write, bool notify_unmap, uint8_t aw)
+                               uint64_t end, uint32_t level, bool read,
+                               bool write, vtd_page_walk_info *info)
 {
     bool read_cur, write_cur, entry_valid;
     uint32_t offset;
@@ -823,24 +837,24 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
 
         if (vtd_is_last_slpte(slpte, level)) {
             /* NOTE: this is only meaningful if entry_valid == true */
-            entry.translated_addr = vtd_get_slpte_addr(slpte, aw);
-            if (!entry_valid && !notify_unmap) {
+            entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw);
+            if (!entry_valid && !info->notify_unmap) {
                 trace_vtd_page_walk_skip_perm(iova, iova_next);
                 goto next;
             }
-            ret = vtd_page_walk_one(&entry, level, hook_fn, private);
+            ret = vtd_page_walk_one(&entry, level, info);
             if (ret < 0) {
                 return ret;
             }
         } else {
             if (!entry_valid) {
-                if (notify_unmap) {
+                if (info->notify_unmap) {
                     /*
                      * The whole entry is invalid; unmap it all.
                      * Translated address is meaningless, zero it.
                      */
                     entry.translated_addr = 0x0;
-                    ret = vtd_page_walk_one(&entry, level, hook_fn, private);
+                    ret = vtd_page_walk_one(&entry, level, info);
                     if (ret < 0) {
                         return ret;
                     }
@@ -849,10 +863,9 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
             }
             goto next;
         }
-        ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, aw), iova,
-                                  MIN(iova_next, end), hook_fn, private,
-                                  level - 1, read_cur, write_cur,
-                                  notify_unmap, aw);
+        ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, info->aw),
+                                  iova, MIN(iova_next, end), level - 1,
+                                  read_cur, write_cur, info);
         if (ret < 0) {
             return ret;
         }
@@ -871,28 +884,24 @@ next:
  * @ce: context entry to walk upon
  * @start: IOVA address to start the walk
  * @end: IOVA range end address (start <= addr < end)
- * @hook_fn: the hook that to be called for each detected area
- * @private: private data for the hook function
- * @aw: maximum address width
+ * @info: page walking information struct
  */
 static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
-                         vtd_page_walk_hook hook_fn, void *private,
-                         bool notify_unmap, uint8_t aw)
+                         vtd_page_walk_info *info)
 {
     dma_addr_t addr = vtd_ce_get_slpt_base(ce);
     uint32_t level = vtd_ce_get_level(ce);
 
-    if (!vtd_iova_range_check(start, ce, aw)) {
+    if (!vtd_iova_range_check(start, ce, info->aw)) {
         return -VTD_FR_ADDR_BEYOND_MGAW;
     }
 
-    if (!vtd_iova_range_check(end, ce, aw)) {
+    if (!vtd_iova_range_check(end, ce, info->aw)) {
         /* Fix end so that it reaches the maximum */
-        end = vtd_iova_limit(ce, aw);
+        end = vtd_iova_limit(ce, info->aw);
     }
 
-    return vtd_page_walk_level(addr, start, end, hook_fn, private,
-                               level, true, true, notify_unmap, aw);
+    return vtd_page_walk_level(addr, start, end, level, true, true, info);
 }
 
 /* Map a device to its corresponding domain (context-entry) */
@@ -1449,14 +1458,19 @@ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
                                        vtd_as->devfn, &ce);
         if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
             if (vtd_as_has_map_notifier(vtd_as)) {
+                vtd_page_walk_info info = {
+                    .hook_fn = vtd_page_invalidate_notify_hook,
+                    .private = (void *)&vtd_as->iommu,
+                    .notify_unmap = true,
+                    .aw = s->aw_bits,
+                };
+
                 /*
                  * As long as we have MAP notifications registered in
                  * any of our IOMMU notifiers, we need to sync the
                  * shadow page table.
                  */
-                vtd_page_walk(&ce, addr, addr + size,
-                              vtd_page_invalidate_notify_hook,
-                              (void *)&vtd_as->iommu, true, s->aw_bits);
+                vtd_page_walk(&ce, addr, addr + size, &info);
             } else {
                 /*
                  * For UNMAP-only notifiers, we don't need to walk the
@@ -2924,8 +2938,14 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
                                   ce.hi, ce.lo);
         if (vtd_as_has_map_notifier(vtd_as)) {
             /* This is required only for MAP typed notifiers */
-            vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false,
-                          s->aw_bits);
+            vtd_page_walk_info info = {
+                .hook_fn = vtd_replay_hook,
+                .private = (void *)n,
+                .notify_unmap = false,
+                .aw = s->aw_bits,
+            };
+
+            vtd_page_walk(&ce, 0, ~0ULL, &info);
         }
     } else {
         trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
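As the two call sites above show, each caller now builds the struct with
C99 designated initializers and hands the walk a single pointer. One
practical payoff of that indirection: a new walk-wide constant can be
added later without touching any of the recursive signatures. A hedged
sketch of what such an extension could look like (the extra field and
the type stubs below are purely illustrative, not part of this commit):

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-ins for the QEMU types, so the sketch is self-contained. */
    typedef struct IOMMUTLBEntry IOMMUTLBEntry;
    typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);

    typedef struct {
        vtd_page_walk_hook hook_fn;
        void *private;
        bool notify_unmap;
        uint8_t aw;
        /* Hypothetical future constant: only this struct and its
         * designated initializers would change; vtd_page_walk_one(),
         * vtd_page_walk_level() and vtd_page_walk() keep their
         * signatures. */
        void *owner;
    } vtd_page_walk_info_sketch;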