Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-08-04 08:13:54 -06:00)
iommu: Introduce IOMMU emulation infrastructure
This patch adds the basic infrastructure necessary to emulate an IOMMU visible to the guest. The DMAContext structure is extended with information and a callback describing the translation, and the various DMA functions used by devices will now perform IOMMU translation using this callback.

Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Richard Henderson <rth@twiddle.net>
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
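[Editor's note] As an illustration of how a platform model might plug into this infrastructure, here is a minimal sketch; it is not part of this commit. The callback signature follows the dma->translate() call sites and the dma_context_init() prototype in the diff below, while MyIOMMUState, my_iommu_translate() and the single window/offset translation scheme are invented purely for this example.

#include "qemu-common.h"
#include "dma.h"

/* Hypothetical IOMMU state for illustration: one contiguous DMA window
 * remapped by a constant offset.  A real IOMMU model would walk a
 * guest-controlled translation table here instead. */
typedef struct MyIOMMUState {
    DMAContext dma;              /* context handed to devices behind the IOMMU */
    dma_addr_t window_start;     /* first valid device-visible address */
    dma_addr_t window_size;      /* size of the valid window */
    target_phys_addr_t offset;   /* bias from bus address to physical address */
} MyIOMMUState;

static int my_iommu_translate(DMAContext *dma, dma_addr_t addr,
                              target_phys_addr_t *paddr,
                              target_phys_addr_t *plen,
                              DMADirection dir)
{
    MyIOMMUState *s = container_of(dma, MyIOMMUState, dma);

    if (addr < s->window_start
        || addr >= s->window_start + s->window_size) {
        return -1;               /* translation fault */
    }

    *paddr = addr + s->offset;
    /* Report how far the mapping stays contiguous; the helpers below
     * clamp this to the length of the actual request. */
    *plen = s->window_start + s->window_size - addr;
    return 0;
}

static void my_iommu_init(MyIOMMUState *s)
{
    /* No custom map/unmap callbacks: iommu_dma_memory_map() then falls
     * back to translate + cpu_physical_memory_map(). */
    dma_context_init(&s->dma, my_iommu_translate, NULL, NULL);
}

With the context registered this way, the generic helpers (the dma_memory_set() wrapper added below, and the corresponding wrappers in dma.h) route device DMA through my_iommu_translate() instead of going straight to guest physical memory.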
parent e2f89926f1
commit e5332e6334
3 changed files with 258 additions and 24 deletions
dma-helpers.c | 172
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -9,8 +9,12 @@
 #include "dma.h"
 #include "trace.h"
+#include "range.h"
+#include "qemu-thread.h"
 
-int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
+/* #define DEBUG_IOMMU */
+
+static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
 {
 #define FILLBUF_SIZE 512
     uint8_t fillbuf[FILLBUF_SIZE];
     int l;
@@ -23,6 +27,15 @@ int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
         len -= len;
         addr += len;
     }
+}
+
+int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
+{
+    if (dma_has_iommu(dma)) {
+        return iommu_dma_memory_set(dma, addr, c, len);
+    }
+    do_dma_memory_set(addr, c, len);
+
     return 0;
 }
 
@@ -260,3 +273,160 @@ void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
 {
     bdrv_acct_start(bs, cookie, sg->size, type);
 }
+
+bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
+                            DMADirection dir)
+{
+    target_phys_addr_t paddr, plen;
+
+#ifdef DEBUG_IOMMU
+    fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
+            " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
+#endif
+
+    while (len) {
+        if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
+            return false;
+        }
+
+        /* The translation might be valid for larger regions. */
+        if (plen > len) {
+            plen = len;
+        }
+
+        len -= plen;
+        addr += plen;
+    }
+
+    return true;
+}
+
+int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+                        void *buf, dma_addr_t len, DMADirection dir)
+{
+    target_phys_addr_t paddr, plen;
+    int err;
+
+#ifdef DEBUG_IOMMU
+    fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
+            DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
+#endif
+
+    while (len) {
+        err = dma->translate(dma, addr, &paddr, &plen, dir);
+        if (err) {
+            /*
+             * In case of failure on reads from the guest, we clean the
+             * destination buffer so that a device that doesn't test
+             * for errors will not expose qemu internal memory.
+             */
+            memset(buf, 0, len);
+            return -1;
+        }
+
+        /* The translation might be valid for larger regions. */
+        if (plen > len) {
+            plen = len;
+        }
+
+        cpu_physical_memory_rw(paddr, buf, plen,
+                               dir == DMA_DIRECTION_FROM_DEVICE);
+
+        len -= plen;
+        addr += plen;
+        buf += plen;
+    }
+
+    return 0;
+}
+
+int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
+                         dma_addr_t len)
+{
+    target_phys_addr_t paddr, plen;
+    int err;
+
+#ifdef DEBUG_IOMMU
+    fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
+            " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
+#endif
+
+    while (len) {
+        err = dma->translate(dma, addr, &paddr, &plen,
+                             DMA_DIRECTION_FROM_DEVICE);
+        if (err) {
+            return err;
+        }
+
+        /* The translation might be valid for larger regions. */
+        if (plen > len) {
+            plen = len;
+        }
+
+        do_dma_memory_set(paddr, c, plen);
+
+        len -= plen;
+        addr += plen;
+    }
+
+    return 0;
+}
+
+void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
+                      DMAMapFunc map, DMAUnmapFunc unmap)
+{
+#ifdef DEBUG_IOMMU
+    fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
+            dma, translate, map, unmap);
+#endif
+    dma->translate = translate;
+    dma->map = map;
+    dma->unmap = unmap;
+}
+
+void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
+                           DMADirection dir)
+{
+    int err;
+    target_phys_addr_t paddr, plen;
+    void *buf;
+
+    if (dma->map) {
+        return dma->map(dma, addr, len, dir);
+    }
+
+    plen = *len;
+    err = dma->translate(dma, addr, &paddr, &plen, dir);
+    if (err) {
+        return NULL;
+    }
+
+    /*
+     * If this is true, the virtual region is contiguous,
+     * but the translated physical region isn't. We just
+     * clamp *len, much like cpu_physical_memory_map() does.
+     */
+    if (plen < *len) {
+        *len = plen;
+    }
+
+    buf = cpu_physical_memory_map(paddr, &plen,
+                                  dir == DMA_DIRECTION_FROM_DEVICE);
+    *len = plen;
+
+    return buf;
+}
+
+void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
+                            DMADirection dir, dma_addr_t access_len)
+{
+    if (dma->unmap) {
+        dma->unmap(dma, buffer, len, dir, access_len);
+        return;
+    }
+
+    cpu_physical_memory_unmap(buffer, len,
+                              dir == DMA_DIRECTION_FROM_DEVICE,
+                              access_len);
+
+}
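[Editor's note] For completeness, a hedged sketch of the device-facing side, reusing the hypothetical MyIOMMUState from above. It assumes the dma_memory_rw() wrapper from dma.h (part of the same series, not shown in this diff), which dispatches the same way as the dma_memory_set() wrapper above; my_device_fetch() is invented for the example.

/* Device model pulling a descriptor from guest memory through the IOMMU.
 * DMA_DIRECTION_TO_DEVICE means "read from guest memory into the device",
 * matching the is_write logic in iommu_dma_memory_rw() above. */
static int my_device_fetch(MyIOMMUState *s, dma_addr_t bus_addr,
                           void *desc, dma_addr_t len)
{
    if (!iommu_dma_memory_valid(&s->dma, bus_addr, len,
                                DMA_DIRECTION_TO_DEVICE)) {
        return -1;   /* region not (fully) mapped by the IOMMU */
    }
    /* On a translation fault mid-transfer the helper zeroes the buffer
     * and returns non-zero, so stale QEMU memory is never exposed. */
    return dma_memory_rw(&s->dma, bus_addr, desc, len,
                         DMA_DIRECTION_TO_DEVICE);
}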