Update Linux headers to 5.13-rc4

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Message-Id: <20210603191541.2862286-1-ehabkost@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Eduardo Habkost 2021-06-03 15:15:41 -04:00
parent 18e53dff93
commit 278f064e45
25 changed files with 2707 additions and 2046 deletions

linux-headers/linux/kvm.h

@@ -8,6 +8,7 @@
* Note: you must update KVM_API_VERSION if you change this interface.
*/
#include <linux/const.h>
#include <linux/types.h>
#include <linux/ioctl.h>
@@ -216,6 +217,20 @@ struct kvm_hyperv_exit {
} u;
};
struct kvm_xen_exit {
#define KVM_EXIT_XEN_HCALL 1
__u32 type;
union {
struct {
__u32 longmode;
__u32 cpl;
__u64 input;
__u64 result;
__u64 params[6];
} hcall;
} u;
};
#define KVM_S390_GET_SKEYS_NONE 1
#define KVM_S390_SKEYS_MAX 1048576
@@ -251,6 +266,9 @@ struct kvm_hyperv_exit {
#define KVM_EXIT_X86_RDMSR 29
#define KVM_EXIT_X86_WRMSR 30
#define KVM_EXIT_DIRTY_RING_FULL 31
#define KVM_EXIT_AP_RESET_HOLD 32
#define KVM_EXIT_X86_BUS_LOCK 33
#define KVM_EXIT_XEN 34
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -427,6 +445,8 @@ struct kvm_run {
__u32 index; /* kernel -> user */
__u64 data; /* kernel <-> user */
} msr;
/* KVM_EXIT_XEN */
struct kvm_xen_exit xen;
/* Fix the size of the union. */
char padding[256];
};
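For orientation, a VMM that has enabled hypercall interception sees these exits in its vcpu run loop. A minimal sketch, assuming vcpu_fd and the mmap'ed kvm_run area are already set up:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch only: dispatch the new KVM_EXIT_XEN exit reason after KVM_RUN. */
static void run_once(int vcpu_fd, struct kvm_run *run)
{
    if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
        return;
    if (run->exit_reason == KVM_EXIT_XEN &&
        run->xen.type == KVM_EXIT_XEN_HCALL) {
        /* u.hcall.input selects the hypercall, params[] carries its
         * arguments; the VMM emulates it and stores the result. */
        run->xen.u.hcall.result = (__u64)-38; /* -ENOSYS: not handled */
    }
}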
@@ -573,6 +593,7 @@ struct kvm_vapic_addr {
#define KVM_MP_STATE_CHECK_STOP 6
#define KVM_MP_STATE_OPERATING 7
#define KVM_MP_STATE_LOAD 8
#define KVM_MP_STATE_AP_RESET_HOLD 9
struct kvm_mp_state {
__u32 mp_state;
@@ -1056,6 +1077,12 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190
#define KVM_CAP_SYS_HYPERV_CPUID 191
#define KVM_CAP_DIRTY_LOG_RING 192
#define KVM_CAP_X86_BUS_LOCK_EXIT 193
#define KVM_CAP_PPC_DAWR1 194
#define KVM_CAP_SET_GUEST_DEBUG2 195
#define KVM_CAP_SGX_ATTRIBUTE 196
#define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197
#define KVM_CAP_PTP_KVM 198
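Userspace probes any of these new capabilities with the existing KVM_CHECK_EXTENSION ioctl before relying on them; a minimal sketch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    if (kvm < 0)
        return 1;
    /* Returns 0 if unsupported; for KVM_CAP_X86_BUS_LOCK_EXIT a nonzero
     * result is a bitmap of the KVM_BUS_LOCK_DETECTION_* modes. */
    printf("bus-lock exit: %d\n",
           ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_X86_BUS_LOCK_EXIT));
    return 0;
}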
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1129,6 +1156,11 @@ struct kvm_x86_mce {
#endif
#ifdef KVM_CAP_XEN_HVM
#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
struct kvm_xen_hvm_config {
__u32 flags;
__u32 msr;
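A hedged sketch of enabling the new interception mode (vm_fd is a KVM VM fd; the MSR index is an assumption, namely whatever MSR the VMM advertises to the guest for the hypercall page):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int xen_enable_hcall_intercept(int vm_fd)
{
    struct kvm_xen_hvm_config cfg = {
        .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
        .msr = 0x40000000, /* assumed hypercall-page MSR index */
    };
    return ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &cfg);
}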
@@ -1563,6 +1595,57 @@ struct kvm_pv_cmd {
/* Available with KVM_CAP_DIRTY_LOG_RING */
#define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7)
/* Per-VM Xen attributes */
#define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr)
#define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr)
struct kvm_xen_hvm_attr {
__u16 type;
__u16 pad[3];
union {
__u8 long_mode;
__u8 vector;
struct {
__u64 gfn;
} shared_info;
__u64 pad[8];
} u;
};
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2
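Usage sketch, assuming vm_fd is a KVM VM fd and the KVM_CAP_XEN_HVM check reported KVM_XEN_HVM_CONFIG_SHARED_INFO: the VMM points KVM at the guest frame that holds the Xen shared_info page:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int xen_set_shared_info(int vm_fd, __u64 gfn)
{
    struct kvm_xen_hvm_attr ha = {
        .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
        .u.shared_info.gfn = gfn, /* frame chosen by the VMM */
    };
    return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha);
}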
/* Per-vCPU Xen attributes */
#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)
struct kvm_xen_vcpu_attr {
__u16 type;
__u16 pad[3];
union {
__u64 gpa;
__u64 pad[8];
struct {
__u64 state;
__u64 state_entry_time;
__u64 time_running;
__u64 time_runnable;
__u64 time_blocked;
__u64 time_offline;
} runstate;
} u;
};
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
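Per-vCPU state is registered the same way, but against the vcpu fd; a sketch with a hypothetical guest-physical address for the vcpu_info area:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int xen_set_vcpu_info(int vcpu_fd, __u64 gpa)
{
    struct kvm_xen_vcpu_attr va = {
        .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
        .u.gpa = gpa, /* hypothetical address inside guest memory */
    };
    return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &va);
}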
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
@@ -1593,6 +1676,8 @@ enum sev_cmd_id {
KVM_SEV_CERT_EXPORT,
/* Attestation report */
KVM_SEV_GET_ATTESTATION_REPORT,
/* Guest Migration Extension */
KVM_SEV_SEND_CANCEL,
KVM_SEV_NR_MAX,
};
@@ -1651,6 +1736,45 @@ struct kvm_sev_attestation_report {
__u32 len;
};
struct kvm_sev_send_start {
__u32 policy;
__u64 pdh_cert_uaddr;
__u32 pdh_cert_len;
__u64 plat_certs_uaddr;
__u32 plat_certs_len;
__u64 amd_certs_uaddr;
__u32 amd_certs_len;
__u64 session_uaddr;
__u32 session_len;
};
struct kvm_sev_send_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
__u64 guest_uaddr;
__u32 guest_len;
__u64 trans_uaddr;
__u32 trans_len;
};
struct kvm_sev_receive_start {
__u32 handle;
__u32 policy;
__u64 pdh_uaddr;
__u32 pdh_len;
__u64 session_uaddr;
__u32 session_len;
};
struct kvm_sev_receive_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
__u64 guest_uaddr;
__u32 guest_len;
__u64 trans_uaddr;
__u32 trans_len;
};
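These structs are passed through the existing KVM_MEMORY_ENCRYPT_OP ioctl, wrapped in struct kvm_sev_cmd. A hedged sketch of the first source-side step; per the kernel's SEV API documentation, zero-length buffers ask the firmware to report the sizes it needs rather than perform the export:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: assumes an SEV-enabled VM fd and an open /dev/sev fd. */
static int sev_send_start_query(int vm_fd, int sev_fd,
                                struct kvm_sev_send_start *start)
{
    struct kvm_sev_cmd cmd = {
        .id = KVM_SEV_SEND_START,
        .data = (__u64)(unsigned long)start, /* start->*_len zeroed */
        .sev_fd = (__u32)sev_fd,
    };
    return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd); /* check cmd.error */
}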
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
#define KVM_DEV_ASSIGN_MASK_INTX (1 << 2)
@@ -1756,8 +1880,8 @@ struct kvm_hyperv_eventfd {
* conversion after harvesting an entry. Also, it must not skip any
* dirty bits, so that dirty bits are always harvested in sequence.
*/
-#define KVM_DIRTY_GFN_F_DIRTY BIT(0)
-#define KVM_DIRTY_GFN_F_RESET BIT(1)
+#define KVM_DIRTY_GFN_F_DIRTY _BITUL(0)
+#define KVM_DIRTY_GFN_F_RESET _BITUL(1)
#define KVM_DIRTY_GFN_F_MASK 0x3
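To make the harvesting contract concrete, a simplified sketch (real code must use acquire/release ordering on flags as the comment above requires; ring, nents, and the marking helper are assumptions of this sketch):

#include <linux/kvm.h>
#include <sys/ioctl.h>

extern void vmm_mark_dirty(__u32 slot, __u64 offset); /* hypothetical helper */

static __u32 harvest(int vm_fd, struct kvm_dirty_gfn *ring,
                     __u32 nents, __u32 next)
{
    while (ring[next % nents].flags & KVM_DIRTY_GFN_F_DIRTY) {
        struct kvm_dirty_gfn *e = &ring[next % nents];
        vmm_mark_dirty(e->slot, e->offset);
        e->flags = KVM_DIRTY_GFN_F_RESET; /* collected, ready for reset */
        next++;
    }
    ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0); /* recycle collected entries */
    return next;
}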
/*
@@ -1772,4 +1896,7 @@ struct kvm_dirty_gfn {
__u64 offset;
};
#define KVM_BUS_LOCK_DETECTION_OFF (1 << 0)
#define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1)
#endif /* __LINUX_KVM_H */

linux-headers/linux/userfaultfd.h

@@ -19,15 +19,19 @@
* means the userland is reading).
*/
#define UFFD_API ((__u64)0xAA)
#define UFFD_API_REGISTER_MODES (UFFDIO_REGISTER_MODE_MISSING | \
UFFDIO_REGISTER_MODE_WP | \
UFFDIO_REGISTER_MODE_MINOR)
#define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \
UFFD_FEATURE_EVENT_FORK | \
UFFD_FEATURE_EVENT_REMAP | \
UFFD_FEATURE_EVENT_REMOVE | \
UFFD_FEATURE_EVENT_UNMAP | \
UFFD_FEATURE_MISSING_HUGETLBFS | \
UFFD_FEATURE_MISSING_SHMEM | \
UFFD_FEATURE_SIGBUS | \
-UFFD_FEATURE_THREAD_ID)
+UFFD_FEATURE_THREAD_ID | \
+UFFD_FEATURE_MINOR_HUGETLBFS)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -36,10 +40,12 @@
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
(__u64)1 << _UFFDIO_ZEROPAGE | \
-(__u64)1 << _UFFDIO_WRITEPROTECT)
+(__u64)1 << _UFFDIO_WRITEPROTECT | \
+(__u64)1 << _UFFDIO_CONTINUE)
#define UFFD_API_RANGE_IOCTLS_BASIC \
((__u64)1 << _UFFDIO_WAKE | \
-(__u64)1 << _UFFDIO_COPY)
+(__u64)1 << _UFFDIO_COPY | \
+(__u64)1 << _UFFDIO_CONTINUE)
/*
* Valid ioctl command number range with this API is from 0x00 to
@@ -55,6 +61,7 @@
#define _UFFDIO_COPY (0x03)
#define _UFFDIO_ZEROPAGE (0x04)
#define _UFFDIO_WRITEPROTECT (0x06)
#define _UFFDIO_CONTINUE (0x07)
#define _UFFDIO_API (0x3F)
/* userfaultfd ioctl ids */
@@ -73,6 +80,8 @@
struct uffdio_zeropage)
#define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
struct uffdio_writeprotect)
#define UFFDIO_CONTINUE _IOR(UFFDIO, _UFFDIO_CONTINUE, \
struct uffdio_continue)
/* read() structure */
struct uffd_msg {
@@ -127,6 +136,7 @@ struct uffd_msg {
/* flags for UFFD_EVENT_PAGEFAULT */
#define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */
#define UFFD_PAGEFAULT_FLAG_WP (1<<1) /* If reason is VM_UFFD_WP */
#define UFFD_PAGEFAULT_FLAG_MINOR (1<<2) /* If reason is VM_UFFD_MINOR */
struct uffdio_api {
/* userland asks for an API number and the features to enable */
@@ -171,6 +181,10 @@ struct uffdio_api {
*
* UFFD_FEATURE_THREAD_ID pid of the page faulted task_struct will
* be returned, if feature is not requested 0 will be returned.
*
* UFFD_FEATURE_MINOR_HUGETLBFS indicates that minor faults
* can be intercepted (via REGISTER_MODE_MINOR) for
* hugetlbfs-backed pages.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@@ -181,6 +195,7 @@ struct uffdio_api {
#define UFFD_FEATURE_EVENT_UNMAP (1<<6)
#define UFFD_FEATURE_SIGBUS (1<<7)
#define UFFD_FEATURE_THREAD_ID (1<<8)
#define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9)
__u64 features;
__u64 ioctls;
@@ -195,6 +210,7 @@ struct uffdio_register {
struct uffdio_range range;
#define UFFDIO_REGISTER_MODE_MISSING ((__u64)1<<0)
#define UFFDIO_REGISTER_MODE_WP ((__u64)1<<1)
#define UFFDIO_REGISTER_MODE_MINOR ((__u64)1<<2)
__u64 mode;
/*
@@ -257,6 +273,18 @@ struct uffdio_writeprotect {
__u64 mode;
};
struct uffdio_continue {
struct uffdio_range range;
#define UFFDIO_CONTINUE_MODE_DONTWAKE ((__u64)1<<0)
__u64 mode;
/*
* Fields below here are written by the ioctl and must be at the end:
* the copy_from_user will not read past here.
*/
__s64 mapped;
};
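Putting the minor-fault pieces together, a hedged sketch: register a hugetlbfs-backed range in MINOR mode, then resolve a fault by asking the kernel to install the page-cache page that already holds the contents. Assumes uffd came from userfaultfd(2) with UFFD_FEATURE_MINOR_HUGETLBFS negotiated:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int register_minor(int uffd, unsigned long addr, unsigned long len)
{
    struct uffdio_register reg = {
        .range = { .start = addr, .len = len },
        .mode = UFFDIO_REGISTER_MODE_MINOR,
    };
    return ioctl(uffd, UFFDIO_REGISTER, &reg);
}

static int resolve_minor(int uffd, unsigned long addr, unsigned long pagesz)
{
    struct uffdio_continue cont = {
        .range = { .start = addr, .len = pagesz },
        .mode = 0, /* 0: also wake the faulting thread */
    };
    /* On success, cont.mapped holds the number of bytes installed. */
    return ioctl(uffd, UFFDIO_CONTINUE, &cont);
}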
/*
* Flags for the userfaultfd(2) system call itself.
*/

linux-headers/linux/vfio.h

@@ -46,6 +46,12 @@
*/
#define VFIO_NOIOMMU_IOMMU 8
/* Supports VFIO_DMA_UNMAP_FLAG_ALL */
#define VFIO_UNMAP_ALL 9
/* Supports the vaddr flag for DMA map and unmap */
#define VFIO_UPDATE_VADDR 10
/*
* The IOCTL interface is designed for extensibility by embedding the
* structure length (argsz) and flags into structures passed between
@@ -329,6 +335,8 @@ struct vfio_region_info_cap_type {
/* 10de vendor PCI sub-types */
/*
* NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
*
* Deprecated, region no longer provided
*/
#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1)
@@ -336,6 +344,8 @@ struct vfio_region_info_cap_type {
/*
* IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
* to do TLB invalidation on a GPU.
*
* Deprecated, region no longer provided
*/
#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1)
@@ -635,6 +645,8 @@ struct vfio_device_migration_info {
* Capability with compressed real address (aka SSA - small system address)
* where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
* and by the userspace to associate a NVLink bridge with a GPU.
*
* Deprecated, capability no longer provided
*/
#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT 4
@@ -649,6 +661,8 @@ struct vfio_region_info_cap_nvlink2_ssatgt {
* property in the device tree. The value is fixed in the hardware
* and failing to provide the correct value results in the link
* not working with no indication from the driver why.
*
* Deprecated, capability no longer provided
*/
#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD 5
@@ -1074,12 +1088,22 @@ struct vfio_iommu_type1_info_dma_avail {
*
* Map process virtual addresses to IO virtual addresses using the
* provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required.
*
* If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova, and
* unblock translation of host virtual addresses in the iova range. The vaddr
* must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR. To
* maintain memory consistency within the user application, the updated vaddr
* must address the same memory object as originally mapped. Failure to do so
* will result in user memory corruption and/or device misbehavior. iova and
* size must match those in the original MAP_DMA call. Protection is not
* changed, and the READ & WRITE flags must be 0.
*/
struct vfio_iommu_type1_dma_map {
__u32 argsz;
__u32 flags;
#define VFIO_DMA_MAP_FLAG_READ (1 << 0) /* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1) /* writable from device */
#define VFIO_DMA_MAP_FLAG_VADDR (1 << 2)
__u64 vaddr; /* Process virtual address */
__u64 iova; /* IO virtual address */
__u64 size; /* Size of mapping (bytes) */
@@ -1102,6 +1126,7 @@ struct vfio_bitmap {
* field. No guarantee is made to the user that arbitrary unmaps of iova
* or size different from those used in the original mapping call will
* succeed.
*
* VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
* before unmapping IO virtual addresses. When this flag is set, the user must
* provide a struct vfio_bitmap in data[]. User must provide zero-allocated
@ -1111,11 +1136,21 @@ struct vfio_bitmap {
* indicates that the page at that offset from iova is dirty. A Bitmap of the
* pages in the range of unmapped size is returned in the user-provided
* vfio_bitmap.data.
*
* If flags & VFIO_DMA_UNMAP_FLAG_ALL, unmap all addresses. iova and size
* must be 0. This cannot be combined with the get-dirty-bitmap flag.
*
* If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
* virtual addresses in the iova range. Tasks that attempt to translate an
* iova's vaddr will block. DMA to already-mapped pages continues. This
* cannot be combined with the get-dirty-bitmap flag.
*/
struct vfio_iommu_type1_dma_unmap {
__u32 argsz;
__u32 flags;
#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
#define VFIO_DMA_UNMAP_FLAG_ALL (1 << 1)
#define VFIO_DMA_UNMAP_FLAG_VADDR (1 << 2)
__u64 iova; /* IO virtual address */
__u64 size; /* Size of mapping (bytes) */
__u8 data[];
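Taken together, the two VADDR flags let a VMM relocate guest memory in its own address space while DMA mappings stay live. A hedged sketch, assuming a container fd whose IOMMU backend reports VFIO_UPDATE_VADDR:

#include <linux/vfio.h>
#include <sys/ioctl.h>

static int vfio_update_vaddr(int container, __u64 iova, __u64 size,
                             __u64 new_vaddr)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = VFIO_DMA_UNMAP_FLAG_VADDR, /* invalidate, don't unmap */
        .iova = iova,
        .size = size,
    };
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_VADDR, /* supply the replacement vaddr */
        .vaddr = new_vaddr, /* must alias the same memory object */
        .iova = iova,
        .size = size,
    };

    if (ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap) < 0)
        return -1;
    /* ...move the memory so it is visible at new_vaddr (e.g. mremap)... */
    return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
}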