Linux headers: update

Update against Linux 5.8-rc1.

Signed-off-by: Cornelia Huck <cohuck@redhat.com>
Cornelia Huck 2020-06-09 16:26:53 +02:00
parent 26bf4a2921
commit f76b348ec7
25 changed files with 818 additions and 33 deletions

View file

@@ -31,6 +31,7 @@
#define KVM_FEATURE_PV_SEND_IPI 11
#define KVM_FEATURE_POLL_CONTROL 12
#define KVM_FEATURE_PV_SCHED_YIELD 13
#define KVM_FEATURE_ASYNC_PF_INT 14
#define KVM_HINTS_REALTIME 0
@@ -50,6 +51,8 @@
#define MSR_KVM_STEAL_TIME 0x4b564d03
#define MSR_KVM_PV_EOI_EN 0x4b564d04
#define MSR_KVM_POLL_CONTROL 0x4b564d05
#define MSR_KVM_ASYNC_PF_INT 0x4b564d06
#define MSR_KVM_ASYNC_PF_ACK 0x4b564d07
struct kvm_steal_time {
uint64_t steal;
@@ -81,6 +84,11 @@ struct kvm_clock_pairing {
#define KVM_ASYNC_PF_ENABLED (1 << 0)
#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
#define KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT (1 << 2)
#define KVM_ASYNC_PF_DELIVERY_AS_INT (1 << 3)
/* MSR_KVM_ASYNC_PF_INT */
#define KVM_ASYNC_PF_VEC_MASK GENMASK(7, 0)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
@@ -112,8 +120,13 @@ struct kvm_mmu_op_release_pt {
#define KVM_PV_REASON_PAGE_READY 2
struct kvm_vcpu_pv_apf_data {
uint32_t reason;
uint8_t pad[60];
/* Used for 'page not present' events delivered via #PF */
uint32_t flags;
/* Used for 'page ready' events delivered via interrupt notification */
uint32_t token;
uint8_t pad[56];
uint32_t enabled;
};
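With MSR_KVM_ASYNC_PF_INT and KVM_ASYNC_PF_DELIVERY_AS_INT, a guest can have 'page ready' notifications delivered on a dedicated interrupt vector instead of as a second #PF, acknowledging each one via MSR_KVM_ASYNC_PF_ACK. A minimal guest-side sketch, not the actual Linux implementation: it assumes this header is included, wrmsr() is open-coded for x86, and virt_to_phys(), the vector number and the MSR_KVM_ASYNC_PF_EN value (0x4b564d02, defined earlier in this header) are taken from outside this hunk.

#include <stdint.h>

#define MSR_KVM_ASYNC_PF_EN  0x4b564d02    /* defined earlier in this header */
#define ASYNC_PF_VECTOR      0xf3          /* hypothetical free IDT vector */

extern uint64_t virt_to_phys(void *p);     /* assumed guest helper */
static struct kvm_vcpu_pv_apf_data apf_data;

static inline void wrmsr(uint32_t msr, uint64_t val)
{
    asm volatile("wrmsr" : : "c"(msr), "a"((uint32_t)val),
                 "d"((uint32_t)(val >> 32)) : "memory");
}

static void enable_async_pf_int(void)
{
    /* Bits 7:0 of MSR_KVM_ASYNC_PF_INT select the notification vector. */
    wrmsr(MSR_KVM_ASYNC_PF_INT, ASYNC_PF_VECTOR);
    /* Enable async PF and request interrupt delivery of 'page ready' events. */
    wrmsr(MSR_KVM_ASYNC_PF_EN, virt_to_phys(&apf_data) |
          KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT);
}

/* Handler wired to ASYNC_PF_VECTOR. */
static void handle_page_ready(void)
{
    uint32_t token = apf_data.token;   /* identifies the completed wait */

    apf_data.token = 0;
    (void)token;                       /* wake the matching waiter here */
    wrmsr(MSR_KVM_ASYNC_PF_ACK, 1);    /* let the host queue the next event */
}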

View file

@@ -353,9 +353,12 @@ extern "C" {
* a platform-dependent stride. On top of that the memory can apply
* platform-dependent swizzling of some higher address bits into bit6.
*
* This format is highly platforms specific and not useful for cross-driver
* sharing. It exists since on a given platform it does uniquely identify the
* layout in a simple way for i915-specific userspace.
* Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
* On earlier platforms it is highly platform-specific and not useful for
* cross-driver sharing. It exists since on a given platform it does uniquely
* identify the layout in a simple way for i915-specific userspace, which
* facilitated conversion of userspace to modifiers. Additionally the exact
* format on some really old platforms is not known.
*/
#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
@@ -368,9 +371,12 @@ extern "C" {
* memory can apply platform-dependent swizzling of some higher address bits
* into bit6.
*
* This format is highly platforms specific and not useful for cross-driver
* sharing. It exists since on a given platform it does uniquely identify the
* layout in a simple way for i915-specific userspace.
* Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
* On earlier platforms it is highly platform-specific and not useful for
* cross-driver sharing. It exists since on a given platform it does uniquely
* identify the layout in a simple way for i915-specific userspace, which
* facilitated conversion of userspace to modifiers. Additionally the exact
* format on some really old platforms is not known.
*/
#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
@@ -520,7 +526,113 @@ extern "C" {
#define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1)
/*
* 16Bx2 Block Linear layout, used by desktop GPUs, and Tegra K1 and later
* Generalized Block Linear layout, used by desktop GPUs starting with NV50/G80,
* and Tegra GPUs starting with Tegra K1.
*
* Pixels are arranged in Groups of Bytes (GOBs). GOB size and layout varies
* based on the architecture generation. GOBs themselves are then arranged in
* 3D blocks, with the block dimensions (in terms of GOBs) always being a power
* of two, and hence expressible as their log2 equivalent (E.g., "2" represents
* a block depth or height of "4").
*
* Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
* in full detail.
*
* Macro
* Bits Param Description
* ---- ----- -----------------------------------------------------------------
*
* 3:0 h log2(height) of each block, in GOBs. Placed here for
* compatibility with the existing
* DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
*
* 4:4 - Must be 1, to indicate block-linear layout. Necessary for
* compatibility with the existing
* DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
*
* 8:5 - Reserved (To support 3D-surfaces with variable log2(depth) block
* size). Must be zero.
*
* Note there is no log2(width) parameter. Some portions of the
* hardware support a block width of two gobs, but it is impractical
* to use due to lack of support elsewhere, and has no known
* benefits.
*
* 11:9 - Reserved (To support 2D-array textures with variable array stride
* in blocks, specified via log2(tile width in blocks)). Must be
* zero.
*
* 19:12 k Page Kind. This value directly maps to a field in the page
* tables of all GPUs >= NV50. It affects the exact layout of bits
* in memory and can be derived from the tuple
*
* (format, GPU model, compression type, samples per pixel)
*
* Where compression type is defined below. If GPU model were
* implied by the format modifier, format, or memory buffer, page
* kind would not need to be included in the modifier itself, but
* since the modifier should define the layout of the associated
* memory buffer independent from any device or other context, it
* must be included here.
*
* 21:20 g GOB Height and Page Kind Generation. The height of a GOB changed
* starting with Fermi GPUs. Additionally, the mapping between page
* kind and bit layout has changed at various points.
*
* 0 = Gob Height 8, Fermi - Volta, Tegra K1+ Page Kind mapping
* 1 = Gob Height 4, G80 - GT2XX Page Kind mapping
* 2 = Gob Height 8, Turing+ Page Kind mapping
* 3 = Reserved for future use.
*
* 22:22 s Sector layout. On Tegra GPUs prior to Xavier, there is a further
* bit remapping step that occurs at an even lower level than the
* page kind and block linear swizzles. This causes the layout of
* surfaces mapped in those SOC's GPUs to be incompatible with the
* equivalent mapping on other GPUs in the same system.
*
* 0 = Tegra K1 - Tegra Parker/TX2 Layout.
* 1 = Desktop GPU and Tegra Xavier+ Layout
*
* 25:23 c Lossless Framebuffer Compression type.
*
* 0 = none
* 1 = ROP/3D, layout 1, exact compression format implied by Page
* Kind field
* 2 = ROP/3D, layout 2, exact compression format implied by Page
* Kind field
* 3 = CDE horizontal
* 4 = CDE vertical
* 5 = Reserved for future use
* 6 = Reserved for future use
* 7 = Reserved for future use
*
* 55:26 - Reserved for future use. Must be zero.
*/
#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
fourcc_mod_code(NVIDIA, (0x10 | \
((h) & 0xf) | \
(((k) & 0xff) << 12) | \
(((g) & 0x3) << 20) | \
(((s) & 0x1) << 22) | \
(((c) & 0x7) << 23)))
/* To grandfather in prior block linear format modifiers to the above layout,
* the page kind "0", which corresponds to "pitch/linear" and hence is unusable
* with block-linear layouts, is remapped within drivers to the value 0xfe,
* which corresponds to the "generic" kind used for simple single-sample
* uncompressed color formats on Fermi - Volta GPUs.
*/
static inline uint64_t
drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier)
{
if (!(modifier & 0x10) || (modifier & (0xff << 12)))
return modifier;
else
return modifier | (0xfe << 12);
}
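A hedged usage sketch of the new macro and helper (the include path and the particular parameter values are this example's choices, not something the header mandates): it packs a modifier from its parameters and checks that a legacy 16Bx2 value canonicalizes onto the new encoding with the generic page kind 0xfe.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <drm/drm_fourcc.h>   /* assumed install path; QEMU ships its own copy */

int main(void)
{
    /* No compression (c=0), desktop/Xavier sector layout (s=1), Turing-era
     * page kind generation (g=2), page kind 0x06, 16-GOB-high blocks (h=4). */
    uint64_t mod = DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4);

    printf("page kind 0x%02x, log2(block height) %u\n",
           (unsigned)((mod >> 12) & 0xff), (unsigned)(mod & 0xf));

    /* Legacy 16Bx2 modifiers carry page kind 0; canonicalization remaps that
     * to the generic kind 0xfe so old and new spellings compare equal. */
    assert(drm_fourcc_canonicalize_nvidia_format_mod(
               DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB) ==
           DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0xfe, 2));
    return 0;
}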
/*
* 16Bx2 Block Linear layout, used by Tegra K1 and later
*
* Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
* vertically by a power of 2 (1 to 32 GOBs) to form a block.
@@ -541,20 +653,20 @@ extern "C" {
* in full detail.
*/
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \
fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf))
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0, (v))
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \
fourcc_mod_code(NVIDIA, 0x10)
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \
fourcc_mod_code(NVIDIA, 0x11)
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \
fourcc_mod_code(NVIDIA, 0x12)
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \
fourcc_mod_code(NVIDIA, 0x13)
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \
fourcc_mod_code(NVIDIA, 0x14)
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
fourcc_mod_code(NVIDIA, 0x15)
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5)
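Because the legacy names now expand through DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D() with every new parameter zeroed, their numeric values are unchanged. A consumer worried about that could add a compile-time check along these lines (C11; placed in a .c file that includes this header):

_Static_assert(DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB ==
               fourcc_mod_code(NVIDIA, 0x10), "legacy encoding changed");
_Static_assert(DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB ==
               fourcc_mod_code(NVIDIA, 0x15), "legacy encoding changed");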
/*
* Some Broadcom modifiers take parameters, for example the number of

View file

@@ -1666,6 +1666,18 @@ static inline int ethtool_validate_duplex(uint8_t duplex)
return 0;
}
#define MASTER_SLAVE_CFG_UNSUPPORTED 0
#define MASTER_SLAVE_CFG_UNKNOWN 1
#define MASTER_SLAVE_CFG_MASTER_PREFERRED 2
#define MASTER_SLAVE_CFG_SLAVE_PREFERRED 3
#define MASTER_SLAVE_CFG_MASTER_FORCE 4
#define MASTER_SLAVE_CFG_SLAVE_FORCE 5
#define MASTER_SLAVE_STATE_UNSUPPORTED 0
#define MASTER_SLAVE_STATE_UNKNOWN 1
#define MASTER_SLAVE_STATE_MASTER 2
#define MASTER_SLAVE_STATE_SLAVE 3
#define MASTER_SLAVE_STATE_ERR 4
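These constants expose the IEEE 802.3 master/slave configuration and resolution state (relevant to e.g. 1000BASE-T and single-pair PHYs). A small hedged helper for logging the reported state; the function name and strings are this example's choices:

#include <stdint.h>

static const char *master_slave_state_name(uint8_t state)
{
    switch (state) {
    case MASTER_SLAVE_STATE_MASTER:       return "master";
    case MASTER_SLAVE_STATE_SLAVE:        return "slave";
    case MASTER_SLAVE_STATE_ERR:          return "error";
    case MASTER_SLAVE_STATE_UNKNOWN:      return "unknown";
    case MASTER_SLAVE_STATE_UNSUPPORTED:
    default:                              return "unsupported";
    }
}

/* e.g. printf("role: %s\n", master_slave_state_name(ls->master_slave_state));
 * once a struct ethtool_link_settings has been obtained from the kernel. */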
/* Which connector port. */
#define PORT_TP 0x00
#define PORT_AUI 0x01
@@ -1904,7 +1916,9 @@ struct ethtool_link_settings {
uint8_t eth_tp_mdix_ctrl;
int8_t link_mode_masks_nwords;
uint8_t transceiver;
uint8_t reserved1[3];
uint8_t master_slave_cfg;
uint8_t master_slave_state;
uint8_t reserved1[1];
uint32_t reserved[7];
uint32_t link_mode_masks[0];
/* layout of link_mode_masks fields:

View file

@@ -44,6 +44,7 @@
#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
#define VIRTIO_ID_MEM 24 /* virtio mem */
#define VIRTIO_ID_FS 26 /* virtio filesystem */
#define VIRTIO_ID_PMEM 27 /* virtio pmem */
#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */

View file

@@ -0,0 +1,211 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/*
* Virtio Mem Device
*
* Copyright Red Hat, Inc. 2020
*
* Authors:
* David Hildenbrand <david@redhat.com>
*
* This header is BSD licensed so anyone can use the definitions
* to implement compatible drivers/servers:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _LINUX_VIRTIO_MEM_H
#define _LINUX_VIRTIO_MEM_H
#include "standard-headers/linux/types.h"
#include "standard-headers/linux/virtio_types.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_config.h"
/*
* Each virtio-mem device manages a dedicated region in physical address
* space. Each device can belong to a single NUMA node; multiple devices
* for a single NUMA node are possible. A virtio-mem device is like a
* "resizable DIMM" consisting of small memory blocks that can be plugged
* or unplugged. The device driver is responsible for (un)plugging memory
* blocks on demand.
*
* Virtio-mem devices can only operate on their assigned memory region in
* order to (un)plug memory. A device cannot (un)plug memory belonging to
* other devices.
*
* The "region_size" corresponds to the maximum amount of memory that can
* be provided by a device. The "size" corresponds to the amount of memory
* that is currently plugged. "requested_size" corresponds to a request
* from the device to the device driver to (un)plug blocks. The
* device driver should try to (un)plug blocks in order to reach the
* "requested_size". It is impossible to plug more memory than requested.
*
* The "usable_region_size" represents the memory region that can actually
* be used to (un)plug memory. It is always at least as big as the
* "requested_size" and will grow dynamically. It will only shrink when
* explicitly triggered (VIRTIO_MEM_REQ_UNPLUG).
*
* There are no guarantees what will happen if unplugged memory is
* read/written. Such memory should, in general, not be touched. E.g.,
* even writing might succeed, but the values will simply be discarded at
* random points in time.
*
* It can happen that the device cannot process a request, because it is
* busy. The device driver has to retry later.
*
* Usually, during system resets all memory will get unplugged, so the
* device driver can start with a clean state. However, in specific
* scenarios (if the device is busy) it can happen that the device still
* has memory plugged. The device driver can request to unplug all memory
* (VIRTIO_MEM_REQ_UNPLUG) - which might take a while to succeed if the
* device is busy.
*/
/* --- virtio-mem: feature bits --- */
/* node_id is an ACPI PXM and is valid */
#define VIRTIO_MEM_F_ACPI_PXM 0
/* --- virtio-mem: guest -> host requests --- */
/* request to plug memory blocks */
#define VIRTIO_MEM_REQ_PLUG 0
/* request to unplug memory blocks */
#define VIRTIO_MEM_REQ_UNPLUG 1
/* request to unplug all blocks and shrink the usable size */
#define VIRTIO_MEM_REQ_UNPLUG_ALL 2
/* request information about the plugged state of memory blocks */
#define VIRTIO_MEM_REQ_STATE 3
struct virtio_mem_req_plug {
__virtio64 addr;
__virtio16 nb_blocks;
__virtio16 padding[3];
};
struct virtio_mem_req_unplug {
__virtio64 addr;
__virtio16 nb_blocks;
__virtio16 padding[3];
};
struct virtio_mem_req_state {
__virtio64 addr;
__virtio16 nb_blocks;
__virtio16 padding[3];
};
struct virtio_mem_req {
__virtio16 type;
__virtio16 padding[3];
union {
struct virtio_mem_req_plug plug;
struct virtio_mem_req_unplug unplug;
struct virtio_mem_req_state state;
} u;
};
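As a hedged illustration of the request layout (endianness conversion and the virtqueue plumbing are omitted, and the helper name is made up), a driver-side plug request might be filled in like this, assuming this header is included:

#include <stdint.h>
#include <string.h>

static void fill_plug_request(struct virtio_mem_req *req,
                              uint64_t addr, uint16_t nb_blocks)
{
    memset(req, 0, sizeof(*req));
    req->type = VIRTIO_MEM_REQ_PLUG;      /* cpu_to_virtio16() in real code */
    req->u.plug.addr = addr;              /* must be block_size aligned */
    req->u.plug.nb_blocks = nb_blocks;    /* consecutive blocks to plug */
}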
/* --- virtio-mem: host -> guest response --- */
/*
* Request processed successfully, applicable for
* - VIRTIO_MEM_REQ_PLUG
* - VIRTIO_MEM_REQ_UNPLUG
* - VIRTIO_MEM_REQ_UNPLUG_ALL
* - VIRTIO_MEM_REQ_STATE
*/
#define VIRTIO_MEM_RESP_ACK 0
/*
* Request denied - e.g. trying to plug more than requested, applicable for
* - VIRTIO_MEM_REQ_PLUG
*/
#define VIRTIO_MEM_RESP_NACK 1
/*
* Request cannot be processed right now, try again later, applicable for
* - VIRTIO_MEM_REQ_PLUG
* - VIRTIO_MEM_REQ_UNPLUG
* - VIRTIO_MEM_REQ_UNPLUG_ALL
*/
#define VIRTIO_MEM_RESP_BUSY 2
/*
* Error in request (e.g. addresses/alignment), applicable for
* - VIRTIO_MEM_REQ_PLUG
* - VIRTIO_MEM_REQ_UNPLUG
* - VIRTIO_MEM_REQ_STATE
*/
#define VIRTIO_MEM_RESP_ERROR 3
/* State of memory blocks is "plugged" */
#define VIRTIO_MEM_STATE_PLUGGED 0
/* State of memory blocks is "unplugged" */
#define VIRTIO_MEM_STATE_UNPLUGGED 1
/* State of memory blocks is "mixed" */
#define VIRTIO_MEM_STATE_MIXED 2
struct virtio_mem_resp_state {
__virtio16 state;
};
struct virtio_mem_resp {
__virtio16 type;
__virtio16 padding[3];
union {
struct virtio_mem_resp_state state;
} u;
};
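A matching hedged sketch for mapping the response codes onto errno-style results; the particular error numbers are this example's choice, not something the device spec dictates:

#include <errno.h>

static int virtio_mem_resp_to_errno(const struct virtio_mem_resp *resp)
{
    switch (resp->type) {                 /* virtio16_to_cpu() in real code */
    case VIRTIO_MEM_RESP_ACK:
        return 0;
    case VIRTIO_MEM_RESP_BUSY:
        return -EBUSY;                    /* retry the request later */
    case VIRTIO_MEM_RESP_NACK:
        return -ENOSPC;                   /* e.g. plugging more than requested */
    case VIRTIO_MEM_RESP_ERROR:
    default:
        return -EINVAL;                   /* bad address/alignment or unknown */
    }
}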
/* --- virtio-mem: configuration --- */
struct virtio_mem_config {
/* Block size and alignment. Cannot change. */
uint64_t block_size;
/* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */
uint16_t node_id;
uint8_t padding[6];
/* Start address of the memory region. Cannot change. */
uint64_t addr;
/* Region size (maximum). Cannot change. */
uint64_t region_size;
/*
* Currently usable region size. Can grow up to region_size. Can
* shrink due to VIRTIO_MEM_REQ_UNPLUG_ALL (in which case no config
* update will be sent).
*/
uint64_t usable_region_size;
/*
* Currently used size. Changes due to plug/unplug requests, but no
* config updates will be sent.
*/
uint64_t plugged_size;
/* Requested size. New plug requests cannot exceed it. Can change. */
uint64_t requested_size;
};
#endif /* _LINUX_VIRTIO_MEM_H */
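Given the config fields above, a hedged sketch of how a driver could work out how many blocks it still has to plug to reach the host's requested_size (config endianness handling omitted):

#include <stdint.h>

static uint64_t blocks_left_to_plug(const struct virtio_mem_config *cfg)
{
    if (cfg->requested_size <= cfg->plugged_size) {
        return 0;    /* at or above the request: only unplugging can help */
    }
    return (cfg->requested_size - cfg->plugged_size) / cfg->block_size;
}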

View file

@@ -84,6 +84,13 @@
* at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX 29
/* Alignment requirements for vring elements.
* When using pre-virtio 1.0 layout, these fall out naturally.
*/
#define VRING_AVAIL_ALIGN_SIZE 2
#define VRING_USED_ALIGN_SIZE 4
#define VRING_DESC_ALIGN_SIZE 16
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
/* Address (guest-physical). */
@@ -110,28 +117,47 @@ struct vring_used_elem {
__virtio32 len;
};
typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
vring_used_elem_t;
struct vring_used {
__virtio16 flags;
__virtio16 idx;
struct vring_used_elem ring[];
vring_used_elem_t ring[];
};
/*
* The ring element addresses are passed between components with different
* alignment assumptions. Thus, we might need to decrease the compiler-selected
* alignment, and so must use a typedef to make sure the aligned attribute
* actually takes hold:
*
* https://gcc.gnu.org/onlinedocs//gcc/Common-Type-Attributes.html#Common-Type-Attributes
*
* When used on a struct, or struct member, the aligned attribute can only
* increase the alignment; in order to decrease it, the packed attribute must
* be specified as well. When used as part of a typedef, the aligned attribute
* can both increase and decrease alignment, and specifying the packed
* attribute generates a warning.
*/
typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
vring_desc_t;
typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
vring_avail_t;
typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
vring_used_t;
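The GCC behaviour quoted above can be verified at build time; a hedged sketch using C11 _Static_assert and the GNU __alignof__ extension, placed in a translation unit that includes this header:

_Static_assert(__alignof__(vring_desc_t)  == VRING_DESC_ALIGN_SIZE,
               "vring_desc_t did not take the 16-byte alignment");
_Static_assert(__alignof__(vring_avail_t) == VRING_AVAIL_ALIGN_SIZE,
               "vring_avail_t did not take the 2-byte alignment");
_Static_assert(__alignof__(vring_used_t)  == VRING_USED_ALIGN_SIZE,
               "vring_used_t did not take the 4-byte alignment");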
struct vring {
unsigned int num;
struct vring_desc *desc;
vring_desc_t *desc;
struct vring_avail *avail;
vring_avail_t *avail;
struct vring_used *used;
vring_used_t *used;
};
/* Alignment requirements for vring elements.
* When using pre-virtio 1.0 layout, these fall out naturally.
*/
#define VRING_AVAIL_ALIGN_SIZE 2
#define VRING_USED_ALIGN_SIZE 4
#define VRING_DESC_ALIGN_SIZE 16
#ifndef VIRTIO_RING_NO_LEGACY
/* The standard layout for the ring is a continuous chunk of memory which looks
* like this. We assume num is a power of 2.
@@ -179,6 +205,8 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
+ sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
}
#endif /* VIRTIO_RING_NO_LEGACY */
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
* we have just incremented index from old to new_idx,