Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-08-03 07:43:54 -06:00)
x86 and machine queue, 2017-05-11
Highlights:
* New "-numa cpu" option
* NUMA distance configuration
* migration/i386 vmstatification

-----BEGIN PGP SIGNATURE-----
iQIcBAABCAAGBQJZFLh3AAoJECgHk2+YTcWmppQQAJe9Y5a3VwHXqvHbwBHX2ysn
RZDAUPd9DWpbM+UUydyKOVIZ7u5RXbbVq4E0NeCD8VYYd+grZB5Wo1cAzy3b4U2j
2s+MDqaPMtZtGoqxTsyQOVoVxazT5Kf1zglK+iUEzik44J7LGdro+ty2Z7Ut2c11
q9rE/GNS78czBm7c4lxgkxXW4N95K/tEGlLtDQ7uct//3U/ZimF+mO6GcbVFlOWT
4iEbOz2sqvBVv22nLJRufiPgFNIW4hizAz5KBWxwGFCCKvT3N6yYNKKjzEpCw+jE
lpjIRODU02yIZZZY841fLRtyrk7p4zORS8jRaHTdEJgb5bGc/YazxxVL8nzRQT1W
VxFwAMd+UNrDkV24hpN++Ln2O+b3kwcGZ7uA/qu9d5WvSYUKXlHqcMJ35q6zuhAI
/ecfYO7EZfVP86VjIt5IH04iV8RChA9Q6de+kQEFa6wHUxufeCOwCFqukGo8zj07
plX8NcjnzYmSXKnYjHOHao4rKT+DiJhRB60rFiMeKP/qvKbZPjtgsIeonhHm53qZ
/QwkhowahHKkpAnetIl0QHm8KS4YudAofMi/Fl+he4gRkEbSQVAo6iQb2L4cjcLC
LNSDDsIVWGem4gCR+vcsFqB3lggRDfltHXm15JKh92UMpOr6RI6s8pD55T7EdnPC
CfdxWB5kYM6/lLbOHj94
=48wH
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'ehabkost/tags/x86-and-machine-pull-request' into staging

x86 and machine queue, 2017-05-11

Highlights:
* New "-numa cpu" option
* NUMA distance configuration
* migration/i386 vmstatification

# gpg: Signature made Thu 11 May 2017 08:16:07 PM BST
# gpg:                using RSA key 0x2807936F984DC5A6
# gpg: Good signature from "Eduardo Habkost <ehabkost@redhat.com>"
# gpg: Note: This key has expired!
# Primary key fingerprint: 5A32 2FD5 ABC4 D3DB ACCF D1AA 2807 936F 984D C5A6

* ehabkost/tags/x86-and-machine-pull-request: (29 commits)
  migration/i386: Remove support for pre-0.12 formats
  vmstatification: i386 FPReg
  migration/i386: Remove old non-softfloat 64bit FP support
  tests: check -numa node,cpu=props_list usecase
  numa: add '-numa cpu,...' option for property based node mapping
  numa: remove node_cpu bitmaps as they are no longer used
  numa: use possible_cpus for not mapped CPUs check
  machine: call machine init from wrapper
  numa: remove no longer need numa_post_machine_init()
  tests: numa: add case for QMP command query-cpus
  QMP: include CpuInstanceProperties into query_cpus output
  virt-arm: get numa node mapping from possible_cpus instead of numa_get_node_for_cpu()
  spapr: get numa node mapping from possible_cpus instead of numa_get_node_for_cpu()
  pc: get numa node mapping from possible_cpus instead of numa_get_node_for_cpu()
  numa: do default mapping based on possible_cpus instead of node_cpu bitmaps
  numa: mirror cpu to node mapping in MachineState::possible_cpus
  numa: add check that board supports cpu_index to node mapping
  virt-arm: add node-id property to CPU
  pc: add node-id property to CPU
  spapr: add node-id property to sPAPR core
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit ba9915e1f8
36 changed files with 1153 additions and 426 deletions
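For orientation, the command-line surface added by this queue is used roughly as follows. This is an illustrative sketch pieced together from the option names visible in the diff below (nodeid=, node-id=, and the src/dst/val fields of the new dist option); it is not a command line taken from the pull request:

    qemu-system-x86_64 -smp 2,sockets=2,maxcpus=2 \
        -numa node,nodeid=0 -numa node,nodeid=1 \
        -numa cpu,node-id=0,socket-id=0 \
        -numa cpu,node-id=1,socket-id=1 \
        -numa dist,src=0,dst=1,val=21

'-numa node' declares the nodes, '-numa cpu' binds CPUs to an already declared node by their topology properties, and '-numa dist' sets the distance from a source node to a destination node.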
numa.c: 310 changed lines
@@ -51,6 +51,7 @@ static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
  * For all nodes, nodeid < max_numa_nodeid
  */
 int nb_numa_nodes;
+bool have_numa_distance;
 NodeInfo numa_info[MAX_NODES];
 
 void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
@@ -140,10 +141,12 @@ uint32_t numa_get_node(ram_addr_t addr, Error **errp)
     return -1;
 }
 
-static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp)
+static void parse_numa_node(MachineState *ms, NumaNodeOptions *node,
+                            QemuOpts *opts, Error **errp)
 {
     uint16_t nodenr;
     uint16List *cpus = NULL;
+    MachineClass *mc = MACHINE_GET_CLASS(ms);
 
     if (node->has_nodeid) {
         nodenr = node->nodeid;
@@ -162,7 +165,12 @@ static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp)
         return;
     }
 
+    if (!mc->cpu_index_to_instance_props) {
+        error_report("NUMA is not supported by this machine-type");
+        exit(1);
+    }
     for (cpus = node->cpus; cpus; cpus = cpus->next) {
+        CpuInstanceProperties props;
         if (cpus->value >= max_cpus) {
             error_setg(errp,
                        "CPU index (%" PRIu16 ")"
@@ -170,7 +178,10 @@ static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp)
                        cpus->value, max_cpus);
             return;
         }
-        bitmap_set(numa_info[nodenr].node_cpu, cpus->value, 1);
+        props = mc->cpu_index_to_instance_props(ms, cpus->value);
+        props.node_id = nodenr;
+        props.has_node_id = true;
+        machine_set_cpu_numa_node(ms, &props, &error_fatal);
     }
 
     if (node->has_mem && node->has_memdev) {
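The hunk above replaces the old node_cpu bitmap bookkeeping with the board-provided cpu_index_to_instance_props() hook plus machine_set_cpu_numa_node(). As a rough sketch of what such a hook produces (a hypothetical board with one CPU per socket, written against QEMU's internal headers; this is not code from the series):

    /* Hypothetical example only: map a flat cpu_index to instance
     * properties for an imaginary one-CPU-per-socket board.
     */
    static CpuInstanceProperties
    toy_board_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
    {
        CpuInstanceProperties props = { 0 };

        props.has_socket_id = true;
        props.socket_id = cpu_index;   /* one CPU per socket in this sketch */
        return props;
    }

machine_set_cpu_numa_node() then matches these properties against the board's possible_cpus list and records the node assignment there.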
@@ -212,9 +223,47 @@ static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp)
     max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1);
 }
 
+static void parse_numa_distance(NumaDistOptions *dist, Error **errp)
+{
+    uint16_t src = dist->src;
+    uint16_t dst = dist->dst;
+    uint8_t val = dist->val;
+
+    if (src >= MAX_NODES || dst >= MAX_NODES) {
+        error_setg(errp,
+                   "Invalid node %" PRIu16
+                   ", max possible could be %" PRIu16,
+                   MAX(src, dst), MAX_NODES);
+        return;
+    }
+
+    if (!numa_info[src].present || !numa_info[dst].present) {
+        error_setg(errp, "Source/Destination NUMA node is missing. "
+                   "Please use '-numa node' option to declare it first.");
+        return;
+    }
+
+    if (val < NUMA_DISTANCE_MIN) {
+        error_setg(errp, "NUMA distance (%" PRIu8 ") is invalid, "
+                   "it shouldn't be less than %d.",
+                   val, NUMA_DISTANCE_MIN);
+        return;
+    }
+
+    if (src == dst && val != NUMA_DISTANCE_MIN) {
+        error_setg(errp, "Local distance of node %d should be %d.",
+                   src, NUMA_DISTANCE_MIN);
+        return;
+    }
+
+    numa_info[src].distance[dst] = val;
+    have_numa_distance = true;
+}
+
 static int parse_numa(void *opaque, QemuOpts *opts, Error **errp)
 {
     NumaOptions *object = NULL;
+    MachineState *ms = opaque;
     Error *err = NULL;
 
     {
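In command-line terms, the checks in parse_numa_distance() amount to the following (illustrative examples, assuming the src=/dst=/val= spelling of the new dist option and NUMA_DISTANCE_MIN == 10):

    -numa dist,src=0,dst=1,val=21   accepted, if nodes 0 and 1 were declared with -numa node
    -numa dist,src=0,dst=1,val=5    rejected: distance below NUMA_DISTANCE_MIN
    -numa dist,src=0,dst=0,val=20   rejected: a node's distance to itself must be NUMA_DISTANCE_MIN
    -numa dist,src=0,dst=9,val=21   rejected unless node 9 was declared first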
@@ -229,12 +278,33 @@ static int parse_numa(void *opaque, QemuOpts *opts, Error **errp)
 
     switch (object->type) {
     case NUMA_OPTIONS_TYPE_NODE:
-        numa_node_parse(&object->u.node, opts, &err);
+        parse_numa_node(ms, &object->u.node, opts, &err);
         if (err) {
             goto end;
         }
         nb_numa_nodes++;
         break;
+    case NUMA_OPTIONS_TYPE_DIST:
+        parse_numa_distance(&object->u.dist, &err);
+        if (err) {
+            goto end;
+        }
+        break;
+    case NUMA_OPTIONS_TYPE_CPU:
+        if (!object->u.cpu.has_node_id) {
+            error_setg(&err, "Missing mandatory node-id property");
+            goto end;
+        }
+        if (!numa_info[object->u.cpu.node_id].present) {
+            error_setg(&err, "Invalid node-id=%" PRId64 ", NUMA node must be "
+                "defined with -numa node,nodeid=ID before it's used with "
+                "-numa cpu,node-id=ID", object->u.cpu.node_id);
+            goto end;
+        }
+
+        machine_set_cpu_numa_node(ms, qapi_NumaCpuOptions_base(&object->u.cpu),
+                                  &err);
+        break;
     default:
         abort();
     }
@@ -249,60 +319,118 @@ end:
     return 0;
 }
 
-static char *enumerate_cpus(unsigned long *cpus, int max_cpus)
-{
-    int cpu;
-    bool first = true;
-    GString *s = g_string_new(NULL);
-
-    for (cpu = find_first_bit(cpus, max_cpus);
-         cpu < max_cpus;
-         cpu = find_next_bit(cpus, max_cpus, cpu + 1)) {
-        g_string_append_printf(s, "%s%d", first ? "" : " ", cpu);
-        first = false;
-    }
-    return g_string_free(s, FALSE);
-}
-
-static void validate_numa_cpus(void)
-{
-    int i;
-    unsigned long *seen_cpus = bitmap_new(max_cpus);
-
-    for (i = 0; i < nb_numa_nodes; i++) {
-        if (bitmap_intersects(seen_cpus, numa_info[i].node_cpu, max_cpus)) {
-            bitmap_and(seen_cpus, seen_cpus,
-                       numa_info[i].node_cpu, max_cpus);
-            error_report("CPU(s) present in multiple NUMA nodes: %s",
-                         enumerate_cpus(seen_cpus, max_cpus));
-            g_free(seen_cpus);
-            exit(EXIT_FAILURE);
-        }
-        bitmap_or(seen_cpus, seen_cpus,
-                  numa_info[i].node_cpu, max_cpus);
-    }
-
-    if (!bitmap_full(seen_cpus, max_cpus)) {
-        char *msg;
-        bitmap_complement(seen_cpus, seen_cpus, max_cpus);
-        msg = enumerate_cpus(seen_cpus, max_cpus);
-        error_report("warning: CPU(s) not present in any NUMA nodes: %s", msg);
-        error_report("warning: All CPU(s) up to maxcpus should be described "
-                     "in NUMA config");
-        g_free(msg);
-    }
-    g_free(seen_cpus);
-}
-
-void parse_numa_opts(MachineClass *mc)
-{
-    int i;
-
-    for (i = 0; i < MAX_NODES; i++) {
-        numa_info[i].node_cpu = bitmap_new(max_cpus);
-    }
-
-    if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, NULL, NULL)) {
+/* If all node pair distances are symmetric, then only distances
+ * in one direction are enough. If there is even one asymmetric
+ * pair, though, then all distances must be provided. The
+ * distance from a node to itself is always NUMA_DISTANCE_MIN,
+ * so providing it is never necessary.
+ */
+static void validate_numa_distance(void)
+{
+    int src, dst;
+    bool is_asymmetrical = false;
+
+    for (src = 0; src < nb_numa_nodes; src++) {
+        for (dst = src; dst < nb_numa_nodes; dst++) {
+            if (numa_info[src].distance[dst] == 0 &&
+                numa_info[dst].distance[src] == 0) {
+                if (src != dst) {
+                    error_report("The distance between node %d and %d is "
+                                 "missing, at least one distance value "
+                                 "between each nodes should be provided.",
+                                 src, dst);
+                    exit(EXIT_FAILURE);
+                }
+            }
+
+            if (numa_info[src].distance[dst] != 0 &&
+                numa_info[dst].distance[src] != 0 &&
+                numa_info[src].distance[dst] !=
+                numa_info[dst].distance[src]) {
+                is_asymmetrical = true;
+            }
+        }
+    }
+
+    if (is_asymmetrical) {
+        for (src = 0; src < nb_numa_nodes; src++) {
+            for (dst = 0; dst < nb_numa_nodes; dst++) {
+                if (src != dst && numa_info[src].distance[dst] == 0) {
+                    error_report("At least one asymmetrical pair of "
+                            "distances is given, please provide distances "
+                            "for both directions of all node pairs.");
+                    exit(EXIT_FAILURE);
+                }
+            }
+        }
+    }
+}
+
+static void complete_init_numa_distance(void)
+{
+    int src, dst;
+
+    /* Fixup NUMA distance by symmetric policy because if it is an
+     * asymmetric distance table, it should be a complete table and
+     * there would not be any missing distance except local node, which
+     * is verified by validate_numa_distance above.
+     */
+    for (src = 0; src < nb_numa_nodes; src++) {
+        for (dst = 0; dst < nb_numa_nodes; dst++) {
+            if (numa_info[src].distance[dst] == 0) {
+                if (src == dst) {
+                    numa_info[src].distance[dst] = NUMA_DISTANCE_MIN;
+                } else {
+                    numa_info[src].distance[dst] = numa_info[dst].distance[src];
+                }
+            }
+        }
+    }
+}
+
+void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
+                                 int nb_nodes, ram_addr_t size)
+{
+    int i;
+    uint64_t usedmem = 0;
+
+    /* Align each node according to the alignment
+     * requirements of the machine class
+     */
+
+    for (i = 0; i < nb_nodes - 1; i++) {
+        nodes[i].node_mem = (size / nb_nodes) &
+                            ~((1 << mc->numa_mem_align_shift) - 1);
+        usedmem += nodes[i].node_mem;
+    }
+    nodes[i].node_mem = size - usedmem;
+}
+
+void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
+                                  int nb_nodes, ram_addr_t size)
+{
+    int i;
+    uint64_t usedmem = 0, node_mem;
+    uint64_t granularity = size / nb_nodes;
+    uint64_t propagate = 0;
+
+    for (i = 0; i < nb_nodes - 1; i++) {
+        node_mem = (granularity + propagate) &
+                   ~((1 << mc->numa_mem_align_shift) - 1);
+        propagate = granularity + propagate - node_mem;
+        nodes[i].node_mem = node_mem;
+        usedmem += node_mem;
+    }
+    nodes[i].node_mem = size - usedmem;
+}
+
+void parse_numa_opts(MachineState *ms)
+{
+    int i;
+    const CPUArchIdList *possible_cpus;
+    MachineClass *mc = MACHINE_GET_CLASS(ms);
+
+    if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, NULL)) {
         exit(1);
     }
 
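A worked example for the two allocators above (my numbers, not from the patch): take size = 3090 MiB, nb_nodes = 3 and an 8 MiB alignment (numa_mem_align_shift == 23), so granularity = 1030 MiB.

    numa_legacy_auto_assign_ram:  1024 MiB, 1024 MiB, 1042 MiB
    numa_default_auto_assign_ram: 1024 MiB, 1032 MiB, 1034 MiB

The legacy helper rounds every non-final node down and dumps the whole remainder on the last node; the new helper carries the rounding loss forward in 'propagate', so the leftover is spread more evenly across the nodes.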
@@ -336,17 +464,8 @@ void parse_numa_opts(MachineClass *mc)
             }
         }
         if (i == nb_numa_nodes) {
-            uint64_t usedmem = 0;
-
-            /* Align each node according to the alignment
-             * requirements of the machine class
-             */
-            for (i = 0; i < nb_numa_nodes - 1; i++) {
-                numa_info[i].node_mem = (ram_size / nb_numa_nodes) &
-                                        ~((1 << mc->numa_mem_align_shift) - 1);
-                usedmem += numa_info[i].node_mem;
-            }
-            numa_info[i].node_mem = ram_size - usedmem;
+            assert(mc->numa_auto_assign_ram);
+            mc->numa_auto_assign_ram(mc, numa_info, nb_numa_nodes, ram_size);
         }
 
         numa_total = 0;
@@ -366,50 +485,55 @@ void parse_numa_opts(MachineClass *mc)
 
         numa_set_mem_ranges();
 
-        for (i = 0; i < nb_numa_nodes; i++) {
-            if (!bitmap_empty(numa_info[i].node_cpu, max_cpus)) {
-                break;
-            }
-        }
-        /* Historically VCPUs were assigned in round-robin order to NUMA
-         * nodes. However it causes issues with guest not handling it nice
-         * in case where cores/threads from a multicore CPU appear on
-         * different nodes. So allow boards to override default distribution
-         * rule grouping VCPUs by socket so that VCPUs from the same socket
-         * would be on the same node.
-         */
-        if (i == nb_numa_nodes) {
-            for (i = 0; i < max_cpus; i++) {
-                unsigned node_id = i % nb_numa_nodes;
-                if (mc->cpu_index_to_socket_id) {
-                    node_id = mc->cpu_index_to_socket_id(i) % nb_numa_nodes;
-                }
-
-                set_bit(i, numa_info[node_id].node_cpu);
-            }
-        }
-
-        validate_numa_cpus();
+        /* assign CPUs to nodes using board provided default mapping */
+        if (!mc->cpu_index_to_instance_props || !mc->possible_cpu_arch_ids) {
+            error_report("default CPUs to NUMA node mapping isn't supported");
+            exit(1);
+        }
+
+        possible_cpus = mc->possible_cpu_arch_ids(ms);
+        for (i = 0; i < possible_cpus->len; i++) {
+            if (possible_cpus->cpus[i].props.has_node_id) {
+                break;
+            }
+        }
+
+        /* no CPUs are assigned to NUMA nodes */
+        if (i == possible_cpus->len) {
+            for (i = 0; i < max_cpus; i++) {
+                CpuInstanceProperties props;
+                /* fetch default mapping from board and enable it */
+                props = mc->cpu_index_to_instance_props(ms, i);
+                props.has_node_id = true;
+
+                machine_set_cpu_numa_node(ms, &props, &error_fatal);
+            }
+        }
+
+        /* QEMU needs at least all unique node pair distances to build
+         * the whole NUMA distance table. QEMU treats the distance table
+         * as symmetric by default, i.e. distance A->B == distance B->A.
+         * Thus, QEMU is able to complete the distance table
+         * initialization even though only distance A->B is provided and
+         * distance B->A is not. QEMU knows the distance of a node to
+         * itself is always 10, so A->A distances may be omitted. When
+         * the distances of two nodes of a pair differ, i.e. distance
+         * A->B != distance B->A, then that means the distance table is
+         * asymmetric. In this case, the distances for both directions
+         * of all node pairs are required.
+         */
+        if (have_numa_distance) {
+            /* Validate enough NUMA distance information was provided. */
+            validate_numa_distance();
+
+            /* Validation succeeded, now fill in any missing distances. */
+            complete_init_numa_distance();
+        }
     } else {
         numa_set_mem_node_id(0, ram_size, 0);
     }
 }
 
-void numa_post_machine_init(void)
-{
-    CPUState *cpu;
-    int i;
-
-    CPU_FOREACH(cpu) {
-        for (i = 0; i < nb_numa_nodes; i++) {
-            assert(cpu->cpu_index < max_cpus);
-            if (test_bit(cpu->cpu_index, numa_info[i].node_cpu)) {
-                cpu->numa_node = i;
-            }
-        }
-    }
-}
-
 static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner,
                                            const char *name,
                                            uint64_t ram_size)
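To make the completion step above concrete (my example, not from the series): with two nodes and only '-numa dist,src=0,dst=1,val=21' supplied, validate_numa_distance() is satisfied and complete_init_numa_distance() fills the table as

    distance[0][0] = 10   local, NUMA_DISTANCE_MIN
    distance[0][1] = 21   user supplied
    distance[1][0] = 21   copied from the 0->1 entry
    distance[1][1] = 10   local, NUMA_DISTANCE_MIN

Only when the two directions of some pair are given with different values does the table count as asymmetric, and then every off-diagonal entry must be supplied explicitly.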
@@ -560,20 +684,6 @@ MemdevList *qmp_query_memdev(Error **errp)
     return list;
 }
 
-int numa_get_node_for_cpu(int idx)
-{
-    int i;
-
-    assert(idx < max_cpus);
-
-    for (i = 0; i < nb_numa_nodes; i++) {
-        if (test_bit(idx, numa_info[i].node_cpu)) {
-            break;
-        }
-    }
-    return i;
-}
-
 void ram_block_notifier_add(RAMBlockNotifier *n)
 {
     QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next);