
The tuxrun tests send a series of strings to the guest to log in and then run commands. Since we had been unable to match on console output that is not followed by a newline, the tests used many time.sleep() statements to pretend to synchronize with the guest. This proved unreliable for the aarch64be instance of the tuxrun tests, with the test often hanging. The hang is a very subtle timing problem; it is suspected that some (otherwise apparently harmless) I/O error messages could be filling the FIFO buffers and stalling interaction with the guest.

With the newly rewritten console interaction able to match strings that lack a trailing newline, the tuxrun tests can now match directly on the login prompt and/or the shell PS1 prompt.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2689
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20241121154218.1423005-17-berrange@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20241121165806.476008-17-alex.bennee@linaro.org>
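In practical terms, the change replaces blind delays with synchronization on the prompt itself. A minimal sketch, using the helpers this file already imports (the delay value shown for the old style is illustrative):

    # before: send a string, sleep, and hope the guest kept up
    exec_command(self, 'root')
    time.sleep(0.2)

    # after: block until the prompt appears; no trailing newline needed
    exec_command_and_wait_for_pattern(self, 'root', 'root@tuxtest:~#')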
159 lines
5.3 KiB
Python
# Functional test that boots known good tuxboot images the same way
# that tuxrun (www.tuxrun.org) does. This tool is used by things like
# the LKFT project to run regression tests on kernels.
#
# Copyright (c) 2023 Linaro Ltd.
#
# Author:
#  Alex Bennée <alex.bennee@linaro.org>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import os
import stat
import time

from qemu_test import QemuSystemTest
from qemu_test import exec_command, exec_command_and_wait_for_pattern
from qemu_test import wait_for_console_pattern
from qemu_test import has_cmd, run_cmd, get_qemu_img

class TuxRunBaselineTest(QemuSystemTest):

    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0'
    # Tests are ~10-40s, allow for --debug/--enable-gcov overhead
    timeout = 100

    def get_tag(self, tagname, default=None):
        """
        Get the metadata tag or return the default.
        """
        utag = self._get_unique_tag_val(tagname)
        print(f"{tagname}/{default} -> {utag}")
        if utag:
            return utag

        return default

    def setUp(self):
        super().setUp()

        # We need zstd for all the tuxrun tests
        (has_zstd, msg) = has_cmd('zstd')
        if has_zstd is False:
            self.skipTest(msg)
        self.zstd = 'zstd'

        # Pre-init TuxRun specific settings: most machines work with
        # reasonable defaults but we sometimes need to tweak the
        # config. To avoid open-coding everything we store all these
        # details in the metadata for each test.

        # The tuxboot tag matches the root directory
        self.tuxboot = self.arch

        # Most Linux guests use ttyS0 for their serial port
        self.console = "ttyS0"

        # Does the machine shut down QEMU nicely on "halt"?
        self.wait_for_shutdown = True

        self.root = "vda"

        # Occasionally we need extra devices to hook things up
        self.extradev = None

        self.qemu_img = get_qemu_img(self)

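    # Subclasses are expected to override the defaults set up above
    # (console, root, extradev, tuxboot) where a machine needs
    # different settings; see the illustrative subclass sketch at the
    # end of this file.
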
    def wait_for_console_pattern(self, success_message, vm=None):
        wait_for_console_pattern(self, success_message,
                                 failure_message='Kernel panic - not syncing',
                                 vm=vm)

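    # Note the wrapper above adds a common failure pattern, so every
    # console wait also aborts promptly if the guest kernel panics
    # instead of hanging until the test timeout fires.
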
    def fetch_tuxrun_assets(self, kernel_asset, rootfs_asset, dtb_asset=None):
        """
        Fetch the TuxBoot assets.
        """
        kernel_image = kernel_asset.fetch()
        disk_image_zst = rootfs_asset.fetch()

        disk_image = self.workdir + "/rootfs.ext4"

        run_cmd([self.zstd, "-f", "-d", disk_image_zst,
                 "-o", disk_image])
        # zstd copies the source archive's permissions to the output
        # file, so we must make it writable for QEMU
        os.chmod(disk_image, stat.S_IRUSR | stat.S_IWUSR)

        dtb = dtb_asset.fetch() if dtb_asset is not None else None

        return (kernel_image, disk_image, dtb)

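    # For reference, the decompression step above is equivalent to
    # running something like (paths illustrative):
    #   zstd -f -d rootfs.ext4.zst -o <workdir>/rootfs.ext4
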
    def prepare_run(self, kernel, disk, drive, dtb=None, console_index=0):
        """
        Set up the VM and add the common parameters to the system.
        """
        self.vm.set_console(console_index=console_index)

        # all the block devices are raw ext4 images
        blockdev = "driver=raw,file.driver=file," \
                   + f"file.filename={disk},node-name=hd0"

        kcmd_line = self.KERNEL_COMMON_COMMAND_LINE
        kcmd_line += f" root=/dev/{self.root}"
        kcmd_line += f" console={self.console}"

        self.vm.add_args('-kernel', kernel,
                         '-append', kcmd_line,
                         '-blockdev', blockdev)

        # Sometimes we need extra devices attached
        if self.extradev:
            self.vm.add_args('-device', self.extradev)

        self.vm.add_args('-device',
                         f"{drive},drive=hd0")

        # Some machines need an explicit DTB
        if dtb:
            self.vm.add_args('-dtb', dtb)

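    # Taken together, prepare_run() assembles a command line roughly
    # like this (illustrative, using the virtio-blk defaults):
    #   -kernel Image
    #   -append 'printk.time=0 root=/dev/vda console=ttyS0'
    #   -blockdev driver=raw,file.driver=file,file.filename=.../rootfs.ext4,node-name=hd0
    #   -device virtio-blk-device,drive=hd0
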
    def run_tuxtest_tests(self, haltmsg):
        """
        Wait for the system to boot up and reach the login prompt, then
        run a few commands on the console. Finally trigger a shutdown
        and wait for QEMU to exit cleanly.
        """
        ps1 = 'root@tuxtest:~#'
        self.wait_for_console_pattern('tuxtest login:')
        exec_command_and_wait_for_pattern(self, 'root', ps1)
        exec_command_and_wait_for_pattern(self, 'cat /proc/interrupts', ps1)
        exec_command_and_wait_for_pattern(self, 'cat /proc/self/maps', ps1)
        exec_command_and_wait_for_pattern(self, 'uname -a', ps1)
        exec_command_and_wait_for_pattern(self, 'halt', haltmsg)

        # Wait for the VM to shut down gracefully if it can
        if self.wait_for_shutdown:
            self.vm.wait()
        else:
            self.vm.shutdown()

    def common_tuxrun(self,
                      kernel_asset,
                      rootfs_asset,
                      dtb_asset=None,
                      drive="virtio-blk-device",
                      haltmsg="reboot: System halted",
                      console_index=0):
        """
        Common path for LKFT tests. Unless we need to do something
        special with the command line we can process most things using
        the tag metadata.
        """
        (kernel, disk, dtb) = self.fetch_tuxrun_assets(kernel_asset,
                                                       rootfs_asset,
                                                       dtb_asset)

        self.prepare_run(kernel, disk, drive, dtb, console_index)
        self.vm.launch()
        self.run_tuxtest_tests(haltmsg)
        os.remove(disk)
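
For illustration, a concrete test built on this base class would look roughly like the sketch below. It assumes the framework's Asset helper; the machine name, URLs and hashes are hypothetical placeholders, not real tuxboot artefacts:

from qemu_test import Asset

class TuxRunExampleTest(TuxRunBaselineTest):

    # Hypothetical assets: these URLs and hashes are placeholders
    ASSET_KERNEL = Asset(
        'https://example.com/tuxboot/Image',
        '0000000000000000000000000000000000000000000000000000000000000000')
    ASSET_ROOTFS = Asset(
        'https://example.com/tuxboot/rootfs.ext4.zst',
        '0000000000000000000000000000000000000000000000000000000000000000')

    def test_example(self):
        self.set_machine('virt')    # machine name is an assumption
        self.common_tuxrun(kernel_asset=self.ASSET_KERNEL,
                           rootfs_asset=self.ASSET_ROOTFS)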