mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-08 02:03:56 -06:00
Block layer patches (rebased Stefan's pull request)
-----BEGIN PGP SIGNATURE----- Version: GnuPG v2.0.22 (GNU/Linux) iQIcBAABAgAGBQJWRLF4AAoJEH8JsnLIjy/WpIcQAK+gSTtFQA8EyWqYVENyTDcI MTrvWI3p87XGTO11jNS8zJQIXGjenwM0jfAVoUAUHv+i9sV+/XORK1txpDZF3dhU Oy+16owb75e1mbBX+PYilU2SG/Uy5m81O4EX5yFXelh4xdTG1yz+ZcZjc6sKno5A DUJEkQDRgged5y9o2nFNjWA1jC617mGtlywmAqBRvDQA8cjO5ePF0P4jCe+RPpqW BRcLO59THqGkbFlB5JahSYo5gt1QwMoEGnT5YT8NGNcDUp1Cm+PfkTZrdgcv5nGj Or4nvmt7OuLIWG3yPeJJSfRU7HQbi2GmE6LtjEBPYWRjSzDxDMRyty3US2VtvhT2 aIa7HDX81yclUeyyeRoLL9PFvo759D9QM9OpuY1JNOuYqcJ56DHPA7N9dX2oK3wt tMbPxpH1sAzD/12demj+ULrxAwcDbfyz5QvD8Sx/+6RHxRKXuUSoPxI8RN/yzyeR aQuKKNByymFSJfWEm3s4mHCtf/lieyESMBLUnAVyP+doZOOI3dL2+DZIBzYXFQWp RFndoMLmHDnSCjo72J7N3KF2l2Fc5q94aXZJcp/erpVDkAf7XOXSxcqSWmx6Swn3 Uf18v/1Y2NJonUls7VFHpuT1AbPR2F1VYOh0VyrGzNnWqspH84ain2p2TxSXOLcV WkwMN5B2cXbs2nbtaqk+ =9mKZ -----END PGP SIGNATURE----- Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging Block layer patches (rebased Stefan's pull request) # gpg: Signature made Thu 12 Nov 2015 15:34:16 GMT using RSA key ID C88F2FD6 # gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" * remotes/kevin/tags/for-upstream: (43 commits) block: Update copyright of the accounting code scsi-disk: Account for failed operations macio: Account for failed operations ide: Account for failed and invalid operations atapi: Account for failed and invalid operations xen_disk: Account for failed and invalid operations virtio-blk: Account for failed and invalid operations nvme: Account for failed and invalid operations iotests: Add test for the block device statistics block: Use QEMU_CLOCK_VIRTUAL for the accounting code in qtest mode qemu-io: Account for failed, invalid and flush operations block: New option to define the intervals for collecting I/O statistics block: Add average I/O queue depth to BlockDeviceTimedStats block: Compute minimum, maximum and average I/O latencies block: Allow configuring whether to account failed and invalid ops block: Add statistics for failed and invalid 
I/O operations block: Add idle_time_ns to BlockDeviceStats util: Infrastructure for computing recent averages block: define 'clock_type' for the accounting code ide: Account for write operations correctly ... Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
commit
b2df6a79df
42 changed files with 2620 additions and 298 deletions
|
@ -47,6 +47,8 @@ check-unit-y += tests/test-thread-pool$(EXESUF)
|
|||
gcov-files-test-thread-pool-y = thread-pool.c
|
||||
gcov-files-test-hbitmap-y = util/hbitmap.c
|
||||
check-unit-y += tests/test-hbitmap$(EXESUF)
|
||||
gcov-files-test-hbitmap-y = blockjob.c
|
||||
check-unit-y += tests/test-blockjob-txn$(EXESUF)
|
||||
check-unit-y += tests/test-x86-cpuid$(EXESUF)
|
||||
# all code tested by test-x86-cpuid is inside topology.h
|
||||
gcov-files-test-x86-cpuid-y =
|
||||
|
@ -81,6 +83,7 @@ check-unit-y += tests/test-crypto-cipher$(EXESUF)
|
|||
check-unit-$(CONFIG_GNUTLS) += tests/test-crypto-tlscredsx509$(EXESUF)
|
||||
check-unit-$(CONFIG_GNUTLS) += tests/test-crypto-tlssession$(EXESUF)
|
||||
check-unit-$(CONFIG_LINUX) += tests/test-qga$(EXESUF)
|
||||
check-unit-y += tests/test-timed-average$(EXESUF)
|
||||
|
||||
check-block-$(CONFIG_POSIX) += tests/qemu-iotests-quick.sh
|
||||
|
||||
|
@ -390,6 +393,7 @@ tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y)
|
|||
tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y)
|
||||
tests/test-rfifolock$(EXESUF): tests/test-rfifolock.o $(test-util-obj-y)
|
||||
tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y)
|
||||
tests/test-blockjob-txn$(EXESUF): tests/test-blockjob-txn.o $(test-block-obj-y) $(test-util-obj-y)
|
||||
tests/test-thread-pool$(EXESUF): tests/test-thread-pool.o $(test-block-obj-y)
|
||||
tests/test-iov$(EXESUF): tests/test-iov.o $(test-util-obj-y)
|
||||
tests/test-hbitmap$(EXESUF): tests/test-hbitmap.o $(test-util-obj-y)
|
||||
|
@ -409,6 +413,9 @@ tests/test-vmstate$(EXESUF): tests/test-vmstate.o \
|
|||
migration/vmstate.o migration/qemu-file.o migration/qemu-file-buf.o \
|
||||
migration/qemu-file-unix.o qjson.o \
|
||||
$(test-qom-obj-y)
|
||||
tests/test-timed-average$(EXESUF): tests/test-timed-average.o qemu-timer.o \
|
||||
libqemuutil.a stubs/clock-warp.o stubs/cpu-get-icount.o \
|
||||
stubs/notify-event.o stubs/replay.o
|
||||
|
||||
tests/test-qapi-types.c tests/test-qapi-types.h :\
|
||||
$(SRC_PATH)/tests/qapi-schema/qapi-schema-test.json $(SRC_PATH)/scripts/qapi-types.py $(qapi-py)
|
||||
|
|
|
@ -36,6 +36,23 @@ def try_remove(img):
|
|||
pass
|
||||
|
||||
|
||||
def transaction_action(action, **kwargs):
    """Build one action dict for the QMP 'transaction' command.

    QMP argument names use '-' where Python keyword arguments must use
    '_', so every key is translated before being placed in 'data'.
    """
    # dict.items() behaves identically on Python 2 here and is required on
    # Python 3, where iteritems() no longer exists (AttributeError).
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
    }
|
||||
|
||||
|
||||
def transaction_bitmap_clear(node, name, **kwargs):
    # Convenience wrapper: one block-dirty-bitmap-clear transaction action.
    # Extra keyword arguments are forwarded into the action's 'data' dict.
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    # Convenience wrapper: one drive-backup transaction action.
    return transaction_action('drive-backup',
                              device=device, target=target, **kwargs)
|
||||
|
||||
|
||||
class Bitmap:
|
||||
def __init__(self, name, drive):
|
||||
self.name = name
|
||||
|
@ -122,9 +139,12 @@ class TestIncrementalBackup(iotests.QMPTestCase):
|
|||
def do_qmp_backup(self, error='Input/output error', **kwargs):
    # Issue a drive-backup command and wait for the resulting job to end.
    # kwargs are forwarded verbatim to QMP; 'device' must be among them.
    reply = self.vm.qmp('drive-backup', **kwargs)
    self.assert_qmp(reply, 'return', {})
    return self.wait_qmp_backup(kwargs['device'], error)
|
||||
|
||||
|
||||
def wait_qmp_backup(self, device, error='Input/output error'):
|
||||
event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
|
||||
match={'data': {'device': kwargs['device']}})
|
||||
match={'data': {'device': device}})
|
||||
self.assertNotEqual(event, None)
|
||||
|
||||
try:
|
||||
|
@ -139,6 +159,12 @@ class TestIncrementalBackup(iotests.QMPTestCase):
|
|||
return False
|
||||
|
||||
|
||||
def wait_qmp_backup_cancelled(self, device):
    # Block until the job on 'device' emits BLOCK_JOB_CANCELLED.
    cancel_event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                      match={'data': {'device': device}})
    self.assertNotEqual(cancel_event, None)
|
||||
|
||||
|
||||
def create_anchor_backup(self, drive=None):
|
||||
if drive is None:
|
||||
drive = self.drives[-1]
|
||||
|
@ -264,6 +290,43 @@ class TestIncrementalBackup(iotests.QMPTestCase):
|
|||
return self.do_incremental_simple(granularity=131072)
|
||||
|
||||
|
||||
def test_incremental_transaction(self):
    '''Test: Verify backups made from transactionally created bitmaps.

    Create a bitmap "before" VM execution begins, then create a second
    bitmap AFTER writes have already occurred. Use transactions to create
    a full backup and synchronize both bitmaps to this backup.
    Create an incremental backup through both bitmaps and verify that
    both backups match the current drive0 image.
    '''
    drive0 = self.drives[0]
    bitmap0 = self.add_bitmap('bitmap0', drive0)
    self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                      ('0xfe', '16M', '256k'),
                                      ('0x64', '32736k', '64k')))
    bitmap1 = self.add_bitmap('bitmap1', drive0)

    # Clear both bitmaps and take a full backup in ONE transaction so the
    # bitmaps are synchronized to the same anchor point.
    actions = [
        transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
        transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
        transaction_drive_backup(drive0['id'], drive0['backup'],
                                 sync='full', format=drive0['fmt'])
    ]
    res = self.vm.qmp('transaction', actions=actions)
    self.assert_qmp(res, 'return', {})
    self.wait_until_completed(drive0['id'])
    self.files.append(drive0['backup'])

    # Dirty both bitmaps with fresh writes.
    self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                      ('0x55', '8M', '352k'),
                                      ('0x78', '15872k', '1M')))
    # Both bitmaps should be correctly in sync.
    self.create_incremental(bitmap0)
    self.create_incremental(bitmap1)
    self.vm.shutdown()
    self.check_backups()
|
||||
|
||||
|
||||
def test_incremental_failure(self):
|
||||
'''Test: Verify backups made after a failure are correct.
|
||||
|
||||
|
@ -321,6 +384,123 @@ class TestIncrementalBackup(iotests.QMPTestCase):
|
|||
self.check_backups()
|
||||
|
||||
|
||||
def test_transaction_failure(self):
    '''Test: Verify backups made from a transaction that partially fails.

    Add a second drive with its own unique pattern, and add a bitmap to each
    drive. Use blkdebug to interfere with the backup on just one drive and
    attempt to create a coherent incremental backup across both drives.

    verify a failure in one but not both, then delete the failed stubs and
    re-run the same transaction.

    verify that both incrementals are created successfully.
    '''

    # Create a second drive, with pattern:
    drive1 = self.add_node('drive1')
    self.img_create(drive1['file'], drive1['fmt'])
    io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                       ('0x5d', '1M', '32k'),
                                       ('0xcd', '32M', '124k')))

    # Create a blkdebug interface to this img as 'drive1'.  The first
    # flush arms the error state; the next aio read then fails with EIO.
    blkdebug_opts = {
        'id': drive1['id'],
        'driver': drive1['fmt'],
        'file': {
            'driver': 'blkdebug',
            'image': {
                'driver': 'file',
                'filename': drive1['file']
            },
            'set-state': [{
                'event': 'flush_to_disk',
                'state': 1,
                'new_state': 2
            }],
            'inject-error': [{
                'event': 'read_aio',
                'errno': 5,
                'state': 2,
                'immediately': False,
                'once': True
            }],
        }
    }
    res = self.vm.qmp('blockdev-add', options=blkdebug_opts)
    self.assert_qmp(res, 'return', {})

    # Create bitmaps and full backups for both drives
    drive0 = self.drives[0]
    dr0bm0 = self.add_bitmap('bitmap0', drive0)
    dr1bm0 = self.add_bitmap('bitmap0', drive1)
    self.create_anchor_backup(drive0)
    self.create_anchor_backup(drive1)
    self.assert_no_active_block_jobs()
    self.assertFalse(self.vm.get_qmp_events(wait=False))

    # Emulate some writes
    self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                      ('0xfe', '16M', '256k'),
                                      ('0x64', '32736k', '64k')))
    self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                      ('0xef', '16M', '256k'),
                                      ('0x46', '32736k', '64k')))

    # Create incremental backup targets
    target0 = self.prepare_backup(dr0bm0)
    target1 = self.prepare_backup(dr1bm0)

    # Ask for a new incremental backup per-each drive,
    # expecting drive1's backup to fail:
    txn = [
        transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                 format=drive0['fmt'], mode='existing',
                                 bitmap=dr0bm0.name),
        transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                 format=drive1['fmt'], mode='existing',
                                 bitmap=dr1bm0.name)
    ]
    res = self.vm.qmp('transaction', actions=txn,
                      properties={'completion-mode': 'grouped'})
    self.assert_qmp(res, 'return', {})

    # Observe that drive0's backup is cancelled and drive1 completes with
    # an error.
    self.wait_qmp_backup_cancelled(drive0['id'])
    self.assertFalse(self.wait_qmp_backup(drive1['id']))
    error = self.vm.event_wait('BLOCK_JOB_ERROR')
    self.assert_qmp(error, 'data', {'device': drive1['id'],
                                    'action': 'report',
                                    'operation': 'read'})
    self.assertFalse(self.vm.get_qmp_events(wait=False))
    self.assert_no_active_block_jobs()

    # Delete drive0's successful target and eliminate our record of the
    # unsuccessful drive1 target. Then re-run the same transaction.
    dr0bm0.del_target()
    dr1bm0.del_target()
    target0 = self.prepare_backup(dr0bm0)
    target1 = self.prepare_backup(dr1bm0)

    # Re-run the exact same transaction.
    res = self.vm.qmp('transaction', actions=txn,
                      properties={'completion-mode': 'grouped'})
    self.assert_qmp(res, 'return', {})

    # Both should complete successfully this time.
    self.assertTrue(self.wait_qmp_backup(drive0['id']))
    self.assertTrue(self.wait_qmp_backup(drive1['id']))
    self.make_reference_backup(dr0bm0)
    self.make_reference_backup(dr1bm0)
    self.assertFalse(self.vm.get_qmp_events(wait=False))
    self.assert_no_active_block_jobs()

    # And the images should of course validate.
    self.vm.shutdown()
    self.check_backups()
|
||||
|
||||
|
||||
def test_sync_dirty_bitmap_missing(self):
|
||||
self.assert_no_active_block_jobs()
|
||||
self.files.append(self.err_img)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
.......
|
||||
.........
|
||||
----------------------------------------------------------------------
|
||||
Ran 7 tests
|
||||
Ran 9 tests
|
||||
|
||||
OK
|
||||
|
|
349
tests/qemu-iotests/136
Normal file
349
tests/qemu-iotests/136
Normal file
|
@ -0,0 +1,349 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Tests for block device statistics
|
||||
#
|
||||
# Copyright (C) 2015 Igalia, S.L.
|
||||
# Author: Alberto Garcia <berto@igalia.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
import iotests
|
||||
import os
|
||||
|
||||
interval_length = 10
|
||||
nsec_per_sec = 1000000000
|
||||
op_latency = nsec_per_sec / 1000 # See qtest_latency_ns in accounting.c
|
||||
bad_sector = 8192
|
||||
bad_offset = bad_sector * 512
|
||||
blkdebug_file = os.path.join(iotests.test_dir, 'blkdebug.conf')
|
||||
|
||||
class BlockDeviceStatsTestCase(iotests.QMPTestCase):
|
||||
test_img = "null-aio://"
|
||||
total_rd_bytes = 0
|
||||
total_rd_ops = 0
|
||||
total_wr_bytes = 0
|
||||
total_wr_ops = 0
|
||||
total_wr_merged = 0
|
||||
total_flush_ops = 0
|
||||
failed_rd_ops = 0
|
||||
failed_wr_ops = 0
|
||||
invalid_rd_ops = 0
|
||||
invalid_wr_ops = 0
|
||||
wr_highest_offset = 0
|
||||
account_invalid = False
|
||||
account_failed = False
|
||||
|
||||
def blockstats(self, device):
    # Return the 'stats' dict for 'device' from query-blockstats,
    # or raise if no entry matches.
    reply = self.vm.qmp("query-blockstats")
    matches = [r['stats'] for r in reply['return'] if r['device'] == device]
    if matches:
        return matches[0]
    raise Exception("Device not found for blockstats: %s" % device)
|
||||
|
||||
def create_blkdebug_file(self):
    # Write a blkdebug config that makes both reads and writes fail with
    # errno 5 (EIO) when they touch bad_sector.
    with open(blkdebug_file, 'w') as conf:
        conf.write('''
[inject-error]
event = "read_aio"
errno = "5"
sector = "%d"

[inject-error]
event = "write_aio"
errno = "5"
sector = "%d"
''' % (bad_sector, bad_sector))
|
||||
|
||||
def setUp(self):
    # Per-drive accounting options derived from the test-class flags.
    opts = ["stats-intervals=%d" % interval_length,
            "stats-account-invalid=%s" %
            (self.account_invalid and "on" or "off"),
            "stats-account-failed=%s" %
            (self.account_failed and "on" or "off")]
    self.create_blkdebug_file()
    self.vm = iotests.VM().add_drive('blkdebug:%s:%s ' %
                                     (blkdebug_file, self.test_img),
                                     ','.join(opts))
    self.vm.launch()
    # Set an initial value for the clock
    self.vm.qtest("clock_step %d" % nsec_per_sec)
|
||||
|
||||
def tearDown(self):
    # Stop the VM first, then remove the temporary blkdebug config.
    self.vm.shutdown()
    os.remove(blkdebug_file)
|
||||
|
||||
def accounted_ops(self, read = False, write = False, flush = False):
    # Number of operations the accounting code should have recorded for
    # the selected categories, honouring account_failed/account_invalid.
    total = 0
    if write:
        total += self.total_wr_ops
        if self.account_failed:
            total += self.failed_wr_ops
        if self.account_invalid:
            total += self.invalid_wr_ops
    if read:
        total += self.total_rd_ops
        if self.account_failed:
            total += self.failed_rd_ops
        if self.account_invalid:
            total += self.invalid_rd_ops
    if flush:
        total += self.total_flush_ops
    return total
|
||||
|
||||
def accounted_latency(self, read = False, write = False, flush = False):
    # Expected cumulative latency in ns.  Note: unlike accounted_ops(),
    # invalid operations contribute nothing here — only successful and
    # (optionally) failed operations accrue op_latency each.
    ns = 0
    if write:
        ns += self.total_wr_ops * op_latency
        if self.account_failed:
            ns += self.failed_wr_ops * op_latency
    if read:
        ns += self.total_rd_ops * op_latency
        if self.account_failed:
            ns += self.failed_rd_ops * op_latency
    if flush:
        ns += self.total_flush_ops * op_latency
    return ns
|
||||
|
||||
def check_values(self):
    """Assert that query-blockstats matches the totals this test tracks.

    Verifies byte/op totals, failed/invalid counters, the single timed
    stats interval, min/avg/max latency relationships, queue depth and
    idle time.
    """
    stats = self.blockstats('drive0')

    # Check that the totals match with what we have calculated
    self.assertEqual(self.total_rd_bytes, stats['rd_bytes'])
    self.assertEqual(self.total_wr_bytes, stats['wr_bytes'])
    self.assertEqual(self.total_rd_ops, stats['rd_operations'])
    self.assertEqual(self.total_wr_ops, stats['wr_operations'])
    self.assertEqual(self.total_flush_ops, stats['flush_operations'])
    self.assertEqual(self.wr_highest_offset, stats['wr_highest_offset'])
    self.assertEqual(self.failed_rd_ops, stats['failed_rd_operations'])
    self.assertEqual(self.failed_wr_ops, stats['failed_wr_operations'])
    self.assertEqual(self.invalid_rd_ops, stats['invalid_rd_operations'])
    self.assertEqual(self.invalid_wr_ops, stats['invalid_wr_operations'])
    self.assertEqual(self.account_invalid, stats['account_invalid'])
    self.assertEqual(self.account_failed, stats['account_failed'])
    self.assertEqual(self.total_wr_merged, stats['wr_merged'])

    # Check that there's exactly one interval with the length we defined
    self.assertEqual(1, len(stats['timed_stats']))
    timed_stats = stats['timed_stats'][0]
    self.assertEqual(interval_length, timed_stats['interval_length'])

    total_rd_latency = self.accounted_latency(read = True)
    if (total_rd_latency != 0):
        self.assertEqual(total_rd_latency, stats['rd_total_time_ns'])
        self.assertEqual(op_latency, timed_stats['min_rd_latency_ns'])
        self.assertEqual(op_latency, timed_stats['max_rd_latency_ns'])
        self.assertEqual(op_latency, timed_stats['avg_rd_latency_ns'])
        self.assertLess(0, timed_stats['avg_rd_queue_depth'])
    else:
        self.assertEqual(0, stats['rd_total_time_ns'])
        self.assertEqual(0, timed_stats['min_rd_latency_ns'])
        self.assertEqual(0, timed_stats['max_rd_latency_ns'])
        self.assertEqual(0, timed_stats['avg_rd_latency_ns'])
        self.assertEqual(0, timed_stats['avg_rd_queue_depth'])

    # min read latency <= avg read latency <= max read latency
    self.assertLessEqual(timed_stats['min_rd_latency_ns'],
                         timed_stats['avg_rd_latency_ns'])
    self.assertLessEqual(timed_stats['avg_rd_latency_ns'],
                         timed_stats['max_rd_latency_ns'])

    total_wr_latency = self.accounted_latency(write = True)
    if (total_wr_latency != 0):
        self.assertEqual(total_wr_latency, stats['wr_total_time_ns'])
        self.assertEqual(op_latency, timed_stats['min_wr_latency_ns'])
        self.assertEqual(op_latency, timed_stats['max_wr_latency_ns'])
        self.assertEqual(op_latency, timed_stats['avg_wr_latency_ns'])
        self.assertLess(0, timed_stats['avg_wr_queue_depth'])
    else:
        self.assertEqual(0, stats['wr_total_time_ns'])
        self.assertEqual(0, timed_stats['min_wr_latency_ns'])
        self.assertEqual(0, timed_stats['max_wr_latency_ns'])
        self.assertEqual(0, timed_stats['avg_wr_latency_ns'])
        self.assertEqual(0, timed_stats['avg_wr_queue_depth'])

    # min write latency <= avg write latency <= max write latency
    self.assertLessEqual(timed_stats['min_wr_latency_ns'],
                         timed_stats['avg_wr_latency_ns'])
    self.assertLessEqual(timed_stats['avg_wr_latency_ns'],
                         timed_stats['max_wr_latency_ns'])

    total_flush_latency = self.accounted_latency(flush = True)
    if (total_flush_latency != 0):
        self.assertEqual(total_flush_latency, stats['flush_total_time_ns'])
        self.assertEqual(op_latency, timed_stats['min_flush_latency_ns'])
        self.assertEqual(op_latency, timed_stats['max_flush_latency_ns'])
        self.assertEqual(op_latency, timed_stats['avg_flush_latency_ns'])
    else:
        self.assertEqual(0, stats['flush_total_time_ns'])
        self.assertEqual(0, timed_stats['min_flush_latency_ns'])
        self.assertEqual(0, timed_stats['max_flush_latency_ns'])
        self.assertEqual(0, timed_stats['avg_flush_latency_ns'])

    # min flush latency <= avg flush latency <= max flush latency
    self.assertLessEqual(timed_stats['min_flush_latency_ns'],
                         timed_stats['avg_flush_latency_ns'])
    self.assertLessEqual(timed_stats['avg_flush_latency_ns'],
                         timed_stats['max_flush_latency_ns'])

    # idle_time_ns must be > 0 if we have performed any operation
    if (self.accounted_ops(read = True, write = True, flush = True) != 0):
        self.assertLess(0, stats['idle_time_ns'])
    else:
        # dict.has_key() was removed in Python 3; assertNotIn is the
        # portable and clearer way to check the key is absent.
        self.assertNotIn('idle_time_ns', stats)

    # This test does not alter these, so they must be all 0
    self.assertEqual(0, stats['rd_merged'])
    self.assertEqual(0, stats['failed_flush_operations'])
    self.assertEqual(0, stats['invalid_flush_operations'])
|
||||
|
||||
def do_test_stats(self, rd_size = 0, rd_ops = 0, wr_size = 0, wr_ops = 0,
                  flush_ops = 0, invalid_rd_ops = 0, invalid_wr_ops = 0,
                  failed_rd_ops = 0, failed_wr_ops = 0, wr_merged = 0):
    """Perform the requested mix of I/O, update the expected totals and
    verify them against query-blockstats via check_values()."""
    # The 'ops' list will contain all the requested I/O operations
    ops = []
    for i in range(rd_ops):
        ops.append("aio_read %d %d" % (i * rd_size, rd_size))

    for i in range(wr_ops):
        ops.append("aio_write %d %d" % (i * wr_size, wr_size))

    for i in range(flush_ops):
        ops.append("aio_flush")

    highest_offset = wr_ops * wr_size

    # Two types of invalid operations: unaligned length and unaligned offset
    # Use floor division ('//'): identical for Python 2 ints, and required
    # on Python 3 where '/' yields a float that range() rejects.
    for i in range(invalid_rd_ops // 2):
        ops.append("aio_read 0 511")

    for i in range(invalid_rd_ops // 2, invalid_rd_ops):
        ops.append("aio_read 13 512")

    for i in range(invalid_wr_ops // 2):
        ops.append("aio_write 0 511")

    for i in range(invalid_wr_ops // 2, invalid_wr_ops):
        ops.append("aio_write 13 512")

    for i in range(failed_rd_ops):
        ops.append("aio_read %d 512" % bad_offset)

    for i in range(failed_wr_ops):
        ops.append("aio_write %d 512" % bad_offset)

    if failed_wr_ops > 0:
        highest_offset = max(highest_offset, bad_offset + 512)

    for i in range(wr_merged):
        first = i * wr_size * 2
        second = first + wr_size
        ops.append("multiwrite %d %d ; %d %d" %
                   (first, wr_size, second, wr_size))

    highest_offset = max(highest_offset, wr_merged * wr_size * 2)

    # Now perform all operations
    for op in ops:
        self.vm.hmp_qemu_io("drive0", op)

    # Update the expected totals
    self.total_rd_bytes += rd_ops * rd_size
    self.total_rd_ops += rd_ops
    self.total_wr_bytes += wr_ops * wr_size
    self.total_wr_ops += wr_ops
    self.total_wr_merged += wr_merged
    self.total_flush_ops += flush_ops
    self.invalid_rd_ops += invalid_rd_ops
    self.invalid_wr_ops += invalid_wr_ops
    self.failed_rd_ops += failed_rd_ops
    self.failed_wr_ops += failed_wr_ops

    self.wr_highest_offset = max(self.wr_highest_offset, highest_offset)

    # Advance the clock so idle_time_ns has a meaningful value
    self.vm.qtest("clock_step %d" % nsec_per_sec)

    # And check that the actual statistics match the expected ones
    self.check_values()
|
||||
|
||||
def test_read_only(self):
    # Reads only, over a small size/count matrix.
    for size, count in [(512, 1), (65536, 1), (512, 12), (65536, 12)]:
        self.do_test_stats(rd_size = size, rd_ops = count)

def test_write_only(self):
    # Writes only, same matrix as test_read_only.
    for size, count in [(512, 1), (65536, 1), (512, 12), (65536, 12)]:
        self.do_test_stats(wr_size = size, wr_ops = count)

def test_invalid(self):
    # Unaligned requests, rejected before reaching the device.
    self.do_test_stats(invalid_rd_ops = 7)
    self.do_test_stats(invalid_wr_ops = 3)
    self.do_test_stats(invalid_rd_ops = 4, invalid_wr_ops = 5)

def test_failed(self):
    # Requests that hit the blkdebug error sector and fail.
    self.do_test_stats(failed_rd_ops = 8)
    self.do_test_stats(failed_wr_ops = 6)
    self.do_test_stats(failed_rd_ops = 5, failed_wr_ops = 12)

def test_flush(self):
    self.do_test_stats(flush_ops = 8)

def test_merged(self):
    # Multiwrite requests that are accounted as merged writes.
    for i in range(5):
        self.do_test_stats(wr_merged = i * 3)

def test_all(self):
    # Columns: rd_size, rd_ops, wr_size, wr_ops, flush_ops,
    #          invalid_rd_ops, invalid_wr_ops,
    #          failed_rd_ops, failed_wr_ops, wr_merged
    test_values = [[512, 1, 512, 1, 1, 4, 7, 5, 2, 1],
                   [65536, 1, 2048, 12, 7, 7, 5, 2, 5, 5],
                   [32768, 9, 8192, 1, 4, 3, 2, 4, 6, 4],
                   [16384, 11, 3584, 16, 9, 8, 6, 7, 3, 4]]
    for i in test_values:
        self.do_test_stats(*i)

def test_no_op(self):
    # All values must be sane before doing any I/O
    self.check_values()
|
||||
|
||||
|
||||
# Re-run the whole suite under each accounting configuration, and once
# with the coroutine-based null driver instead of the AIO one.

class BlockDeviceStatsTestAccountInvalid(BlockDeviceStatsTestCase):
    account_invalid = True
    account_failed = False

class BlockDeviceStatsTestAccountFailed(BlockDeviceStatsTestCase):
    account_invalid = False
    account_failed = True

class BlockDeviceStatsTestAccountBoth(BlockDeviceStatsTestCase):
    account_invalid = True
    account_failed = True

class BlockDeviceStatsTestCoroutine(BlockDeviceStatsTestCase):
    test_img = "null-co://"

if __name__ == '__main__':
    iotests.main(supported_fmts=["raw"])
|
5
tests/qemu-iotests/136.out
Normal file
5
tests/qemu-iotests/136.out
Normal file
|
@ -0,0 +1,5 @@
|
|||
........................................
|
||||
----------------------------------------------------------------------
|
||||
Ran 40 tests
|
||||
|
||||
OK
|
|
@ -136,6 +136,7 @@
|
|||
132 rw auto quick
|
||||
134 rw auto quick
|
||||
135 rw auto
|
||||
136 rw auto
|
||||
137 rw auto
|
||||
138 rw auto quick
|
||||
139 rw auto quick
|
||||
|
|
250
tests/test-blockjob-txn.c
Normal file
250
tests/test-blockjob-txn.c
Normal file
|
@ -0,0 +1,250 @@
|
|||
/*
|
||||
* Blockjob transactions tests
|
||||
*
|
||||
* Copyright Red Hat, Inc. 2015
|
||||
*
|
||||
* Authors:
|
||||
* Stefan Hajnoczi <stefanha@redhat.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU LGPL, version 2 or later.
|
||||
* See the COPYING.LIB file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include <glib.h>
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "block/blockjob.h"
|
||||
|
||||
typedef struct {
|
||||
BlockJob common;
|
||||
unsigned int iterations;
|
||||
bool use_timer;
|
||||
int rc;
|
||||
int *result;
|
||||
} TestBlockJob;
|
||||
|
||||
static const BlockJobDriver test_block_job_driver = {
|
||||
.instance_size = sizeof(TestBlockJob),
|
||||
};
|
||||
|
||||
/* Main-loop callback: complete the job (cancellation wins over the
 * requested code) and drop the BDS reference taken at start. */
static void test_block_job_complete(BlockJob *job, void *opaque)
{
    BlockDriverState *bs = job->bs;
    int ret = (intptr_t)opaque;

    if (block_job_is_cancelled(job)) {
        ret = -ECANCELED;
    }
    block_job_completed(job, ret);
    bdrv_unref(bs);
}
|
||||
|
||||
/* Coroutine body: spin for the configured number of event-loop
 * iterations, then defer completion to the main loop. */
static void coroutine_fn test_block_job_run(void *opaque)
{
    TestBlockJob *s = opaque;
    BlockJob *job = &s->common;

    while (s->iterations--) {
        /* Wake up either via a 0-delay timer or a manual block_job_enter() */
        if (s->use_timer) {
            block_job_sleep_ns(job, QEMU_CLOCK_REALTIME, 0);
        } else {
            block_job_yield(job);
        }
        if (block_job_is_cancelled(job)) {
            break;
        }
    }

    block_job_defer_to_main_loop(job, test_block_job_complete,
                                 (void *)(intptr_t)s->rc);
}
|
||||
|
||||
typedef struct {
|
||||
TestBlockJob *job;
|
||||
int *result;
|
||||
} TestBlockJobCBData;
|
||||
|
||||
static void test_block_job_cb(void *opaque, int ret)
|
||||
{
|
||||
TestBlockJobCBData *data = opaque;
|
||||
if (!ret && block_job_is_cancelled(&data->job->common)) {
|
||||
ret = -ECANCELED;
|
||||
}
|
||||
*data->result = ret;
|
||||
g_free(data);
|
||||
}
|
||||
|
||||
/* Create a block job that completes with a given return code after a given
|
||||
* number of event loop iterations. The return code is stored in the given
|
||||
* result pointer.
|
||||
*
|
||||
* The event loop iterations can either be handled automatically with a 0 delay
|
||||
* timer, or they can be stepped manually by entering the coroutine.
|
||||
*/
|
||||
static BlockJob *test_block_job_start(unsigned int iterations,
|
||||
bool use_timer,
|
||||
int rc, int *result)
|
||||
{
|
||||
BlockDriverState *bs;
|
||||
TestBlockJob *s;
|
||||
TestBlockJobCBData *data;
|
||||
|
||||
data = g_new0(TestBlockJobCBData, 1);
|
||||
bs = bdrv_new();
|
||||
s = block_job_create(&test_block_job_driver, bs, 0, test_block_job_cb,
|
||||
data, &error_abort);
|
||||
s->iterations = iterations;
|
||||
s->use_timer = use_timer;
|
||||
s->rc = rc;
|
||||
s->result = result;
|
||||
s->common.co = qemu_coroutine_create(test_block_job_run);
|
||||
data->job = s;
|
||||
data->result = result;
|
||||
qemu_coroutine_enter(s->common.co, s);
|
||||
return &s->common;
|
||||
}
|
||||
|
||||
static void test_single_job(int expected)
|
||||
{
|
||||
BlockJob *job;
|
||||
BlockJobTxn *txn;
|
||||
int result = -EINPROGRESS;
|
||||
|
||||
txn = block_job_txn_new();
|
||||
job = test_block_job_start(1, true, expected, &result);
|
||||
block_job_txn_add_job(txn, job);
|
||||
|
||||
if (expected == -ECANCELED) {
|
||||
block_job_cancel(job);
|
||||
}
|
||||
|
||||
while (result == -EINPROGRESS) {
|
||||
aio_poll(qemu_get_aio_context(), true);
|
||||
}
|
||||
g_assert_cmpint(result, ==, expected);
|
||||
|
||||
block_job_txn_unref(txn);
|
||||
}
|
||||
|
||||
/* Single-job transaction: normal completion. */
static void test_single_job_success(void)
{
    test_single_job(0);
}

/* Single-job transaction: job fails with EIO. */
static void test_single_job_failure(void)
{
    test_single_job(-EIO);
}

/* Single-job transaction: job is cancelled. */
static void test_single_job_cancel(void)
{
    test_single_job(-ECANCELED);
}
|
||||
|
||||
static void test_pair_jobs(int expected1, int expected2)
|
||||
{
|
||||
BlockJob *job1;
|
||||
BlockJob *job2;
|
||||
BlockJobTxn *txn;
|
||||
int result1 = -EINPROGRESS;
|
||||
int result2 = -EINPROGRESS;
|
||||
|
||||
txn = block_job_txn_new();
|
||||
job1 = test_block_job_start(1, true, expected1, &result1);
|
||||
block_job_txn_add_job(txn, job1);
|
||||
job2 = test_block_job_start(2, true, expected2, &result2);
|
||||
block_job_txn_add_job(txn, job2);
|
||||
|
||||
if (expected1 == -ECANCELED) {
|
||||
block_job_cancel(job1);
|
||||
}
|
||||
if (expected2 == -ECANCELED) {
|
||||
block_job_cancel(job2);
|
||||
}
|
||||
|
||||
while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
|
||||
aio_poll(qemu_get_aio_context(), true);
|
||||
}
|
||||
|
||||
/* Failure or cancellation of one job cancels the other job */
|
||||
if (expected1 != 0) {
|
||||
expected2 = -ECANCELED;
|
||||
} else if (expected2 != 0) {
|
||||
expected1 = -ECANCELED;
|
||||
}
|
||||
|
||||
g_assert_cmpint(result1, ==, expected1);
|
||||
g_assert_cmpint(result2, ==, expected2);
|
||||
|
||||
block_job_txn_unref(txn);
|
||||
}
|
||||
|
||||
/* Two-job transaction in which both jobs complete normally */
static void test_pair_jobs_success(void)
{
    test_pair_jobs(0, 0);
}
|
||||
|
||||
/* Two-job transaction in which one job fails with -EIO.
 *
 * Test both orderings.  The two jobs run for a different number of
 * iterations so the code path is different depending on which job fails
 * first.
 */
static void test_pair_jobs_failure(void)
{
    test_pair_jobs(-EIO, 0);
    test_pair_jobs(0, -EIO);
}
|
||||
|
||||
/* Two-job transaction in which one job is cancelled; both orderings are
 * exercised, mirroring test_pair_jobs_failure(). */
static void test_pair_jobs_cancel(void)
{
    test_pair_jobs(-ECANCELED, 0);
    test_pair_jobs(0, -ECANCELED);
}
|
||||
|
||||
/* Regression test: cancel one job while the other finishes on its own,
 * racing a pending transaction kick against job completion.  Both jobs
 * must end up cancelled. */
static void test_pair_jobs_fail_cancel_race(void)
{
    BlockJob *job1;
    BlockJob *job2;
    BlockJobTxn *txn;
    int result1 = -EINPROGRESS;
    int result2 = -EINPROGRESS;

    txn = block_job_txn_new();
    /* job1 runs 1 iteration and expects to be cancelled */
    job1 = test_block_job_start(1, true, -ECANCELED, &result1);
    block_job_txn_add_job(txn, job1);
    /* job2 runs 2 iterations and would otherwise succeed */
    job2 = test_block_job_start(2, false, 0, &result2);
    block_job_txn_add_job(txn, job2);

    block_job_cancel(job1);

    /* Now make job2 finish before the main loop kicks jobs. This simulates
     * the race between a pending kick and another job completing.
     */
    /* Two kicks: one per remaining iteration of job2, so it reaches its
     * completion path before the main loop runs. */
    block_job_enter(job2);
    block_job_enter(job2);

    /* Drive the main loop until both jobs have reported a final status */
    while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
        aio_poll(qemu_get_aio_context(), true);
    }

    /* job1 was cancelled explicitly; job2 must be cancelled by the
     * transaction despite having completed its I/O. */
    g_assert_cmpint(result1, ==, -ECANCELED);
    g_assert_cmpint(result2, ==, -ECANCELED);

    block_job_txn_unref(txn);
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
qemu_init_main_loop(&error_abort);
|
||||
|
||||
g_test_init(&argc, &argv, NULL);
|
||||
g_test_add_func("/single/success", test_single_job_success);
|
||||
g_test_add_func("/single/failure", test_single_job_failure);
|
||||
g_test_add_func("/single/cancel", test_single_job_cancel);
|
||||
g_test_add_func("/pair/success", test_pair_jobs_success);
|
||||
g_test_add_func("/pair/failure", test_pair_jobs_failure);
|
||||
g_test_add_func("/pair/cancel", test_pair_jobs_cancel);
|
||||
g_test_add_func("/pair/fail-cancel-race", test_pair_jobs_fail_cancel_race);
|
||||
return g_test_run();
|
||||
}
|
90
tests/test-timed-average.c
Normal file
90
tests/test-timed-average.c
Normal file
|
@ -0,0 +1,90 @@
|
|||
/*
|
||||
* Timed average computation tests
|
||||
*
|
||||
* Copyright Nodalink, EURL. 2014
|
||||
*
|
||||
* Authors:
|
||||
* Benoît Canet <benoit.canet@nodalink.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU LGPL, version 2 or later.
|
||||
* See the COPYING.LIB file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include <glib.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "qemu/timed-average.h"
|
||||
|
||||
/* This is the clock for QEMU_CLOCK_VIRTUAL */
/* Virtual-time source controlled by the test; advanced manually below. */
static int64_t my_clock_value;

/* Override QEMU's cpu_get_clock() so the timed-average code reads the
 * test-controlled clock instead of real time. */
int64_t cpu_get_clock(void)
{
    return my_clock_value;
}
|
||||
|
||||
/* Feed one fixed sample set into @ta: the values 1..5 in shuffled order,
 * giving a known min of 1, max of 5 and average of 3. */
static void account(TimedAverage *ta)
{
    static const uint64_t samples[] = { 1, 5, 2, 4, 3 };
    size_t i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        timed_average_account(ta, samples[i]);
    }
}
|
||||
|
||||
static void test_average(void)
|
||||
{
|
||||
TimedAverage ta;
|
||||
uint64_t result;
|
||||
int i;
|
||||
|
||||
/* we will compute some average on a period of 1 second */
|
||||
timed_average_init(&ta, QEMU_CLOCK_VIRTUAL, NANOSECONDS_PER_SECOND);
|
||||
|
||||
result = timed_average_min(&ta);
|
||||
g_assert(result == 0);
|
||||
result = timed_average_avg(&ta);
|
||||
g_assert(result == 0);
|
||||
result = timed_average_max(&ta);
|
||||
g_assert(result == 0);
|
||||
|
||||
for (i = 0; i < 100; i++) {
|
||||
account(&ta);
|
||||
result = timed_average_min(&ta);
|
||||
g_assert(result == 1);
|
||||
result = timed_average_avg(&ta);
|
||||
g_assert(result == 3);
|
||||
result = timed_average_max(&ta);
|
||||
g_assert(result == 5);
|
||||
my_clock_value += NANOSECONDS_PER_SECOND / 10;
|
||||
}
|
||||
|
||||
my_clock_value += NANOSECONDS_PER_SECOND * 100;
|
||||
|
||||
result = timed_average_min(&ta);
|
||||
g_assert(result == 0);
|
||||
result = timed_average_avg(&ta);
|
||||
g_assert(result == 0);
|
||||
result = timed_average_max(&ta);
|
||||
g_assert(result == 0);
|
||||
|
||||
for (i = 0; i < 100; i++) {
|
||||
account(&ta);
|
||||
result = timed_average_min(&ta);
|
||||
g_assert(result == 1);
|
||||
result = timed_average_avg(&ta);
|
||||
g_assert(result == 3);
|
||||
result = timed_average_max(&ta);
|
||||
g_assert(result == 5);
|
||||
my_clock_value += NANOSECONDS_PER_SECOND / 10;
|
||||
}
|
||||
}
|
||||
|
||||
/* Register and run the timed-average tests */
int main(int argc, char **argv)
{
    /* tests in the same order as the header function declarations */
    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/timed-average/average", test_average);
    return g_test_run();
}
|
||||
|
Loading…
Add table
Add a link
Reference in a new issue