scripts: twister: Fix NOTRUN in test_only

When using a --build-only run followed by a --test-only
Twister setup, NOTRUN statuses were not properly rerun.

Now they are properly run again if runnable.

Signed-off-by: Lukasz Mrugala <lukaszx.mrugala@intel.com>
This commit is contained in:
Lukasz Mrugala 2024-10-22 10:05:44 +00:00 committed by Mahesh Mahadevan
parent 1e5a537ade
commit 9dc0af55e2
5 changed files with 26 additions and 14 deletions

View file

@@ -354,6 +354,12 @@ class Reporting:
elif instance.status == TwisterStatus.SKIP:
suite["status"] = TwisterStatus.SKIP
suite["reason"] = instance.reason
elif instance.status == TwisterStatus.NOTRUN:
suite["status"] = TwisterStatus.NOTRUN
suite["reason"] = instance.reason
else:
suite["status"] = TwisterStatus.NONE
suite["reason"] = 'Unknown Instance status.'
if instance.status != TwisterStatus.NONE:
suite["execution_time"] = f"{float(handler_time):.2f}"

View file

@@ -659,6 +659,9 @@ class TestPlan:
self.hwm
)
if self.options.test_only and not instance.run:
continue
instance.metrics['handler_time'] = ts.get('execution_time', 0)
instance.metrics['used_ram'] = ts.get("used_ram", 0)
instance.metrics['used_rom'] = ts.get("used_rom",0)
@@ -676,9 +679,9 @@ class TestPlan:
instance.status = TwisterStatus.NONE
instance.reason = None
instance.retries += 1
# test marked as passed (built only) but can run when
# --test-only is used. Reset status to capture new results.
elif status == TwisterStatus.PASS and instance.run and self.options.test_only:
# test marked as built only can run when --test-only is used.
# Reset status to capture new results.
elif status == TwisterStatus.NOTRUN and instance.run and self.options.test_only:
instance.status = TwisterStatus.NONE
instance.reason = None
else:

View file

@@ -766,6 +766,7 @@ def test_testplan_load(
testplan.apply_filters = mock.Mock()
with mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()), \
mock.patch('twisterlib.testinstance.TestInstance.check_runnable', return_value=True), \
pytest.raises(exception) if exception else nullcontext():
testplan.load()
@@ -1600,7 +1601,7 @@ def test_testplan_load_from_file(caplog, device_testing, expected_tfilter):
'testcases': {
'TS1.tc1': {
'status': TwisterStatus.PASS,
'reason': None,
'reason': 'passed',
'duration': 60.0,
'output': ''
}

View file

@@ -16,6 +16,7 @@ import re
# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock, clear_log_in_test
from twisterlib.statuses import TwisterStatus
from twisterlib.testplan import TestPlan
@@ -76,9 +77,10 @@ class TestFootprint:
with open(os.path.join(out_path, 'twister.json')) as f:
j = json.load(f)
for ts in j['testsuites']:
if 'reason' not in ts:
if TwisterStatus(ts.get('status')) == TwisterStatus.NOTRUN:
# We assume positive RAM usage.
ts[self.RAM_KEY] *= old_ram_multiplier
with open(os.path.join(out_path, 'twister.json'), 'w') as f:
f.write(json.dumps(j, indent=4))
@@ -137,7 +139,7 @@ class TestFootprint:
with open(os.path.join(out_path, 'twister.json')) as f:
j = json.load(f)
for ts in j['testsuites']:
if 'reason' not in ts:
if TwisterStatus(ts.get('status')) == TwisterStatus.NOTRUN:
assert self.RAM_KEY in ts
old_values += [ts[self.RAM_KEY]]
@@ -162,7 +164,7 @@ class TestFootprint:
with open(os.path.join(out_path, 'twister.json')) as f:
j = json.load(f)
for ts in j['testsuites']:
if 'reason' not in ts:
if TwisterStatus(ts.get('status')) == TwisterStatus.NOTRUN:
assert self.RAM_KEY in ts
new_values += [ts[self.RAM_KEY]]
@@ -202,7 +204,7 @@ class TestFootprint:
with open(os.path.join(out_path, 'twister.json')) as f:
j = json.load(f)
for ts in j['testsuites']:
if 'reason' not in ts:
if TwisterStatus(ts.get('status')) == TwisterStatus.NOTRUN:
# We assume positive RAM usage.
ts[self.RAM_KEY] *= old_ram_multiplier
with open(os.path.join(out_path, 'twister.json'), 'w') as f:
@@ -271,7 +273,7 @@ class TestFootprint:
with open(os.path.join(out_path, 'twister.json')) as f:
j = json.load(f)
for ts in j['testsuites']:
if 'reason' not in ts:
if TwisterStatus(ts.get('status')) == TwisterStatus.NOTRUN:
# We assume positive RAM usage.
ts[self.RAM_KEY] *= old_ram_multiplier
with open(os.path.join(out_path, 'twister.json'), 'w') as f:
@@ -344,7 +346,7 @@ class TestFootprint:
with open(os.path.join(out_path, 'twister.json')) as f:
j = json.load(f)
for ts in j['testsuites']:
if 'reason' not in ts:
if TwisterStatus(ts.get('status')) == TwisterStatus.NOTRUN:
# We assume positive RAM usage.
ts[self.RAM_KEY] *= old_ram_multiplier
with open(os.path.join(out_path, 'twister.json'), 'w') as f:
@@ -441,7 +443,7 @@ class TestFootprint:
with open(os.path.join(out_path, 'twister.json')) as f:
j = json.load(f)
for ts in j['testsuites']:
if 'reason' not in ts:
if TwisterStatus(ts.get('status')) == TwisterStatus.NOTRUN:
# We assume positive RAM usage.
ts[self.RAM_KEY] *= old_ram_multiplier
with open(os.path.join(out_path, 'twister.json'), 'w') as f:

View file

@@ -46,19 +46,19 @@ class TestRunner:
['qemu_x86/atom', 'qemu_x86_64/atom', 'intel_adl_crb/alder_lake'],
{
'selected_test_scenarios': 3,
'selected_test_instances': 6,
'selected_test_instances': 4,
'skipped_configurations': 0,
'skipped_by_static_filter': 0,
'skipped_at_runtime': 0,
'passed_configurations': 4,
'built_configurations': 2,
'built_configurations': 0,
'failed_configurations': 0,
'errored_configurations': 0,
'executed_test_cases': 8,
'skipped_test_cases': 0,
'platform_count': 0,
'executed_on_platform': 4,
'only_built': 2
'only_built': 0
}
)
]