scripts: twister: Enhance TestCase/Instance info and presentation

ExecutionCounter has been expanded and now holds, among other
things, more information on TestCase statuses.
This information is now incorporated into the relevant summaries
in runner.py and reports.py, whose layout was changed to present
the new and existing information in a clear and concise way.

The TestInstance execution counter is now more intuitive:
instances filtered out before running are no longer included there,
and retries properly reset the counter.

TestCases with None or other incorrect final statuses
are logged as errors, but do not cause Twister to exit
with a nonzero exit code. This is because None statuses,
although incorrect, are currently common.

Fixed inconsistent spacing in the ERROR and FAILED status output.

Signed-off-by: Lukasz Mrugala <lukaszx.mrugala@intel.com>

scripts: twister: Fix spacing issue reported by Dmitri

Fix a problem noticed by Dmitri:
removed unnecessary extra spaces when printing
the FAILED and ERROR statuses.
TwisterStatus.get_color is now used more consistently.

Signed-off-by: Lukasz Mrugala <lukaszx.mrugala@intel.com>
This commit is contained in:
Lukasz Mrugala 2024-08-21 11:24:51 +00:00 committed by Mahesh Mahadevan
parent 502c9ffb32
commit ff1857af55
7 changed files with 547 additions and 171 deletions

View file

@ -584,39 +584,43 @@ class Reporting:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {} built (not run), {}{}{} failed, {}{}{} errored, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total,
Fore.RESET,
pass_rate,
results.notrun,
Fore.RED if results.failed else Fore.RESET,
results.failed,
Fore.RESET,
Fore.RED if results.error else Fore.RESET,
results.error,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.plan.warnings else Fore.RESET,
self.plan.warnings,
Fore.RESET,
duration))
f"{TwisterStatus.get_color(TwisterStatus.FAIL) if failed else TwisterStatus.get_color(TwisterStatus.PASS)}{results.passed}"
f" of {results.total - results.skipped_configs}{Fore.RESET}"
f" executed test configurations passed ({pass_rate:.2%}),"
f" {f'{TwisterStatus.get_color(TwisterStatus.NOTRUN)}{results.notrun}{Fore.RESET}' if results.notrun else f'{results.notrun}'} built (not run),"
f" {f'{TwisterStatus.get_color(TwisterStatus.FAIL)}{results.failed}{Fore.RESET}' if results.failed else f'{results.failed}'} failed,"
f" {f'{TwisterStatus.get_color(TwisterStatus.ERROR)}{results.error}{Fore.RESET}' if results.error else f'{results.error}'} errored,"
f" with {f'{Fore.YELLOW}{self.plan.warnings}{Fore.RESET}' if self.plan.warnings else 'no'} warnings"
f" in {duration:.2f} seconds."
)
total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.env.options.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases - results.notrun,
results.skipped_cases,
len(self.filtered_platforms),
total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
executed_cases = results.cases - results.filtered_cases - results.skipped_cases - results.notrun_cases
pass_rate = 100 * (float(results.passed_cases) / float(executed_cases)) \
if executed_cases != 0 else 0
platform_rate = (100 * len(self.filtered_platforms) / len(self.platforms))
logger.info(
f'{results.passed_cases} of {executed_cases} executed test cases passed ({pass_rate:02.2f}%)'
f'{", " + str(results.blocked_cases) + " blocked" if results.blocked_cases else ""}'
f'{", " + str(results.failed_cases) + " failed" if results.failed_cases else ""}'
f'{", " + str(results.error_cases) + " errored" if results.error_cases else ""}'
f'{", " + str(results.none_cases) + " without a status" if results.none_cases else ""}'
f' on {len(self.filtered_platforms)} out of total {total_platforms} platforms ({platform_rate:02.2f}%).'
)
if results.skipped_cases or results.filtered_cases or results.notrun_cases:
logger.info(
f'{results.skipped_cases + results.filtered_cases} selected test cases not executed:' \
f'{" " + str(results.skipped_cases) + " skipped" if results.skipped_cases else ""}' \
f'{(", " if results.skipped_cases else " ") + str(results.filtered_cases) + " filtered" if results.filtered_cases else ""}' \
f'{(", " if results.skipped_cases or results.filtered_cases else " ") + str(results.notrun_cases) + " not run (built only)" if results.notrun_cases else ""}' \
f'.'
)
built_only = results.total - run - results.skipped_configs
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{built_only}{Fore.RESET} test configurations were only built.")
{TwisterStatus.get_color(TwisterStatus.NOTRUN)}{built_only}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, platform_reports):
if not self.instances:
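
The nested conditional f-strings in the summary above colorize a count only when it is nonzero. A minimal sketch of that pattern, assuming colorama's Fore; the colorize helper name is hypothetical and not part of this change:

from colorama import Fore

def colorize(count, color):
    # Wrap the number in a color only when it is nonzero, so all-zero
    # summaries stay uncolored and easy to scan.
    return f"{color}{count}{Fore.RESET}" if count else f"{count}"

print(f"{colorize(3, Fore.RED)} failed, {colorize(0, Fore.RED)} errored")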

View file

@ -5,6 +5,7 @@
# SPDX-License-Identifier: Apache-2.0
import logging
from math import log10
import multiprocessing
import os
import pickle
@ -62,12 +63,16 @@ class ExecutionCounter(object):
def __init__(self, total=0):
'''
Most of the stats are at test instance level
Except that "_cases" and "_skipped_cases" are for cases of ALL test instances
Except that case statistics are for cases of ALL test instances
total complete = done + skipped_filter
total = yaml test scenarios * applicable platforms
complete percentage = (done + skipped_filter) / total
done := instances that reached report_out stage of the pipeline
done = skipped_configs + passed + failed + error
completed = done - skipped_filter
skipped_configs = skipped_runtime + skipped_filter
pass rate = passed / (total - skipped_configs)
case pass rate = passed_cases / (cases - filtered_cases - skipped_cases)
'''
# instances that go through the pipeline
# updated by report_out()
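
A small worked example of the counter relationships documented in the docstring above, using hypothetical numbers rather than values from a real run:

# Hypothetical run, chosen to satisfy the documented relationships.
total = 10                                          # yaml test scenarios * applicable platforms
skipped_filter = 2                                  # statically filtered instances
skipped_runtime = 1                                 # filtered while running the pipeline
skipped_configs = skipped_runtime + skipped_filter  # 3
passed, failed, error = 5, 1, 1                     # notrun assumed 0 here
done = skipped_configs + passed + failed + error    # 10, every instance reached report_out
completed = done - skipped_filter                   # 8
pass_rate = passed / (total - skipped_configs)      # 5 / 7 ~= 0.714

cases, filtered_cases, skipped_cases, passed_cases = 20, 4, 2, 12
case_pass_rate = passed_cases / (cases - filtered_cases - skipped_cases)  # 12 / 14 ~= 0.857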
@ -92,13 +97,10 @@ class ExecutionCounter(object):
# updated by report_out()
self._skipped_runtime = Value('i', 0)
# staic filtered at yaml parsing time
# static filtered at yaml parsing time
# updated by update_counting_before_pipeline()
self._skipped_filter = Value('i', 0)
# updated by update_counting_before_pipeline() and report_out()
self._skipped_cases = Value('i', 0)
# updated by report_out() in pipeline
self._error = Value('i', 0)
self._failed = Value('i', 0)
@ -106,25 +108,83 @@ class ExecutionCounter(object):
# initialized to number of test instances
self._total = Value('i', total)
#######################################
# TestCase counters for all instances #
#######################################
# updated in report_out
self._cases = Value('i', 0)
# updated by update_counting_before_pipeline() and report_out()
self._skipped_cases = Value('i', 0)
self._filtered_cases = Value('i', 0)
# updated by report_out() in pipeline
self._passed_cases = Value('i', 0)
self._notrun_cases = Value('i', 0)
self._failed_cases = Value('i', 0)
self._error_cases = Value('i', 0)
self._blocked_cases = Value('i', 0)
# Incorrect statuses
self._none_cases = Value('i', 0)
self._started_cases = Value('i', 0)
self.lock = Lock()
@staticmethod
def _find_number_length(n):
if n > 0:
length = int(log10(n))+1
elif n == 0:
length = 1
else:
length = int(log10(-n))+2
return length
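# e.g. _find_number_length(0) == 1, _find_number_length(999) == 3,
# _find_number_length(-42) == 3 (one extra column for the minus sign)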
def summary(self):
print("--------------------------------")
print(f"Total test suites: {self.total}") # actually test instances
print(f"Total test cases: {self.cases}")
print(f"Executed test cases: {self.cases - self.skipped_cases}")
print(f"Skipped test cases: {self.skipped_cases}")
print(f"Completed test suites: {self.done}")
print(f"Passing test suites: {self.passed}")
print(f"Built only test suites: {self.notrun}")
print(f"Failing test suites: {self.failed}")
print(f"Skipped test suites: {self.skipped_configs}")
print(f"Skipped test suites (runtime): {self.skipped_runtime}")
print(f"Skipped test suites (filter): {self.skipped_filter}")
print(f"Errors: {self.error}")
print("--------------------------------")
executed_cases = self.cases - self.skipped_cases - self.filtered_cases
completed_configs = self.done - self.skipped_filter
# Find alignment length for aesthetic printing
suites_n_length = self._find_number_length(self.total if self.total > self.done else self.done)
processed_suites_n_length = self._find_number_length(self.done)
completed_suites_n_length = self._find_number_length(completed_configs)
skipped_suites_n_length = self._find_number_length(self.skipped_configs)
total_cases_n_length = self._find_number_length(self.cases)
executed_cases_n_length = self._find_number_length(executed_cases)
print("--------------------------------------------------")
print(f"{'Total test suites: ':<23}{self.total:>{suites_n_length}}") # actually test instances
print(f"{'Processed test suites: ':<23}{self.done:>{suites_n_length}}")
print(f"├─ {'Filtered test suites (static): ':<37}{self.skipped_filter:>{processed_suites_n_length}}")
print(f"└─ {'Completed test suites: ':<37}{completed_configs:>{processed_suites_n_length}}")
print(f" ├─ {'Filtered test suites (at runtime): ':<37}{self.skipped_runtime:>{completed_suites_n_length}}")
print(f" ├─ {'Passed test suites: ':<37}{self.passed:>{completed_suites_n_length}}")
print(f" ├─ {'Built only test suites: ':<37}{self.notrun:>{completed_suites_n_length}}")
print(f" ├─ {'Failed test suites: ':<37}{self.failed:>{completed_suites_n_length}}")
print(f" └─ {'Errors in test suites: ':<37}{self.error:>{completed_suites_n_length}}")
print(f"")
print(f"{'Filtered test suites: ':<21}{self.skipped_configs}")
print(f"├─ {'Filtered test suites (static): ':<37}{self.skipped_filter:>{skipped_suites_n_length}}")
print(f"└─ {'Filtered test suites (at runtime): ':<37}{self.skipped_runtime:>{skipped_suites_n_length}}")
print("---------------------- ----------------------")
print(f"{'Total test cases: ':<18}{self.cases}")
print(f"├─ {'Filtered test cases: ':<21}{self.filtered_cases:>{total_cases_n_length}}")
print(f"├─ {'Skipped test cases: ':<21}{self.skipped_cases:>{total_cases_n_length}}")
print(f"└─ {'Executed test cases: ':<21}{executed_cases:>{total_cases_n_length}}")
print(f" ├─ {'Passed test cases: ':<25}{self.passed_cases:>{executed_cases_n_length}}")
print(f" ├─ {'Built only test cases: ':<25}{self.notrun_cases:>{executed_cases_n_length}}")
print(f" ├─ {'Blocked test cases: ':<25}{self.blocked_cases:>{executed_cases_n_length}}")
print(f" ├─ {'Failed test cases: ':<25}{self.failed_cases:>{executed_cases_n_length}}")
print(f" {'' if self.none_cases or self.started_cases else ''}{'Errors in test cases: ':<25}{self.error_cases:>{executed_cases_n_length}}")
if self.none_cases or self.started_cases:
print(f" ├──── The following test case statuses should not appear in a proper execution ───")
if self.none_cases:
print(f" {'' if self.started_cases else ''}{'Statusless test cases: ':<25}{self.none_cases:>{executed_cases_n_length}}")
if self.started_cases:
print(f" └─ {'Test cases only started: ':<25}{self.started_cases:>{executed_cases_n_length}}")
print("--------------------------------------------------")
@property
def cases(self):
@ -136,6 +196,10 @@ class ExecutionCounter(object):
with self._cases.get_lock():
self._cases.value = value
def cases_increment(self, value=1):
with self._cases.get_lock():
self._cases.value += value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
@ -146,6 +210,122 @@ class ExecutionCounter(object):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
def skipped_cases_increment(self, value=1):
with self._skipped_cases.get_lock():
self._skipped_cases.value += value
@property
def filtered_cases(self):
with self._filtered_cases.get_lock():
return self._filtered_cases.value
@filtered_cases.setter
def filtered_cases(self, value):
with self._filtered_cases.get_lock():
self._filtered_cases.value = value
def filtered_cases_increment(self, value=1):
with self._filtered_cases.get_lock():
self._filtered_cases.value += value
@property
def passed_cases(self):
with self._passed_cases.get_lock():
return self._passed_cases.value
@passed_cases.setter
def passed_cases(self, value):
with self._passed_cases.get_lock():
self._passed_cases.value = value
def passed_cases_increment(self, value=1):
with self._passed_cases.get_lock():
self._passed_cases.value += value
@property
def notrun_cases(self):
with self._notrun_cases.get_lock():
return self._notrun_cases.value
@notrun_cases.setter
def notrun_cases(self, value):
with self._notrun_cases.get_lock():
self._notrun_cases.value = value
def notrun_cases_increment(self, value=1):
with self._notrun_cases.get_lock():
self._notrun_cases.value += value
@property
def failed_cases(self):
with self._failed_cases.get_lock():
return self._failed_cases.value
@failed_cases.setter
def failed_cases(self, value):
with self._failed_cases.get_lock():
self._failed_cases.value = value
def failed_cases_increment(self, value=1):
with self._failed_cases.get_lock():
self._failed_cases.value += value
@property
def error_cases(self):
with self._error_cases.get_lock():
return self._error_cases.value
@error_cases.setter
def error_cases(self, value):
with self._error_cases.get_lock():
self._error_cases.value = value
def error_cases_increment(self, value=1):
with self._error_cases.get_lock():
self._error_cases.value += value
@property
def blocked_cases(self):
with self._blocked_cases.get_lock():
return self._blocked_cases.value
@blocked_cases.setter
def blocked_cases(self, value):
with self._blocked_cases.get_lock():
self._blocked_cases.value = value
def blocked_cases_increment(self, value=1):
with self._blocked_cases.get_lock():
self._blocked_cases.value += value
@property
def none_cases(self):
with self._none_cases.get_lock():
return self._none_cases.value
@none_cases.setter
def none_cases(self, value):
with self._none_cases.get_lock():
self._none_cases.value = value
def none_cases_increment(self, value=1):
with self._none_cases.get_lock():
self._none_cases.value += value
@property
def started_cases(self):
with self._started_cases.get_lock():
return self._started_cases.value
@started_cases.setter
def started_cases(self, value):
with self._started_cases.get_lock():
self._started_cases.value = value
def started_cases_increment(self, value=1):
with self._started_cases.get_lock():
self._started_cases.value += value
@property
def error(self):
with self._error.get_lock():
@ -156,6 +336,10 @@ class ExecutionCounter(object):
with self._error.get_lock():
self._error.value = value
def error_increment(self, value=1):
with self._error.get_lock():
self._error.value += value
@property
def iteration(self):
with self._iteration.get_lock():
@ -166,6 +350,10 @@ class ExecutionCounter(object):
with self._iteration.get_lock():
self._iteration.value = value
def iteration_increment(self, value=1):
with self._iteration.get_lock():
self._iteration.value += value
@property
def done(self):
with self._done.get_lock():
@ -176,6 +364,10 @@ class ExecutionCounter(object):
with self._done.get_lock():
self._done.value = value
def done_increment(self, value=1):
with self._done.get_lock():
self._done.value += value
@property
def passed(self):
with self._passed.get_lock():
@ -186,6 +378,10 @@ class ExecutionCounter(object):
with self._passed.get_lock():
self._passed.value = value
def passed_increment(self, value=1):
with self._passed.get_lock():
self._passed.value += value
@property
def notrun(self):
with self._notrun.get_lock():
@ -196,6 +392,10 @@ class ExecutionCounter(object):
with self._notrun.get_lock():
self._notrun.value = value
def notrun_increment(self, value=1):
with self._notrun.get_lock():
self._notrun.value += value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
@ -206,6 +406,10 @@ class ExecutionCounter(object):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
def skipped_configs_increment(self, value=1):
with self._skipped_configs.get_lock():
self._skipped_configs.value += value
@property
def skipped_filter(self):
with self._skipped_filter.get_lock():
@ -216,6 +420,10 @@ class ExecutionCounter(object):
with self._skipped_filter.get_lock():
self._skipped_filter.value = value
def skipped_filter_increment(self, value=1):
with self._skipped_filter.get_lock():
self._skipped_filter.value += value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
@ -226,6 +434,10 @@ class ExecutionCounter(object):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
def skipped_runtime_increment(self, value=1):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value += value
@property
def failed(self):
with self._failed.get_lock():
@ -236,11 +448,24 @@ class ExecutionCounter(object):
with self._failed.get_lock():
self._failed.value = value
def failed_increment(self, value=1):
with self._failed.get_lock():
self._failed.value += value
@property
def total(self):
with self._total.get_lock():
return self._total.value
@total.setter
def total(self, value):
with self._total.get_lock():
self._total.value = value
def total_increment(self, value=1):
with self._total.get_lock():
self._total.value += value
class CMake:
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
@ -652,7 +877,7 @@ class ProjectBuilder(FilterBuilder):
logger.debug("filtering %s" % self.instance.name)
self.instance.status = TwisterStatus.FILTER
self.instance.reason = "runtime filter"
results.skipped_runtime += 1
results.skipped_runtime_increment()
self.instance.add_missing_case_status(TwisterStatus.SKIP)
next_op = 'report'
else:
@ -683,7 +908,7 @@ class ProjectBuilder(FilterBuilder):
logger.debug("filtering %s" % self.instance.name)
self.instance.status = TwisterStatus.FILTER
self.instance.reason = "runtime filter"
results.skipped_runtime += 1
results.skipped_runtime_increment()
self.instance.add_missing_case_status(TwisterStatus.SKIP)
next_op = 'report'
else:
@ -710,7 +935,7 @@ class ProjectBuilder(FilterBuilder):
# Count skipped cases during build, for example
# due to ram/rom overflow.
if self.instance.status == TwisterStatus.SKIP:
results.skipped_runtime += 1
results.skipped_runtime_increment()
self.instance.add_missing_case_status(TwisterStatus.SKIP, self.instance.reason)
if ret.get('returncode', 1) > 0:
@ -1069,52 +1294,84 @@ class ProjectBuilder(FilterBuilder):
with open(file_path, "wt") as file:
file.write(data)
@staticmethod
def _add_instance_testcases_to_status_counts(instance, results, decrement=False):
increment_value = -1 if decrement else 1
for tc in instance.testcases:
match tc.status:
case TwisterStatus.PASS:
results.passed_cases_increment(increment_value)
case TwisterStatus.NOTRUN:
results.notrun_cases_increment(increment_value)
case TwisterStatus.BLOCK:
results.blocked_cases_increment(increment_value)
case TwisterStatus.SKIP:
results.skipped_cases_increment(increment_value)
case TwisterStatus.FILTER:
results.filtered_cases_increment(increment_value)
case TwisterStatus.ERROR:
results.error_cases_increment(increment_value)
case TwisterStatus.FAIL:
results.failed_cases_increment(increment_value)
# Statuses that should not appear.
# Crashing Twister at this point would be counterproductive,
# but having those statuses in this part of processing is an error.
case TwisterStatus.NONE:
results.none_cases_increment(increment_value)
logger.error(f'A None status detected in instance {instance.name},'
f' test case {tc.name}.')
case TwisterStatus.STARTED:
results.started_cases_increment(increment_value)
logger.error(f'A started status detected in instance {instance.name},'
f' test case {tc.name}.')
case _:
logger.error(f'An unknown status "{tc.status}" detected in instance {instance.name},'
f' test case {tc.name}.')
def report_out(self, results):
total_to_do = results.total
total_to_do = results.total - results.skipped_filter
total_tests_width = len(str(total_to_do))
results.done += 1
results.done_increment()
instance = self.instance
if results.iteration == 1:
results.cases += len(instance.testcases)
results.cases_increment(len(instance.testcases))
self._add_instance_testcases_to_status_counts(instance, results)
status = f'{TwisterStatus.get_color(instance.status)}{str.upper(instance.status)}{Fore.RESET}'
if instance.status in [TwisterStatus.ERROR, TwisterStatus.FAIL]:
if instance.status == TwisterStatus.ERROR:
results.error += 1
txt = " ERROR "
results.error_increment()
else:
results.failed += 1
txt = " FAILED "
results.failed_increment()
if self.options.verbose:
status = Fore.RED + txt + Fore.RESET + instance.reason
status += " " + instance.reason
else:
logger.error(
"{:<25} {:<50} {}{}{}: {}".format(
"{:<25} {:<50} {}: {}".format(
instance.platform.name,
instance.testsuite.name,
Fore.RED,
txt,
Fore.RESET,
status,
instance.reason))
if not self.options.verbose:
self.log_info_file(self.options.inline_logs)
elif instance.status in [TwisterStatus.SKIP, TwisterStatus.FILTER]:
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
results.skipped_configs += 1
# test cases skipped at the test instance level
results.skipped_cases += len(instance.testsuite.testcases)
elif instance.status == TwisterStatus.SKIP:
results.skipped_configs_increment()
elif instance.status == TwisterStatus.FILTER:
results.skipped_configs_increment()
elif instance.status == TwisterStatus.PASS:
status = Fore.GREEN + "PASSED" + Fore.RESET
results.passed += 1
results.passed_increment()
for case in instance.testcases:
# test cases skipped at the test case level
if case.status == TwisterStatus.SKIP:
results.skipped_cases += 1
results.skipped_cases_increment()
elif instance.status == TwisterStatus.NOTRUN:
status = Fore.CYAN + "NOT RUN" + Fore.RESET
results.notrun += 1
results.notrun_increment()
for case in instance.testcases:
if case.status == TwisterStatus.SKIP:
results.skipped_cases += 1
results.skipped_cases_increment()
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
@ -1140,7 +1397,7 @@ class ProjectBuilder(FilterBuilder):
and self.instance.handler.seed is not None ):
more_info += "/seed: " + str(self.options.seed)
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done, total_tests_width, total_to_do , instance.platform.name,
results.done - results.skipped_filter, total_tests_width, total_to_do , instance.platform.name,
instance.testsuite.name, status, more_info))
if self.options.verbose > 1:
@ -1155,22 +1412,24 @@ class ProjectBuilder(FilterBuilder):
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done) / total_to_do) * 100)
completed_perc = int((float(results.done - results.skipped_filter) / total_to_do) * 100)
sys.stdout.write("INFO - Total complete: %s%4d/%4d%s %2d%% built (not run): %4d, skipped: %s%4d%s, failed: %s%4d%s, error: %s%4d%s\r" % (
Fore.GREEN,
results.done,
sys.stdout.write("INFO - Total complete: %s%4d/%4d%s %2d%% built (not run): %s%4d%s, filtered: %s%4d%s, failed: %s%4d%s, error: %s%4d%s\r" % (
TwisterStatus.get_color(TwisterStatus.PASS),
results.done - results.skipped_filter,
total_to_do,
Fore.RESET,
completed_perc,
TwisterStatus.get_color(TwisterStatus.NOTRUN),
results.notrun,
Fore.YELLOW if results.skipped_configs > 0 else Fore.RESET,
Fore.RESET,
TwisterStatus.get_color(TwisterStatus.SKIP) if results.skipped_configs > 0 else Fore.RESET,
results.skipped_configs,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
TwisterStatus.get_color(TwisterStatus.FAIL) if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET,
Fore.RED if results.error > 0 else Fore.RESET,
TwisterStatus.get_color(TwisterStatus.ERROR) if results.error > 0 else Fore.RESET,
results.error,
Fore.RESET
)
@ -1377,15 +1636,16 @@ class TwisterRunner:
self.update_counting_before_pipeline()
while True:
self.results.iteration += 1
self.results.iteration_increment()
if self.results.iteration > 1:
logger.info("%d Iteration:" % (self.results.iteration))
time.sleep(self.options.retry_interval) # waiting for the system to settle down
self.results.done = self.results.total - self.results.failed - self.results.error
self.results.done = self.results.total - self.results.failed
self.results.failed = 0
if self.options.retry_build_errors:
self.results.error = 0
self.results.done -= self.results.error
else:
self.results.done = self.results.skipped_filter
@ -1422,16 +1682,16 @@ class TwisterRunner:
'''
for instance in self.instances.values():
if instance.status == TwisterStatus.FILTER and not instance.reason == 'runtime filter':
self.results.skipped_filter += 1
self.results.skipped_configs += 1
self.results.skipped_cases += len(instance.testsuite.testcases)
self.results.cases += len(instance.testsuite.testcases)
self.results.skipped_filter_increment()
self.results.skipped_configs_increment()
self.results.filtered_cases_increment(len(instance.testsuite.testcases))
self.results.cases_increment(len(instance.testsuite.testcases))
elif instance.status == TwisterStatus.ERROR:
self.results.error += 1
self.results.error_increment()
def show_brief(self):
logger.info("%d test scenarios (%d test instances) selected, "
"%d configurations skipped (%d by static filter, %d at runtime)." %
"%d configurations filtered (%d by static filter, %d at runtime)." %
(len(self.suites), len(self.instances),
self.results.skipped_configs,
self.results.skipped_filter,
@ -1451,6 +1711,9 @@ class TwisterRunner:
if instance.status != TwisterStatus.NONE:
instance.retries += 1
instance.status = TwisterStatus.NONE
# Previous states should be removed from the stats
if self.results.iteration > 1:
ProjectBuilder._add_instance_testcases_to_status_counts(instance, self.results, decrement=True)
# Check if cmake package_helper script can be run in advance.
instance.filter_stages = []
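
For reference, a minimal, self-contained sketch of the Value-plus-lock property/setter/increment pattern that ExecutionCounter repeats for each statistic above; SafeCounter is a hypothetical name, not part of this change:

from multiprocessing import Value

class SafeCounter:
    def __init__(self):
        self._passed = Value('i', 0)

    @property
    def passed(self):
        with self._passed.get_lock():
            return self._passed.value

    @passed.setter
    def passed(self, value):
        with self._passed.get_lock():
            self._passed.value = value

    def passed_increment(self, value=1):
        # get_lock() makes the read-modify-write atomic across worker processes.
        with self._passed.get_lock():
            self._passed.value += value

counter = SafeCounter()
counter.passed_increment()
assert counter.passed == 1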

View file

@ -206,20 +206,31 @@ def test_executioncounter(capfd):
sys.stderr.write(err)
assert (
f'--------------------------------\n'
f'Total test suites: 12\n'
f'Total test cases: 25\n'
f'Executed test cases: 19\n'
f'Skipped test cases: 6\n'
f'Completed test suites: 9\n'
f'Passing test suites: 6\n'
f'Built only test suites: 0\n'
f'Failing test suites: 1\n'
f'Skipped test suites: 3\n'
f'Skipped test suites (runtime): 1\n'
f'Skipped test suites (filter): 2\n'
f'Errors: 2\n'
f'--------------------------------'
'--------------------------------------------------\n'
'Total test suites: 12\n'
'Processed test suites: 9\n'
'├─ Filtered test suites (static): 2\n'
'└─ Completed test suites: 7\n'
' ├─ Filtered test suites (at runtime): 1\n'
' ├─ Passed test suites: 6\n'
' ├─ Built only test suites: 0\n'
' ├─ Failed test suites: 1\n'
' └─ Errors in test suites: 2\n'
'\n'
'Filtered test suites: 3\n'
'├─ Filtered test suites (static): 2\n'
'└─ Filtered test suites (at runtime): 1\n'
'---------------------- ----------------------\n'
'Total test cases: 25\n'
'├─ Filtered test cases: 0\n'
'├─ Skipped test cases: 6\n'
'└─ Executed test cases: 19\n'
' ├─ Passed test cases: 0\n'
' ├─ Built only test cases: 0\n'
' ├─ Blocked test cases: 0\n'
' ├─ Failed test cases: 0\n'
' └─ Errors in test cases: 0\n'
'--------------------------------------------------\n'
) in out
assert ec.cases == 25
@ -1547,7 +1558,7 @@ def test_projectbuilder_process(
assert pb.instance.status == expected_status
assert pb.instance.reason == expected_reason
assert results_mock.skipped_runtime == expected_skipped
assert results_mock.skipped_runtime_increment.call_args_list == [mock.call()] * expected_skipped
if expected_missing:
pb.instance.add_missing_case_status.assert_called_with(*expected_missing)
@ -1936,16 +1947,16 @@ TESTDATA_13 = [
TwisterStatus.ERROR, True, True, False,
['INFO 20/25 dummy platform' \
' dummy.testsuite.name' \
' ERROR dummy reason (cmake)'],
' ERROR dummy reason (cmake)'],
None
),
(
TwisterStatus.FAIL, False, False, False,
['ERROR dummy platform' \
' dummy.testsuite.name' \
' FAILED : dummy reason'],
' FAILED: dummy reason'],
'INFO - Total complete: 20/ 25 80%' \
' built (not run): 0, skipped: 3, failed: 3, error: 1'
' built (not run): 0, filtered: 3, failed: 3, error: 1'
),
(
TwisterStatus.SKIP, True, False, False,
@ -1958,7 +1969,7 @@ TESTDATA_13 = [
TwisterStatus.FILTER, False, False, False,
[],
'INFO - Total complete: 20/ 25 80%' \
' built (not run): 0, skipped: 4, failed: 2, error: 1'
' built (not run): 0, filtered: 4, failed: 2, error: 1'
),
(
TwisterStatus.PASS, True, False, True,
@ -1979,7 +1990,7 @@ TESTDATA_13 = [
'unknown status', False, False, False,
['Unknown status = unknown status'],
'INFO - Total complete: 20/ 25 80%'
' built (not run): 0, skipped: 3, failed: 2, error: 1\r'
' built (not run): 0, filtered: 3, failed: 2, error: 1\r'
)
]
@ -2026,21 +2037,49 @@ def test_projectbuilder_report_out(
pb.options.seed = 123
pb.log_info_file = mock.Mock()
results_mock = mock.Mock()
results_mock = mock.Mock(
total = 25,
done = 19,
passed = 17,
notrun = 0,
failed = 2,
skipped_configs = 3,
skipped_runtime = 0,
skipped_filter = 0,
error = 1,
cases = 0,
filtered_cases = 0,
skipped_cases = 4,
failed_cases = 0,
error_cases = 0,
blocked_cases = 0,
passed_cases = 0,
none_cases = 0,
started_cases = 0
)
results_mock.iteration = 1
results_mock.total = 25
results_mock.done = 19
results_mock.passed = 17
results_mock.notrun = 0
results_mock.skipped_configs = 3
results_mock.skipped_cases = 4
results_mock.failed = 2
results_mock.error = 1
results_mock.cases = 0
def results_done_increment(value=1, decrement=False):
results_mock.done += value * (-1 if decrement else 1)
results_mock.done_increment = results_done_increment
def skipped_configs_increment(value=1, decrement=False):
results_mock.skipped_configs += value * (-1 if decrement else 1)
results_mock.skipped_configs_increment = skipped_configs_increment
def skipped_filter_increment(value=1, decrement=False):
results_mock.skipped_filter += value * (-1 if decrement else 1)
results_mock.skipped_filter_increment = skipped_filter_increment
def skipped_runtime_increment(value=1, decrement=False):
results_mock.skipped_runtime += value * (-1 if decrement else 1)
results_mock.skipped_runtime_increment = skipped_runtime_increment
def failed_increment(value=1, decrement=False):
results_mock.failed += value * (-1 if decrement else 1)
results_mock.failed_increment = failed_increment
def notrun_increment(value=1, decrement=False):
results_mock.notrun += value * (-1 if decrement else 1)
results_mock.notrun_increment = notrun_increment
pb.report_out(results_mock)
assert results_mock.cases == 25
assert results_mock.cases_increment.call_args_list == [mock.call(25)]
trim_actual_log = re.sub(
r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])',
@ -2449,6 +2488,10 @@ def test_twisterrunner_run(
results_mock().failed = 2
results_mock().total = 9
def iteration_increment(value=1, decrement=False):
results_mock().iteration += value * (-1 if decrement else 1)
results_mock().iteration_increment = iteration_increment
with mock.patch('twisterlib.runner.ExecutionCounter', results_mock), \
mock.patch('twisterlib.runner.BaseManager', manager_mock), \
mock.patch('twisterlib.runner.GNUMakeJobClient.from_environ',
@ -2519,18 +2562,45 @@ def test_twisterrunner_update_counting_before_pipeline():
tr = TwisterRunner(instances, suites, env=env_mock)
tr.results = mock.Mock(
skipped_filter = 0,
total = 0,
done = 0,
passed = 0,
failed = 0,
skipped_configs = 0,
skipped_cases = 0,
skipped_runtime = 0,
skipped_filter = 0,
error = 0,
cases = 0,
error = 0
filtered_cases = 0,
skipped_cases = 0,
failed_cases = 0,
error_cases = 0,
blocked_cases = 0,
passed_cases = 0,
none_cases = 0,
started_cases = 0
)
def skipped_configs_increment(value=1, decrement=False):
tr.results.skipped_configs += value * (-1 if decrement else 1)
tr.results.skipped_configs_increment = skipped_configs_increment
def skipped_filter_increment(value=1, decrement=False):
tr.results.skipped_filter += value * (-1 if decrement else 1)
tr.results.skipped_filter_increment = skipped_filter_increment
def error_increment(value=1, decrement=False):
tr.results.error += value * (-1 if decrement else 1)
tr.results.error_increment = error_increment
def cases_increment(value=1, decrement=False):
tr.results.cases += value * (-1 if decrement else 1)
tr.results.cases_increment = cases_increment
def filtered_cases_increment(value=1, decrement=False):
tr.results.filtered_cases += value * (-1 if decrement else 1)
tr.results.filtered_cases_increment = filtered_cases_increment
tr.update_counting_before_pipeline()
assert tr.results.skipped_filter == 1
assert tr.results.skipped_configs == 1
assert tr.results.skipped_cases == 4
assert tr.results.filtered_cases == 4
assert tr.results.cases == 4
assert tr.results.error == 1
@ -2558,7 +2628,7 @@ def test_twisterrunner_show_brief(caplog):
tr.show_brief()
log = '2 test scenarios (5 test instances) selected,' \
' 4 configurations skipped (3 by static filter, 1 at runtime).'
' 4 configurations filtered (3 by static filter, 1 at runtime).'
assert log in caplog.text
@ -2609,6 +2679,7 @@ def test_twisterrunner_add_tasks_to_queue(
tr.get_cmake_filter_stages = mock.Mock(
side_effect=mock_get_cmake_filter_stages
)
tr.results = mock.Mock(iteration=0)
pipeline_mock = mock.Mock()
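
The tests above replace the new *_increment methods on the ExecutionCounter mock with small closures that mutate the mock's plain integer attributes. A simplified sketch of that pattern (the decrement keyword used in the tests is omitted here):

from unittest import mock

results_mock = mock.Mock(done=0)

def done_increment(value=1):
    # Mirror ExecutionCounter.done_increment by mutating the mock's attribute.
    results_mock.done += value

results_mock.done_increment = done_increment

results_mock.done_increment()   # what the code under test calls
assert results_mock.done == 1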

View file

@ -13,6 +13,7 @@ import pytest
import sys
import re
# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
@ -71,5 +72,5 @@ class TestDevice:
assert str(sys_exit.value) == '1'
expected_line = r'seed_native_sim.dummy FAILED Failed \(native (\d+\.\d+)s/seed: {}\)'.format(seed[0])
expected_line = r'seed_native_sim.dummy FAILED Failed \(native (\d+\.\d+)s/seed: {}\)'.format(seed[0])
assert re.search(expected_line, err)

View file

@ -13,6 +13,7 @@ import pytest
import sys
import re
# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
from twisterlib.error import TwisterRuntimeError
@ -45,7 +46,7 @@ class TestError:
),
(
'--overflow-as-errors',
r'always_overflow.dummy ERROR Build failure \(build\)'
r'always_overflow.dummy ERROR Build failure \(build\)'
)
]

View file

@ -28,6 +28,7 @@ class TestPlatform:
{
'selected_test_scenarios': 3,
'selected_test_instances': 9,
'executed_test_instances': 6,
'skipped_configurations': 3,
'skipped_by_static_filter': 3,
'skipped_at_runtime': 0,
@ -48,6 +49,7 @@ class TestPlatform:
{
'selected_test_scenarios': 1,
'selected_test_instances': 3,
'executed_test_instances': 0,
'skipped_configurations': 3,
'skipped_by_static_filter': 3,
'skipped_at_runtime': 0,
@ -190,8 +192,9 @@ class TestPlatform:
os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
['qemu_x86', 'qemu_x86_64'],
{
'passed_configurations': 2,
'selected_test_instances': 6,
'passed_configurations': 2,
'executed_test_instances': 3,
'executed_on_platform': 2,
'only_built': 1,
}
@ -217,7 +220,7 @@ class TestPlatform:
sys.stderr.write(err)
pass_regex = r'^INFO - (?P<passed_configurations>[0-9]+) of' \
r' (?P<test_instances>[0-9]+) test configurations passed'
r' (?P<test_instances>[0-9]+) executed test configurations passed'
built_regex = r'^INFO - (?P<executed_on_platform>[0-9]+)' \
r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
@ -229,7 +232,7 @@ class TestPlatform:
assert int(pass_search.group('passed_configurations')) == \
expected['passed_configurations']
assert int(pass_search.group('test_instances')) == \
expected['selected_test_instances']
expected['executed_test_instances']
built_search = re.search(built_regex, err, re.MULTILINE)
@ -262,22 +265,31 @@ class TestPlatform:
select_regex = r'^INFO - (?P<test_scenarios>[0-9]+) test scenarios' \
r' \((?P<test_instances>[0-9]+) test instances\) selected,' \
r' (?P<skipped_configurations>[0-9]+) configurations skipped' \
r' (?P<skipped_configurations>[0-9]+) configurations filtered' \
r' \((?P<skipped_by_static_filter>[0-9]+) by static filter,' \
r' (?P<skipped_at_runtime>[0-9]+) at runtime\)\.$'
pass_regex = r'^INFO - (?P<passed_configurations>[0-9]+) of' \
r' (?P<test_instances>[0-9]+) test configurations passed' \
r' (?P<executed_test_instances>[0-9]+) executed test configurations passed' \
r' \([0-9]+\.[0-9]+%\), (?P<built_configurations>[0-9]+) built \(not run\),' \
r' (?P<failed_configurations>[0-9]+) failed,' \
r' (?P<errored_configurations>[0-9]+) errored,' \
r' (?P<skipped_configurations>[0-9]+) skipped with' \
r' [0-9]+ warnings in [0-9]+\.[0-9]+ seconds$'
r' with (?:[0-9]+|no) warnings in [0-9]+\.[0-9]+ seconds.$'
case_regex = r'^INFO - In total (?P<executed_test_cases>[0-9]+)' \
r' test cases were executed, (?P<skipped_test_cases>[0-9]+) skipped' \
r' on (?P<platform_count>[0-9]+) out of total [0-9]+ platforms' \
r' \([0-9]+\.[0-9]+%\)$'
case_regex = r'^INFO - (?P<passed_cases>[0-9]+) of' \
r' (?P<executed_test_cases>[0-9]+) executed test cases passed' \
r' \([0-9]+\.[0-9]+%\)' \
r'(?:, (?P<blocked_cases>[0-9]+) blocked)?' \
r'(?:, (?P<failed_cases>[0-9]+) failed)?' \
r'(?:, (?P<errored_cases>[0-9]+) errored)?' \
r'(?:, (?P<none_cases>[0-9]+) without a status)?' \
r' on (?P<platform_count>[0-9]+) out of total' \
r' (?P<total_platform_count>[0-9]+) platforms \([0-9]+\.[0-9]+%\)'
skip_regex = r'(?P<skipped_test_cases>[0-9]+) selected test cases not executed:' \
r'(?: (?P<skipped_cases>[0-9]+) skipped)?' \
r'(?:, (?P<filtered_cases>[0-9]+) filtered)?' \
r'.'
built_regex = r'^INFO - (?P<executed_on_platform>[0-9]+)' \
r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
@ -306,27 +318,32 @@ class TestPlatform:
assert pass_search
assert int(pass_search.group('passed_configurations')) == \
expected['passed_configurations']
assert int(pass_search.group('test_instances')) == \
expected['selected_test_instances']
assert int(pass_search.group('built_configurations')) == \
expected['built_configurations']
assert int(pass_search.group('failed_configurations')) == \
expected['failed_configurations']
assert int(pass_search.group('errored_configurations')) == \
expected['errored_configurations']
assert int(pass_search.group('skipped_configurations')) == \
expected['skipped_configurations']
assert int(pass_search.group('executed_test_instances')) == \
expected['executed_test_instances']
if expected['failed_configurations']:
assert int(pass_search.group('failed_configurations')) == \
expected['failed_configurations']
if expected['errored_configurations']:
assert int(pass_search.group('errored_configurations')) == \
expected['errored_configurations']
case_search = re.search(case_regex, err, re.MULTILINE)
assert case_search
assert int(case_search.group('executed_test_cases')) == \
expected['executed_test_cases']
assert int(case_search.group('skipped_test_cases')) == \
expected['skipped_test_cases']
assert int(case_search.group('platform_count')) == \
expected['platform_count']
if expected['skipped_test_cases']:
skip_search = re.search(skip_regex, err, re.MULTILINE)
assert skip_search
assert int(skip_search.group('skipped_test_cases')) == \
expected['skipped_test_cases']
built_search = re.search(built_regex, err, re.MULTILINE)
assert built_search
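
A hedged example of exercising the updated case_regex from the blackbox test above; the sample line is illustrative, not captured from a real Twister run:

import re

case_regex = r'^INFO - (?P<passed_cases>[0-9]+) of' \
    r' (?P<executed_test_cases>[0-9]+) executed test cases passed' \
    r' \([0-9]+\.[0-9]+%\)' \
    r'(?:, (?P<blocked_cases>[0-9]+) blocked)?' \
    r'(?:, (?P<failed_cases>[0-9]+) failed)?' \
    r'(?:, (?P<errored_cases>[0-9]+) errored)?' \
    r'(?:, (?P<none_cases>[0-9]+) without a status)?' \
    r' on (?P<platform_count>[0-9]+) out of total' \
    r' (?P<total_platform_count>[0-9]+) platforms \([0-9]+\.[0-9]+%\)'

line = ('INFO - 10 of 12 executed test cases passed (83.33%), 2 failed'
        ' on 3 out of total 5 platforms (60.00%).')
match = re.search(case_regex, line, re.MULTILINE)
assert match
assert match.group('failed_cases') == '2'
assert match.group('blocked_cases') is None   # optional groups stay None when absent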

View file

@ -131,7 +131,7 @@ class TestRunner:
['dummy.agnostic.group2 SKIPPED: Command line testsuite tag filter',
'dummy.agnostic.group1.subgroup2 SKIPPED: Command line testsuite tag filter',
'dummy.agnostic.group1.subgroup1 SKIPPED: Command line testsuite tag filter',
r'0 of 4 test configurations passed \(0.00%\), 0 built \(not run\), 0 failed, 0 errored, 4 skipped'
r'0 of 0 executed test configurations passed \(0.00%\), 0 built \(not run\), 0 failed, 0 errored'
]
),
(
@ -139,14 +139,14 @@ class TestRunner:
['qemu_x86/atom'],
['subgrouped'],
['dummy.agnostic.group2 SKIPPED: Command line testsuite tag filter',
r'1 of 4 test configurations passed \(50.00%\), 1 built \(not run\), 0 failed, 0 errored, 2 skipped'
r'1 of 2 executed test configurations passed \(50.00%\), 1 built \(not run\), 0 failed, 0 errored'
]
),
(
os.path.join(TEST_DATA, 'tests', 'dummy'),
['qemu_x86/atom'],
['agnostic', 'device'],
[r'2 of 4 test configurations passed \(66.67%\), 1 built \(not run\), 0 failed, 0 errored, 1 skipped']
[r'2 of 3 executed test configurations passed \(66.67%\), 1 built \(not run\), 0 failed, 0 errored']
),
]
TESTDATA_10 = [
@ -261,22 +261,32 @@ class TestRunner:
select_regex = r'^INFO - (?P<test_scenarios>[0-9]+) test scenarios' \
r' \((?P<test_instances>[0-9]+) test instances\) selected,' \
r' (?P<skipped_configurations>[0-9]+) configurations skipped' \
r' (?P<skipped_configurations>[0-9]+) configurations filtered' \
r' \((?P<skipped_by_static_filter>[0-9]+) by static filter,' \
r' (?P<skipped_at_runtime>[0-9]+) at runtime\)\.$'
pass_regex = r'^INFO - (?P<passed_configurations>[0-9]+) of' \
r' (?P<test_instances>[0-9]+) test configurations passed' \
r' (?P<test_instances>[0-9]+) executed test configurations passed' \
r' \([0-9]+\.[0-9]+%\), (?P<built_configurations>[0-9]+) built \(not run\),' \
r' (?P<failed_configurations>[0-9]+) failed,' \
r' (?P<errored_configurations>[0-9]+) errored,' \
r' (?P<skipped_configurations>[0-9]+) skipped with' \
r' [0-9]+ warnings in [0-9]+\.[0-9]+ seconds$'
r' (?P<errored_configurations>[0-9]+) errored, with' \
r' (?:[0-9]+|no) warnings in [0-9]+\.[0-9]+ seconds.$'
case_regex = r'^INFO - (?P<passed_cases>[0-9]+) of' \
r' (?P<executed_test_cases>[0-9]+) executed test cases passed' \
r' \([0-9]+\.[0-9]+%\)' \
r'(?:, (?P<blocked_cases>[0-9]+) blocked)?' \
r'(?:, (?P<failed_cases>[0-9]+) failed)?' \
r'(?:, (?P<errored_cases>[0-9]+) errored)?' \
r'(?:, (?P<none_cases>[0-9]+) without a status)?' \
r' on (?P<platform_count>[0-9]+) out of total' \
r' (?P<total_platform_count>[0-9]+) platforms \([0-9]+\.[0-9]+%\)'
skip_regex = r'(?P<skipped_test_cases>[0-9]+) selected test cases not executed:' \
r'(?: (?P<skipped_cases>[0-9]+) skipped)?' \
r'(?:, (?P<filtered_cases>[0-9]+) filtered)?' \
r'.'
case_regex = r'^INFO - In total (?P<executed_test_cases>[0-9]+)' \
r' test cases were executed, (?P<skipped_test_cases>[0-9]+) skipped' \
r' on (?P<platform_count>[0-9]+) out of total [0-9]+ platforms' \
r' \([0-9]+\.[0-9]+%\)$'
built_regex = r'^INFO - (?P<executed_on_platform>[0-9]+)' \
r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
r' test configurations were only built.$'
@ -312,19 +322,21 @@ class TestRunner:
expected['failed_configurations']
assert int(pass_search.group('errored_configurations')) == \
expected['errored_configurations']
assert int(pass_search.group('skipped_configurations')) == \
expected['skipped_configurations']
case_search = re.search(case_regex, err, re.MULTILINE)
assert case_search
assert int(case_search.group('executed_test_cases')) == \
expected['executed_test_cases']
assert int(case_search.group('skipped_test_cases')) == \
expected['skipped_test_cases']
assert int(case_search.group('platform_count')) == \
expected['platform_count']
if expected['skipped_test_cases']:
skip_search = re.search(skip_regex, err, re.MULTILINE)
assert skip_search
assert int(skip_search.group('skipped_test_cases')) == \
expected['skipped_test_cases']
built_search = re.search(built_regex, err, re.MULTILINE)
assert built_search
@ -384,7 +396,7 @@ class TestRunner:
sys.stderr.write(err)
pass_regex = r'^INFO - (?P<passed_configurations>[0-9]+) of' \
r' (?P<test_instances>[0-9]+) test configurations passed'
r' (?P<test_instances>[0-9]+) executed test configurations passed'
built_regex = r'^INFO - (?P<executed_on_platform>[0-9]+)' \
r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
@ -614,14 +626,18 @@ class TestRunner:
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
select_regex = r'^INFO - (?P<test_scenarios>[0-9]+) test scenarios' \
r' \((?P<test_instances>[0-9]+) test instances\) selected,' \
r' (?P<skipped_configurations>[0-9]+) configurations filtered' \
r' \((?P<skipped_by_static_filter>[0-9]+) by static filter,' \
r' (?P<skipped_at_runtime>[0-9]+) at runtime\)\.$'
pass_regex = r'^INFO - (?P<passed_configurations>[0-9]+) of' \
r' (?P<test_instances>[0-9]+) test configurations passed' \
r' (?P<test_instances>[0-9]+) executed test configurations passed' \
r' \([0-9]+\.[0-9]+%\), (?P<built_configurations>[0-9]+) built \(not run\),' \
r' (?P<failed_configurations>[0-9]+) failed,' \
r' (?P<errored_configurations>[0-9]+) errored,' \
r' (?P<skipped_configurations>[0-9]+) skipped with' \
r' [0-9]+ warnings in [0-9]+\.[0-9]+ seconds$'
r' (?P<errored_configurations>[0-9]+) errored, with' \
r' (?:[0-9]+|no) warnings in [0-9]+\.[0-9]+ seconds.$'
out, err = capfd.readouterr()
sys.stdout.write(out)
@ -631,6 +647,11 @@ class TestRunner:
assert re.search(
r'one_fail_one_pass.agnostic.group1.subgroup2 on qemu_x86/atom failed \(.*\)', err)
select_search = re.search(select_regex, err, re.MULTILINE)
assert int(select_search.group('skipped_configurations')) == \
expected['skipped_configurations']
pass_search = re.search(pass_regex, err, re.MULTILINE)
assert pass_search
@ -644,8 +665,6 @@ class TestRunner:
expected['failed_configurations']
assert int(pass_search.group('errored_configurations')) == \
expected['errored_configurations']
assert int(pass_search.group('skipped_configurations')) == \
expected['skipped_configurations']
assert str(sys_exit.value) == '1'