Add total vs cumulative time to tests + disable some tests on CI for speedup (#3039)

* Remove {next,step}* tests from all commands tests

This speeds up the CI test run. Those commands are already partially exercised
by other tests; that coverage may not be exhaustive, but it is good enough for now.

* Add total vs cumulative time for tests
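
For context: "total" here is the wall-clock time of the whole run, while
"cumulative" is the sum of the individual test durations, so with parallel
execution the cumulative figure is normally the larger one. A minimal sketch
with made-up numbers (not taken from this commit):

# Hypothetical per-test durations, as TestStats would accumulate them:
durations = [2.0, 3.0, 4.0]
cumulative = sum(durations)  # 9.00s of per-test work

# If all three tests ran concurrently, wall-clock "total" tracks the
# longest test rather than the sum:
total = max(durations)  # ~4.00s

print(f"Time Spent   : {total:.2f}s (cumulative: {cumulative:.2f}s)")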
Disconnect3d committed via GitHub
parent f5c91fb742
commit bea36c8e08

@@ -14,8 +14,14 @@ BINARY = tests.binaries.get("heap_bins.out")
 disallowed_commands = {
     # requires user input
     "ipi",
-    # takes too long
+    # Already tested by other tests & takes too long
     "pc",
+    "nextcall",
+    "nextjump",
     "nextproginstr",
+    "nextret",
+    "nextsyscall",
+    "stepret",
+    "stepsyscall",
 }
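
Presumably the harness consumes this set by filtering it out of the full
command list before generating the run-every-command smoke tests; a rough
sketch of that pattern (the names and list below are hypothetical, not the
actual test file):

# Hypothetical sketch of how such a deny-list is typically consumed;
# the real test file's logic may differ.
disallowed_commands = {"ipi", "pc", "nextret"}  # abridged stand-in for the set above
all_commands = ["ipi", "pc", "nextret", "context", "vmmap"]  # made-up list

# Only commands outside the deny-list get a smoke test generated.
commands_to_test = [c for c in all_commands if c not in disallowed_commands]
assert commands_to_test == ["context", "vmmap"]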

@@ -11,10 +11,22 @@ import tests
 REFERENCE_BINARY = tests.binaries.get("reference-binary.out")
 CRASH_SIMPLE_BINARY = tests.binaries.get("crash_simple.out.hardcoded")
 
+NEXT_COMMANDS = (
+    "pc",
+    "nextcall",
+    "nextjmp",
+    "nextproginstr",
+    "nextret",
+    "nextsyscall",
+    "stepret",
+    "stepsyscall",
+)
+
-def test_command_nextproginstr_binary_not_running():
-    out = gdb.execute("nextproginstr", to_string=True)
-    assert out == "nextproginstr: The program is not being run.\n"
+@pytest.mark.parametrize("command", NEXT_COMMANDS)
+def test_next_commands_binary_not_running(command):
+    out = gdb.execute(command, to_string=True)
+    assert out == f"{command}: The program is not being run.\n"
 
 def test_command_nextproginstr(start_binary):
@@ -48,10 +60,7 @@ def test_command_nextproginstr(start_binary):
     assert out == "The pc is already at the binary objfile code. Not stepping.\n"
 
-@pytest.mark.parametrize(
-    "command",
-    ("nextcall", "nextjump", "nextproginstr", "nextret", "nextsyscall", "stepret", "stepsyscall"),
-)
+@pytest.mark.parametrize("command", NEXT_COMMANDS)
 def test_next_command_doesnt_freeze_crashed_binary(start_binary, command):
     start_binary(CRASH_SIMPLE_BINARY)
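
Both tests now draw from the single NEXT_COMMANDS tuple, and pytest expands
each entry into its own test case. A self-contained illustration of that
mechanism (toy command list, not pwndbg's):

import pytest

COMMANDS = ("nextret", "stepret")  # toy stand-in for NEXT_COMMANDS

@pytest.mark.parametrize("command", COMMANDS)
def test_command_name_is_known(command):
    # pytest collects this as test_command_name_is_known[nextret]
    # and test_command_name_is_known[stepret] - one case per entry.
    assert command in COMMANDS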

@@ -165,6 +165,7 @@ def run_test(
 class TestStats:
     def __init__(self):
+        self.total_duration = 0
         self.fail_tests = 0
         self.pass_tests = 0
         self.skip_tests = 0
@@ -196,6 +197,9 @@ class TestStats:
             skip_reason = " " + (
                 process.stdout.split(test_status)[1].split("\n\n\x1b[33m")[0].replace("\n", "")
             )
 
+        self.total_duration += duration
+
         print(f"{test_case:<70} {test_status} {duration:.2f}s{skip_reason}")
 
         # Only show the output of failed tests unless the verbose flag was used
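
Taken together, the two TestStats hunks amount to the accumulation pattern
below (condensed sketch; the real handle_test_result receives the full test
result plus args and the test dir, trimmed here to just the timing):

class TestStats:
    """Condensed sketch: only the duration-tracking part of the class."""

    def __init__(self):
        self.total_duration = 0  # sum of per-test durations (cumulative)

    def handle_test_result(self, duration: float) -> None:
        # Every finished test contributes its own runtime, so this is
        # cumulative work done, not wall-clock time of the whole run.
        self.total_duration += duration

stats = TestStats()
for d in (0.5, 1.25):  # made-up durations
    stats.handle_test_result(d)
print(f"cumulative: {stats.total_duration:.2f}s")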
@@ -212,19 +216,15 @@ def run_tests_and_print_stats(
     gdbinit_path: str,
     test_dir_path: str,
 ):
+    start = time.time()
     stats = TestStats()
 
     if args.cov:
         print("Running tests with coverage")
-    start = time.time()
 
     if args.serial:
         for test in tests_list:
             result = run_test(test, args, gdb_path, gdbinit_path, reserve_port())
             stats.handle_test_result(result, args, test_dir_path)
     else:
-        print("")
-        print("Running tests in parallel")
+        print("\nRunning tests in parallel")
         with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
             for test in tests_list:
                 executor.submit(
@@ -234,15 +234,15 @@ def run_tests_and_print_stats(
                 )
 
     end = time.time()
-    seconds = int(end - start)
-    print(f"Tests completed in {seconds} seconds")
+    duration = end - start
 
     print("")
     print("*********************************")
     print("********* TESTS SUMMARY *********")
     print("*********************************")
-    print(f"Tests Passed: {stats.pass_tests}")
+    print(f"Time Spent   : {duration:.2f}s (cumulative: {stats.total_duration:.2f}s)")
+    print(f"Tests Passed : {stats.pass_tests}")
     print(f"Tests Skipped: {stats.skip_tests}")
-    print(f"Tests Failed: {stats.fail_tests}")
+    print(f"Tests Failed : {stats.fail_tests}")
 
     if stats.fail_tests != 0:
         print("\nFailing tests:")
