def emulate_crash_test():
    """Trigger an emulated bench crash via env flag; expect the EXIT-reason log line."""
    crash_pattern = r"\[error\].*Benchmark received 'EXIT' from .* with reason {emulated_crash,\n\s*nothing_to_see_here,\n\s*please_move_along},"
    run_failing_bench(
        scripts_dir + 'correct_script.erl',
        env={'emulate_bench_crash': 'true'},
        expected_log_message_regex=crash_pattern,
    )
def time_assertions_fail_test():
    """A BDL time assertion that cannot hold for 40s must fail the benchmark."""
    expected = r'''Benchmark result: FAILED.*1 assertions failed.*Assertion: \(\(p\*t > 40\) and \(not \(p\*t <= 40\)\)\).*was expected to hold for 40s.*but held for just'''
    run_failing_bench(
        scripts_bdl_dir + 'time_assertion_fail.bdl',
        env={},
        expected_log_message_regex=expected,
    )
def env_param_missing_test():
    """Running env.erl without its required var must report the unbound-var error."""
    expected = r'''Benchmark result: Unexpected error: {{{var_is_unbound,"pool_size"}'''
    run_failing_bench(
        scripts_dir + 'env.erl',
        env={},
        expected_log_message_regex=expected,
    )
def time_assertions_fail_test():
    """An Erlang-script time assertion that cannot hold for 40s must fail the benchmark."""
    expected = r'''Benchmark result: FAILED.*1 assertions failed.*Assertion: print > 40.*was expected to hold for 40s.*but held for just'''
    run_failing_bench(
        scripts_dir + 'time_assertion_fail.erl',
        env={},
        expected_log_message_regex=expected,
    )
def worker_provisioning_fail_test():
    """Provisioning a worker from a nonexistent git revision must fail at the provisioning stage."""
    worker_commit = 'this_revision_does_not_exist'
    # MZBENCH_REPO may be overridden in CI; fall back to the public repo.
    mzbench_repo = os.environ.get('MZBENCH_REPO', 'https://github.com/satori-com/mzbench')
    run_failing_bench(
        scripts_dir + 'worker_from_git.erl',
        env={'worker_branch': worker_commit, 'mzbench_repo': mzbench_repo},
        expected_log_message_regex=r"Stage 'pipeline - provisioning': failed",
    )
def worker_provisioning_fail_test():
    """Provisioning a worker from a nonexistent git revision must fail at the provisioning stage."""
    worker_commit = 'this_revision_does_not_exist'
    # MZBENCH_REPO may be overridden in CI; fall back to the public repo.
    mzbench_repo = os.environ.get('MZBENCH_REPO', 'https://github.com/mzbench/mzbench')
    run_failing_bench(
        scripts_dir + 'worker_from_git.erl',
        env={'worker_branch': worker_commit, 'mzbench_repo': mzbench_repo},
        expected_log_message_regex=r"Stage 'pipeline - provisioning': failed",
    )
def worker_provisioning_fail_test():
    """Provisioning a worker from a nonexistent git revision must fail at the
    provisioning stage, and the node must not be stopped when it never started."""
    worker_commit = 'this_revision_does_not_exist'
    # MZBENCH_REPO may be overridden in CI; fall back to the public repo.
    mzbench_repo = os.environ.get('MZBENCH_REPO', 'https://github.com/machinezone/mzbench')

    def check_no_spurious_stop(log):
        # A bare "mzbench stop" (without the guarded "mzbench stop; true" form)
        # means we tried to stop a node that never started.
        plain_stops = len(re.findall('mzbench stop', log))
        guarded_stops = len(re.findall('mzbench stop; true', log))
        if plain_stops > guarded_stops:
            return "Error: tried to stop mzbench node, but it didn't even start!"
        return None

    run_failing_bench(
        scripts_dir + 'worker_from_git.erl',
        env={'worker_branch': worker_commit, 'mzbench_repo': mzbench_repo},
        expected_log_message_regex=r"Stage 'pipeline - provisioning': failed",
        check_log_function=check_no_spurious_stop,
    )
def time_assertions_fail_test():
    """A BDL time assertion that cannot hold for 40s must fail the benchmark."""
    expected = r'''Benchmark result: FAILED.*1 assertions failed.*Assertion: \(\(p\*t > 40\) and \(not \(p\*t <= 40\)\)\).*was expected to hold for 40s.*but held for just'''
    run_failing_bench(
        scripts_bdl_dir + 'time_assertion_fail.bdl',
        env={},
        expected_log_message_regex=expected,
    )
def signal_timeout_test():
    """Waiting on a signal that never arrives must crash the worker with a timeout."""
    expected = r'\[error\].*Worker.*has crashed: {timeout,{wait_signal,"A"}}'
    run_failing_bench(
        scripts_dir + 'signal_count_neg.erl',
        env={},
        expected_log_message_regex=expected,
    )
def runtime_error_test():
    """A script raising a runtime error must be reported as a worker crash."""
    expected = r'\[error\].*Worker.*has crashed: "no_because_no"'
    run_failing_bench(
        scripts_dir + 'runtime_error.erl',
        expected_log_message_regex=expected,
    )
def time_assertions_fail_test():
    """An Erlang-script time assertion that cannot hold for 40s must fail the benchmark."""
    expected = r'''Benchmark result: FAILED.*1 assertions failed.*Assertion: print > 40.*was expected to hold for 40s.*but held for just'''
    run_failing_bench(
        scripts_dir + 'time_assertion_fail.erl',
        env={},
        expected_log_message_regex=expected,
    )
def dynamic_deadlock_test():
    """A BDL script whose signals deadlock at runtime must be detected and reported."""
    run_failing_bench(
        scripts_bdl_dir + 'signal_dyn_deadlock.bdl',
        expected_log_message_regex=r'Dynamic deadlock detected',
    )
def nobody_waits_for_signal_test():
    """Waiting on a signal that nothing sets must fail (both .erl and .bdl variants)."""
    expected = r'Nobody sets signal'
    for script in (scripts_dir + 'signal_error2.erl', scripts_bdl_dir + 'signal_error2.bdl'):
        run_failing_bench(script, expected_log_message_regex=expected)
def always_assertions_fail_test():
    """A BDL always-assertion that is violated must interrupt the benchmark."""
    expected = r'\[error\].*Interrupting benchmark because of failed asserts'
    run_failing_bench(
        scripts_bdl_dir + 'always_assertion_fail.bdl',
        env={},
        expected_log_message_regex=expected,
    )
def env_param_missing_test():
    """Running env.erl without its required var must report the undefined-var error."""
    expected = r'''Var 'pool_size' is not defined'''
    run_failing_bench(
        scripts_dir + 'env.erl',
        env={},
        expected_log_message_regex=expected,
    )
def nobody_sets_signal_in_loop_test():
    """Setting a signal in a loop that nothing waits for must be rejected."""
    run_failing_bench(
        scripts_dir + "signal_error4.erl",
        expected_log_message_regex=r"Nobody waits for signal",
    )
def env_param_missing_test():
    """A missing env var must fail provisioning with a substitution error."""
    expected = r'''\[error\] \[ API \] Stage 'pipeline - provisioning': failed\n\s*Benchmark has failed on provisioning with reason:\n\s*{substitution_error,variable_name_is_unbound,"pool_size",at_location,\n\s*"line 1: "}'''
    run_failing_bench(
        scripts_dir + 'env.erl',
        env={},
        expected_log_message_regex=expected,
    )
def terminate_exception_test():
    """A worker raising in terminate must surface the TERMINATE error in the log."""
    # NOTE(review): "happend" matches the server's actual (misspelled) output — do not "fix".
    expected = r'TERMINATE {error,"something bad happend",.*} "test"'
    run_failing_bench(
        scripts_dir + 'terminate_error_check.erl',
        expected_log_message_regex=expected,
    )
def nobody_sets_signal_test():
    """A deadlocked signal script must make the benchmark fail (no specific log check)."""
    run_failing_bench(scripts_dir + 'signals_deadlock3.erl')
def nobody_waits_for_signal_test():
    """A deadlocked signal script must make the benchmark fail (no specific log check)."""
    run_failing_bench(scripts_dir + 'signals_deadlock2.erl')
def signal_deadlock_test():
    """A deadlocked signal script must make the benchmark fail (no specific log check)."""
    run_failing_bench(scripts_dir + 'signals_deadlock.erl')
def nobody_waits_for_signal_test():
    """Waiting on a signal that nothing sets must be rejected."""
    run_failing_bench(
        scripts_dir + "signal_error2.erl",
        expected_log_message_regex=r"Nobody sets signal",
    )
def terminate_exception_test():
    """A worker raising in terminate must surface the TERMINATE error in the log."""
    # NOTE(review): "happend" matches the server's actual (misspelled) output — do not "fix".
    expected = r'TERMINATE {error,"something bad happend",.*} "test"'
    run_failing_bench(
        scripts_dir + 'terminate_error_check.erl',
        expected_log_message_regex=expected,
    )
def signal_deadlock_test():
    """A statically detectable signal deadlock must be reported (both .erl and .bdl variants)."""
    # NOTE(review): "posible" matches the server's actual (misspelled) output — do not "fix".
    expected = r'Deadlock is posible'
    for script in (scripts_dir + 'signal_error1.erl', scripts_bdl_dir + 'signal_error1.bdl'):
        run_failing_bench(script, expected_log_message_regex=expected)
def time_assertions_fail_test():
    """A failed time assertion must show up in the command-execution failure output."""
    expected = r'\[error\].*Command execution failed.*Output: FAILED\n1 assertions failed\nAssertion: print > 40'
    run_failing_bench(
        scripts_dir + 'time_assertion_fail.erl',
        env={},
        expected_log_message_regex=expected,
    )
def nobody_sets_signal_in_loop_test():
    """Setting a signal in a loop that nothing waits for must be rejected (both variants)."""
    expected = r'Nobody waits for signal'
    for script in (scripts_dir + 'signal_error4.erl', scripts_bdl_dir + 'signal_error4.bdl'):
        run_failing_bench(script, expected_log_message_regex=expected)
def signal_deadlock_test():
    """A statically detectable signal deadlock must be reported."""
    # NOTE(review): "posible" matches the server's actual (misspelled) output — do not "fix".
    run_failing_bench(
        scripts_dir + "signal_error1.erl",
        expected_log_message_regex=r"Deadlock is posible",
    )
def env_param_missing_test():
    """Running env.erl without its required var must report the unbound-var error."""
    expected = r'''Benchmark result: Unexpected error: {{{var_is_unbound,"pool_size"}'''
    run_failing_bench(
        scripts_dir + 'env.erl',
        env={},
        expected_log_message_regex=expected,
    )
def emulate_crash_test():
    """Trigger an emulated bench crash via env flag; expect the EXIT-reason log line."""
    crash_pattern = r"\[error\].*Benchmark received 'EXIT' from .* with reason {emulated_crash,\n\s*nothing_to_see_here,\n\s*please_move_along},"
    run_failing_bench(
        scripts_dir + 'correct_script.erl',
        env={'emulate_bench_crash': 'true'},
        expected_log_message_regex=crash_pattern,
    )
def env_param_missing_test():
    """A missing env var must fail the running stage with an unbound-var error."""
    expected = r'''\[error\].*Stage 'pipeline - running': failed.*Unexpected error: {{{var_is_unbound,"pool_size"}'''
    run_failing_bench(
        scripts_dir + 'env.erl',
        env={},
        expected_log_message_regex=expected,
    )
def env_param_missing_test():
    """Running env.erl without its required var must report the undefined-var error."""
    expected = r'''Var 'pool_size' is not defined'''
    run_failing_bench(
        scripts_dir + 'env.erl',
        env={},
        expected_log_message_regex=expected,
    )
def always_assertions_fail_test():
    """An Erlang-script always-assertion that is violated must interrupt the benchmark."""
    expected = r'\[error\].*Interrupting benchmark because of failed asserts'
    run_failing_bench(
        scripts_dir + 'always_assertion_fail.erl',
        env={},
        expected_log_message_regex=expected,
    )