from glob import glob
from os.path import basename, exists, join

from rsmtool.configuration_parser import ConfigurationParser
from rsmtool.test_utils import (check_file_output,
                                check_report,
                                do_run_summary,
                                rsmtool_test_dir)


def test_run_experiment_summary_no_csv_directory():
    # rsmsummarize experiment where the specified directory
    # does not contain any rsmtool experiments
    source = 'summary-no-output-dir'
    config_file = join(rsmtool_test_dir,
                       'data',
                       'experiments',
                       source,
                       'rsmsummarize.json')
    do_run_summary(source, config_file)
def test_run_experiment_summary_wrong_directory():
    # rsmsummarize experiment where the specified directory
    # does not exist
    source = 'summary-wrong-directory'
    config_file = join(rsmtool_test_dir,
                       'data',
                       'experiments',
                       source,
                       'rsmsummarize.json')
    do_run_summary(source, config_file)
def test_run_experiment_summary_too_many_jsons():
    # rsmsummarize experiment where the specified directory
    # contains several JSON files and the user
    # specified experiment names
    source = 'summary-too-many-jsons'
    config_file = join(rsmtool_test_dir,
                       'data',
                       'experiments',
                       source,
                       'rsmsummarize.json')
    do_run_summary(source, config_file)
def test_run_experiment_lr_summary_with_object():
    # basic rsmsummarize experiment comparing several rsmtool experiments,
    # with the configuration passed in as an object rather than a file
    source = 'lr-self-summary-object'

    config_dict = {"summary_id": "model_comparison",
                   "experiment_dirs": ["lr-subgroups", "lr-subgroups", "lr-subgroups"],
                   "description": "Comparison of rsmtool experiment with itself."}

    config_parser = ConfigurationParser()
    config_parser.load_config_from_dict(config_dict)
    config_obj = config_parser.normalize_validate_and_process_config(context='rsmsummarize')

    do_run_summary(source, config_obj)

    html_report = join('test_outputs', source, 'report',
                       'model_comparison_report.html')

    output_dir = join('test_outputs', source, 'output')
    expected_output_dir = join(rsmtool_test_dir, 'data', 'experiments',
                               source, 'output')

    csv_files = glob(join(output_dir, '*.csv'))
    for csv_file in csv_files:
        csv_filename = basename(csv_file)
        expected_csv_file = join(expected_output_dir, csv_filename)

        if exists(expected_csv_file):
            yield check_file_output, csv_file, expected_csv_file

    yield check_report, html_report
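
# Minimal usage sketch (an assumption for illustration, not part of the test
# suite): the functions above are nose-style generator tests, so each yielded
# tuple has the form (check_function, *args). A yield-test runner invokes
# check_function(*args) for every tuple; the loop below mimics that by hand
# for the object-based test.
if __name__ == '__main__':
    for check, *args in test_run_experiment_lr_summary_with_object():
        check(*args)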