def test_single_job_regressions(
    descriptor_filename,
    expected_yaml,
    shared_datadir,
    config_args,
    config_env,
    file_regression: FileRegressionFixture,
    benchmark_event,
    scripts,
    parent_action_id,
):
    random_object = random.Random()
    random_object.seed(1)

    if parent_action_id:
        benchmark_event.parent_action_id = parent_action_id
        expected_yaml = expected_yaml + f"-{parent_action_id}"

    descriptor_data = toml.load(str(shared_datadir / descriptor_filename))
    fetched_data_sources = generate_fetched_data_sources(descriptor_data)
    fetched_models = generate_fetched_models(descriptor_data)
    transpiler_config = create_executor_config(config_args, config_env)
    yaml_spec = create_job_yaml_spec(
        descriptor_data,
        transpiler_config,
        fetched_data_sources,
        fetched_models,
        scripts,
        JOB_ID,
        extra_bai_config_args=dict(random_object=random_object),
        event=benchmark_event,
    )

    file_regression.check(yaml_spec, basename=expected_yaml, extension=".yaml")
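The descriptor_filename/expected_yaml pairs consumed above typically come from parametrized fixtures defined elsewhere in the test suite. A minimal conftest-style sketch of what that wiring might look like (the descriptor file names and the basename rule are illustrative assumptions, not the project's actual setup):

import pytest

# Hypothetical descriptor files; the real suite supplies its own list.
DESCRIPTOR_FILES = ["descriptor_cpu.toml", "descriptor_gpu.toml"]

@pytest.fixture(params=DESCRIPTOR_FILES)
def descriptor_filename(request):
    return request.param

@pytest.fixture
def expected_yaml(descriptor_filename):
    # Assumed naming rule: the reference YAML basename mirrors the descriptor name.
    return descriptor_filename.replace(".toml", "")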
Example #2
def test_stdout(self, setup_teardown,
                file_regression: FileRegressionFixture):
    """
    Check the output against what it should be.
    """
    outputs = setup_teardown
    file_regression.check(outputs['stdout'])
Example #3
def check_file_regression(
    data: Union[str, StringList],
    file_regression: FileRegressionFixture,
    extension: str = ".txt",
    **kwargs,
):
    r"""
	Check the given data against that in the reference file.

	:param data:
	:param file_regression: The file regression fixture for the test.
	:param extension: The extension of the reference file.
	:param \*\*kwargs: Additional keyword arguments passed to
		:meth:`pytest_regressions.file_regression.FileRegressionFixture.check`.

	.. seealso:: :meth:`.AdvancedFileRegressionFixture.check`
	"""

    __tracebackhide__ = True

    if isinstance(data, StringList):
        data = str(data)

    file_regression.check(data,
                          encoding="UTF-8",
                          extension=extension,
                          **kwargs)

    return True
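A minimal usage sketch for the helper above, assuming domdf_python_tools provides StringList; the report contents are invented for illustration.

from domdf_python_tools.stringlist import StringList
from pytest_regressions.file_regression import FileRegressionFixture

def test_report(file_regression: FileRegressionFixture):
    report = StringList(["Title", "=====", '', "Body text."])
    # The helper stringifies the StringList before handing it to file_regression.
    check_file_regression(report, file_regression, extension=".rst")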
Example #4
        def test_results_file(self, setup_teardown,
                              file_regression: FileRegressionFixture):
            """
            Check the results file against what it should be.

            We don't use the dataframe_regression fixture because the results might
            include non-numeric data.
            """
            prefix = 'results'
            outputs = setup_teardown
            file_regression.check(outputs[prefix], basename=prefix)
Example #5
def _check_file_contents(
    self,
    results_folder: str,
    file_suffix: str,
    file_regression: FileRegressionFixture,
):
    file_path = self._get_matching_file_path(results_folder=results_folder,
                                             file_suffix=file_suffix)
    with open(file_path) as f:
        contents = f.read()
    file_regression.check(contents=contents)
Example #6
def _check_file_contents(
    self,
    results_folder: str,
    file_suffix: str,
    file_regression: FileRegressionFixture,
):
    file_path = self._get_matching_file_path(results_folder=results_folder,
                                             file_suffix=file_suffix)
    with open(file_path) as f:
        # Normalize trailing whitespace: fbcode requires that files on disk
        # do not end with multiple trailing newlines.
        contents = f.read().rstrip('\n') + '\n'
    file_regression.check(contents=contents)
Example #7
def test_repr(compound, spectrum, molecule, file_regression: FileRegressionFixture):
	assert str(compound) == "Compound([Molecule(Dimethyl Phthalate, C10H10O4)])"
	assert repr(compound) == "<Compound([<Molecule(Dimethyl Phthalate, Formula({'C': 10, 'H': 10, 'O': 4}))>])>"

	compound = Compound(
			algo="FindByFormula",
			location={'m': 169.0893, "rt": 13.649, 'a': 29388223, 'y': 3377289},
			results=[molecule, molecule, molecule, molecule, molecule],
			spectra=[spectrum],
			compound_scores={"fbf": Score(62.90, flag_string="low score", flag_severity=2)},
			)
	file_regression.check(str(compound), encoding="UTF-8", extension="_str.txt")
	file_regression.check(repr(compound), encoding="UTF-8", extension="_repr.txt")
Example #8
        def test_fill_stack(self, file_regression: FileRegressionFixture):
            """
            Check the expected output when filling up the stack.

            Request image/model slots from the stack, and check that the behavior is as
            expected.
            """

            seed = 0
            random.seed(seed)
            np.random.seed(seed)
            torch.manual_seed(seed)

            with testing_utils.tempdir() as tmpdir:

                # Params
                opt = {
                    'evals_per_image_model_combo': 2,
                    'models': ['model_1', 'model_2'],
                    'num_images': 3,
                    'stack_folder': tmpdir,
                }
                num_stack_slots = (opt['evals_per_image_model_combo'] *
                                   len(opt['models']) * opt['num_images'])
                num_workers = 5
                worker_id_to_remove = '2'
                stack_idx_to_remove_worker_from = 0

                # Create the stack
                stack = ImageStack(opt)

                with testing_utils.capture_output() as output:
                    for _ in range(num_stack_slots):
                        worker_id = random.randrange(num_workers)
                        _ = stack.get_next_image(str(worker_id))
                        print('STACK: ', stack.stack)
                    stack.remove_worker_from_stack(
                        worker=worker_id_to_remove,
                        stack_idx=stack_idx_to_remove_worker_from,
                    )
                    print('STACK: ', stack.stack)
                    stdout = output.getvalue()

                # Check the output against what it should be
                file_regression.check(contents=stdout)
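The three seed calls at the top of this example could be factored into a fixture so every captured-stdout regression starts from identical RNG state; a sketch, assuming numpy and torch are available in the test environment:

import random

import numpy as np
import pytest
import torch

@pytest.fixture
def seeded_rngs():
    # Pin every RNG the stack exercises so the printed output is reproducible.
    seed = 0
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    return seed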
Example #9
def test_csv_parser(tmp_pathplus, file_regression: FileRegressionFixture):
    raw_results_dir = PathPlus(__file__).parent / "raw_results"

    json_results_dir = tmp_pathplus / "json_results"
    csv_results_dir = tmp_pathplus / "csv_results"

    parser = ResultParser(raw_results_dir, json_results_dir, csv_results_dir)

    dates = [
        "191121",
        "191126",
        "191128",
        "191206",
        "191211",
        "200124",
        "200128",
        "200129",
        "200206",
        "200218",
        "200221",
        "200227",
        "200303",
    ]

    dates = ['-'.join(re.findall("..", date)) for date in dates]
    parser.parse_directory_list(dates)

    csv_files = []
    json_files = []

    for date in dates:
        csv_files.append(csv_results_dir / date / "CSV Results Parsed.csv")
        json_files.append(json_results_dir / date / "results.json")

    concatenate_csv(*csv_files, outfile=tmp_pathplus / "All CSV Results.csv")
    concatenate_json(*json_files, outfile=tmp_pathplus / "All Results.json")

    file_regression.check((tmp_pathplus / "All CSV Results.csv").read_text(),
                          encoding="UTF-8",
                          extension=".csv")
    file_regression.check((tmp_pathplus / "All Results.json").read_text(),
                          encoding="UTF-8",
                          extension=".json")
Example #10
def test_scheduled_job_regressions(
    descriptor_filename,
    expected_yaml,
    shared_datadir,
    config_args,
    config_env,
    file_regression: FileRegressionFixture,
    benchmark_event,
):
    random_object = random.Random()
    random_object.seed(1)

    descriptor_data = toml.load(str(shared_datadir / descriptor_filename))
    transpiler_config = create_executor_config(config_args, config_env)

    yaml_spec = create_scheduled_job_yaml_spec(descriptor_data,
                                               transpiler_config,
                                               JOB_ID,
                                               event=benchmark_event)
    file_regression.check(yaml_spec, basename=expected_yaml, extension=".yaml")
Example #11
def test_parse_tariffs(file_regression: FileRegressionFixture, datadir):
	single_register_electricity_tariffs = json.loads(
			(datadir / "single_register_electricity_tariffs.json").read_text()
			)

	assert isinstance(_parse_tariffs(single_register_electricity_tariffs), RegionalTariffs)
	assert str(_parse_tariffs(single_register_electricity_tariffs)) == "RegionalTariffs(['direct_debit_monthly'])"

	file_regression.check(
			repr(_parse_tariffs(single_register_electricity_tariffs)), encoding="UTF-8", extension=".json5"
			)

	tariffs: Dict[str, Dict[str, Tariff]] = {}

	for gsp, payment_methods in single_register_electricity_tariffs.items():
		tariffs[gsp] = {}

		for method, tariff in payment_methods.items():
			tariffs[gsp][method] = Tariff(**tariff)

	assert repr(_parse_tariffs(single_register_electricity_tariffs)) == str(tariffs)
	assert repr(_parse_tariffs(single_register_electricity_tariffs)) == repr(tariffs)