Example #1
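# Stubs out jetstream.analysis.dry_run_query with a Mock so validate() never
# issues a real BigQuery dry run, then asserts it was invoked exactly twice.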
def test_validate_doesnt_explode(experiments, monkeypatch):
    m = Mock()
    monkeypatch.setattr(jetstream.analysis, "dry_run_query", m)
    x = experiments[0]
    config = AnalysisSpec.default_for_experiment(x).resolve(x)
    Analysis("spam", "eggs", config).validate()
    assert m.call_count == 2
Example #2
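# Swaps dry_run_query for an assertion hook: every query validated for a Fenix
# experiment must reference the dataset derived from the experiment's app_id
# (non-alphanumeric characters replaced with underscores).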
def test_fenix_experiments_use_right_datasets(fenix_experiments, monkeypatch):
    for experiment in fenix_experiments:
        called = 0

        def dry_run_query(query):
            nonlocal called
            called += 1
            dataset = re.sub(r"[^A-Za-z0-9_]", "_", experiment.app_id)
            assert dataset in query
            assert query.count(dataset) == query.count("org_mozilla")

        monkeypatch.setattr("jetstream.analysis.dry_run_query", dry_run_query)
        config = AnalysisSpec.default_for_experiment(experiment).resolve(experiment)
        Analysis("spam", "eggs", config).validate()
        assert called == 2
Example #3
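    # Drives SerialExecutorStrategy with a Mock analysis class, checking that
    # execute() constructs the analysis with the given project, dataset, and
    # config, and runs it for the requested date.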
    def test_simple_workflow(self, cli_experiments, monkeypatch):
        monkeypatch.setattr("jetstream.cli.export_metadata", Mock())
        fake_analysis = Mock()
        experiment = cli_experiments.experiments[0]
        spec = AnalysisSpec.default_for_experiment(experiment)
        strategy = cli.SerialExecutorStrategy(
            project_id="spam",
            dataset_id="eggs",
            bucket="bucket",
            analysis_class=fake_analysis,
            experiment_getter=lambda: cli_experiments,
            config_getter=external_config.ExternalConfigCollection,
        )
        config = spec.resolve(experiment)
        run_date = dt.datetime(2020, 10, 31, tzinfo=UTC)
        strategy.execute([(config, run_date)])
        fake_analysis.assert_called_once_with("spam", "eggs", config, None)
        fake_analysis().run.assert_called_once_with(run_date)
Example #4
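    # Patches jetstream.cli.submit_workflow and verifies that the Argo strategy
    # submits a single workflow with the expected experiment slug, date, and
    # cluster parameters.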
    def test_simple_workflow(self, cli_experiments):
        experiment = cli_experiments.experiments[0]
        spec = AnalysisSpec.default_for_experiment(experiment)
        config = spec.resolve(experiment)

        with mock.patch("jetstream.cli.submit_workflow") as submit_workflow_mock:
            strategy = cli.ArgoExecutorStrategy(
                "spam",
                "eggs",
                "bucket",
                "zone",
                "cluster_id",
                False,
                None,
                None,
                lambda: cli_experiments,
            )
            run_date = dt.datetime(2020, 10, 31, tzinfo=UTC)
            strategy.execute([(config, run_date)])

            submit_workflow_mock.assert_called_once_with(
                project_id="spam",
                zone="zone",
                cluster_id="cluster_id",
                workflow_file=strategy.RUN_WORKFLOW,
                parameters={
                    "experiments": [
                        {"slug": "my_cool_experiment", "dates": ["2020-10-31"]}
                    ],
                    "project_id": "spam",
                    "dataset_id": "eggs",
                    "bucket": "bucket",
                },
                monitor_status=False,
                cluster_ip=None,
                cluster_cert=None,
            )
Example #5
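    # Validates an outcome spec by merging it into the default AnalysisSpec of
    # a synthetic "dummy" experiment and running the resolved config through
    # Analysis.validate().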
    def validate(self) -> None:
        if self.platform not in PLATFORM_CONFIGS:
            raise ValueError(f"Platform '{self.platform}' is unsupported.")
        app_id = PLATFORM_CONFIGS[self.platform].app_id
        dummy_experiment = jetstream.experimenter.Experiment(
            experimenter_slug="dummy-experiment",
            normandy_slug="dummy_experiment",
            type="v6",
            status="Live",
            branches=[],
            end_date=None,
            reference_branch="control",
            is_high_population=False,
            start_date=dt.datetime.now(UTC),
            proposed_enrollment=14,
            app_id=app_id,
            app_name=self.platform,  # seems to be unused
        )
        spec = AnalysisSpec.default_for_experiment(dummy_experiment)
        spec.merge_outcome(self.spec)
        conf = spec.resolve(dummy_experiment)
        Analysis("no project", "no dataset", conf).validate()
Example #6
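    # Validates a custom configuration by merging it into the experiment's
    # default AnalysisSpec and validating the resolved config.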
    def validate(self, experiment: jetstream.experimenter.Experiment) -> None:
        spec = AnalysisSpec.default_for_experiment(experiment)
        spec.merge(self.spec)
        conf = spec.resolve(experiment)
        Analysis("no project", "no dataset", conf).validate()
Example #7
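# Experiments flagged as high-population must not be runnable:
# check_runnable() is expected to raise HighPopulationException.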
def test_is_high_population_check(experiments):
    x = experiments[3]
    config = AnalysisSpec.default_for_experiment(x).resolve(x)

    with pytest.raises(HighPopulationException):
        Analysis("spam", "eggs", config).check_runnable()