Example #1
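The snippets on this page are excerpted from jetstream's test suite and omit the module header. A plausible set of imports, inferred from the names the examples use (module paths for the jetstream names are assumptions, not confirmed by this page):

import datetime as dt
import json
import re
from datetime import timedelta
from textwrap import dedent
from unittest import mock
from unittest.mock import Mock

import dask.distributed
import mozanalysis.experiment
import mozanalysis.segments
import pytest
import pytz
import toml

import jetstream.analysis
import jetstream.experimenter
from jetstream.analysis import Analysis
# AnalysisSpec, AnalysisPeriod, ExperimentV1, and the exceptions used below
# (ExplicitSkipException, HighPopulationException, NoEnrollmentPeriodException)
# are also jetstream names; their exact modules are not visible here.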
def test_get_timelimits_if_ready(experiments):
    config = AnalysisSpec().resolve(experiments[0])
    config2 = AnalysisSpec().resolve(experiments[2])

    analysis = Analysis("test", "test", config)
    analysis2 = Analysis("test", "test", config2)

    date = dt.datetime(2019, 12, 1, tzinfo=pytz.utc) + timedelta(days=0)
    assert analysis._get_timelimits_if_ready(AnalysisPeriod.DAY, date) is None
    assert analysis._get_timelimits_if_ready(AnalysisPeriod.WEEK, date) is None

    date = dt.datetime(2019, 12, 1, tzinfo=pytz.utc) + timedelta(days=2)
    assert analysis._get_timelimits_if_ready(AnalysisPeriod.DAY, date) is None
    assert analysis._get_timelimits_if_ready(AnalysisPeriod.WEEK, date) is None

    date = dt.datetime(2019, 12, 1, tzinfo=pytz.utc) + timedelta(days=7)
    assert analysis._get_timelimits_if_ready(AnalysisPeriod.DAY, date)
    assert analysis._get_timelimits_if_ready(AnalysisPeriod.WEEK, date) is None

    date = dt.datetime(2019, 12, 1, tzinfo=pytz.utc) + timedelta(days=13)
    assert analysis._get_timelimits_if_ready(AnalysisPeriod.DAY, date)
    assert analysis._get_timelimits_if_ready(AnalysisPeriod.WEEK, date)

    date = dt.datetime(2020, 2, 29, tzinfo=pytz.utc)
    assert analysis._get_timelimits_if_ready(AnalysisPeriod.OVERALL,
                                             date) is None

    date = dt.datetime(2020, 3, 1, tzinfo=pytz.utc)
    assert analysis._get_timelimits_if_ready(AnalysisPeriod.OVERALL, date)
    assert analysis2._get_timelimits_if_ready(AnalysisPeriod.OVERALL,
                                              date) is None

    date = dt.datetime(2019, 12, 1, tzinfo=pytz.utc) + timedelta(days=34)
    assert analysis._get_timelimits_if_ready(AnalysisPeriod.DAYS_28, date)
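
The probe dates in Example #1 are offsets from an enrollment start of 2019-12-01; spelled out as a quick side check (not part of the test):

import datetime as dt
from datetime import timedelta
import pytz

base = dt.datetime(2019, 12, 1, tzinfo=pytz.utc)
# Per the asserts above: at +0 and +2 days nothing is ready yet; at +7 the
# daily period becomes ready; at +13 daily and weekly both are; at +34 the
# 28-day period is.
[str((base + timedelta(days=d)).date()) for d in (0, 2, 7, 13, 34)]
# -> ['2019-12-01', '2019-12-03', '2019-12-08', '2019-12-14', '2020-01-04']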
Example #2
def test_validate_doesnt_explode(experiments, monkeypatch):
    m = Mock()
    monkeypatch.setattr(jetstream.analysis, "dry_run_query", m)
    x = experiments[0]
    config = AnalysisSpec.default_for_experiment(x).resolve(x)
    Analysis("spam", "eggs", config).validate()
    assert m.call_count == 2
Example #3
def test_regression_20200316():
    experiment_json = r"""
    {
      "experiment_url": "https://blah/experiments/search-tips-aka-nudges/",
      "type": "addon",
      "name": "Search Tips aka Nudges",
      "slug": "search-tips-aka-nudges",
      "public_name": "Search Tips",
      "public_description": "Search Tips are designed to increase engagement with the QuantumBar.",
      "status": "Live",
      "countries": [],
      "platform": "All Platforms",
      "start_date": 1578960000000,
      "end_date": 1584921600000,
      "population": "2% of Release Firefox 72.0 to 74.0",
      "population_percent": "2.0000",
      "firefox_channel": "Release",
      "firefox_min_version": "72.0",
      "firefox_max_version": "74.0",
      "addon_experiment_id": null,
      "addon_release_url": "https://bugzilla.mozilla.org/attachment.cgi?id=9120542",
      "pref_branch": null,
      "pref_name": null,
      "pref_type": null,
      "proposed_start_date": 1578960000000,
      "proposed_enrollment": 21,
      "proposed_duration": 69,
      "normandy_slug": "addon-search-tips-aka-nudges-release-72-74-bug-1603564",
      "normandy_id": 902,
      "other_normandy_ids": [],
      "variants": [
        {
          "description": "Standard address bar experience",
          "is_control": false,
          "name": "control",
          "ratio": 50,
          "slug": "control",
          "value": null,
          "addon_release_url": null,
          "preferences": []
        },
        {
          "description": "",
          "is_control": true,
          "name": "treatment",
          "ratio": 50,
          "slug": "treatment",
          "value": null,
          "addon_release_url": null,
          "preferences": []
        }
      ]
    }
    """
    experiment = ExperimentV1.from_dict(
        json.loads(experiment_json)).to_experiment()
    config = AnalysisSpec().resolve(experiment)
    analysis = Analysis("test", "test", config)
    analysis.run(current_date=dt.datetime(2020, 3, 16, tzinfo=pytz.utc),
                 dry_run=True)
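
For orientation, the start_date, end_date, and proposed_start_date fields in the payload above are Unix epoch milliseconds; decoding them (a side check, not part of the test) shows the run date 2020-03-16 falls inside the live window:

import datetime as dt

dt.datetime.fromtimestamp(1578960000000 / 1000, tz=dt.timezone.utc)
# -> 2020-01-14 00:00:00+00:00 (start_date)
dt.datetime.fromtimestamp(1584921600000 / 1000, tz=dt.timezone.utc)
# -> 2020-03-23 00:00:00+00:00 (end_date)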
Example #4
    def analysis_mock_run(self, config, static_dataset, temporary_dataset,
                          project_id):
        orig = mozanalysis.experiment.Experiment.build_query

        def build_query_test_project(instance, *args, **kwargs):
            # to use the test project and dataset, we need to change the SQL query
            # generated by mozanalysis
            query = orig(instance, *args, **kwargs)
            query = query.replace("moz-fx-data-shared-prod", project_id)
            query = query.replace("telemetry", static_dataset)
            return query

        analysis = Analysis(project_id, temporary_dataset, config)
        with mock.patch.object(mozanalysis.experiment.Experiment,
                               "build_query",
                               new=build_query_test_project):
            analysis.run(current_date=dt.datetime(2020, 4, 12,
                                                  tzinfo=pytz.utc),
                         dry_run=False)
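
Example #4 leans on the fact that mock.patch.object(..., new=...) installs a plain function as the method, so the instance arrives as the first positional argument. A minimal self-contained sketch of that mechanism (toy class, not from the source):

from unittest import mock

class Greeter:
    def greet(self):
        return "hello"

def fake_greet(instance, *args, **kwargs):
    # `instance` is the Greeter object, analogous to `instance` in
    # build_query_test_project above.
    return "patched"

with mock.patch.object(Greeter, "greet", new=fake_greet):
    assert Greeter().greet() == "patched"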
Example #5
def test_skip_works(experiments):
    conf = dedent("""
        [experiment]
        skip = true
        """)
    spec = AnalysisSpec.from_dict(toml.loads(conf))
    configured = spec.resolve(experiments[0])
    with pytest.raises(ExplicitSkipException):
        Analysis("test", "test",
                 configured).run(current_date=dt.datetime(2020,
                                                          1,
                                                          1,
                                                          tzinfo=pytz.utc),
                                 dry_run=True)
Example #6
def test_analysis_doesnt_choke_on_segments(experiments):
    conf = dedent("""
        [experiment]
        segments = ["regular_users_v3"]
        """)
    spec = AnalysisSpec.from_dict(toml.loads(conf))
    configured = spec.resolve(experiments[0])
    assert isinstance(configured.experiment.segments[0],
                      mozanalysis.segments.Segment)
    Analysis("test", "test",
             configured).run(current_date=dt.datetime(2020,
                                                      1,
                                                      1,
                                                      tzinfo=pytz.utc),
                             dry_run=True)
Example #7
    def analysis_mock_run(self,
                          monkeypatch,
                          config,
                          static_dataset,
                          temporary_dataset,
                          project_id,
                          log_config=None):
        orig_enrollments = mozanalysis.experiment.Experiment.build_enrollments_query
        orig_metrics = mozanalysis.experiment.Experiment.build_metrics_query

        def build_enrollments_query_test_project(instance, *args, **kwargs):
            # to use the test project and dataset, we need to change the SQL query
            # generated by mozanalysis
            query = orig_enrollments(instance, *args, **kwargs)
            query = query.replace("moz-fx-data-shared-prod", project_id)
            query = query.replace("telemetry", static_dataset)
            return query

        def build_metrics_query_test_project(instance, *args, **kwargs):
            # to use the test project and dataset, we need to change the SQL query
            # generated by mozanalysis
            query = orig_metrics(instance, *args, **kwargs)
            query = query.replace("moz-fx-data-shared-prod", project_id)
            query = query.replace("telemetry", static_dataset)
            return query

        orig_cluster = dask.distributed.LocalCluster.__init__

        def mock_local_cluster(instance, dashboard_address, processes,
                               threads_per_worker, *args, **kwargs):
            # if processes are used then the monkeypatched query builders get ignored
            return orig_cluster(
                instance,
                dashboard_address=dashboard_address,
                processes=False,
                threads_per_worker=threads_per_worker,
            )

        analysis = Analysis(project_id, temporary_dataset, config, log_config)

        monkeypatch.setattr(
            mozanalysis.experiment.Experiment,
            "build_enrollments_query",
            build_enrollments_query_test_project,
        )
        monkeypatch.setattr(
            mozanalysis.experiment.Experiment,
            "build_metrics_query",
            build_metrics_query_test_project,
        )
        monkeypatch.setattr(dask.distributed.LocalCluster, "__init__",
                            mock_local_cluster)

        analysis.ensure_enrollments(dt.datetime(2020, 4, 12, tzinfo=pytz.utc))
        analysis.run(dt.datetime(2020, 4, 12, tzinfo=pytz.utc), dry_run=False)
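
The processes=False override in Example #7 matters because monkeypatching mutates classes in the test process only; separate Dask worker processes would re-import an unpatched mozanalysis and ignore the query rewrites. A minimal sketch of forcing a threaded cluster (assuming dask.distributed is installed):

import dask.distributed

# Threads share the patched classes with the test process; subprocess
# workers would not see the monkeypatched query builders.
cluster = dask.distributed.LocalCluster(processes=False, threads_per_worker=1)
client = dask.distributed.Client(cluster)
client.close()
cluster.close()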
Example #8
def test_fenix_experiments_use_right_datasets(fenix_experiments, monkeypatch):
    for experiment in fenix_experiments:
        called = 0

        def dry_run_query(query):
            nonlocal called
            called = called + 1
            dataset = re.sub(r"[^A-Za-z0-9_]", "_", experiment.app_id)
            assert dataset in query
            assert query.count(dataset) == query.count("org_mozilla")

        monkeypatch.setattr("jetstream.analysis.dry_run_query", dry_run_query)
        config = AnalysisSpec.default_for_experiment(experiment).resolve(
            experiment)
        Analysis("spam", "eggs", config).validate()
        assert called == 2
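
The dataset assertion in Example #8 relies on app_id being sanitized into a BigQuery-safe dataset name (only letters, digits, and underscores are allowed). Standalone, with a hypothetical Fenix-style app id:

import re

re.sub(r"[^A-Za-z0-9_]", "_", "org.mozilla.fenix")
# -> 'org_mozilla_fenix'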
Example #9
    def validate(self) -> None:
        if self.platform not in PLATFORM_CONFIGS:
            raise ValueError(f"Platform '{self.platform}' is unsupported.")
        app_id = PLATFORM_CONFIGS[self.platform].app_id
        dummy_experiment = jetstream.experimenter.Experiment(
            experimenter_slug="dummy-experiment",
            normandy_slug="dummy_experiment",
            type="v6",
            status="Live",
            branches=[],
            end_date=None,
            reference_branch="control",
            is_high_population=False,
            start_date=dt.datetime.now(UTC),
            proposed_enrollment=14,
            app_id=app_id,
            app_name=self.platform,  # seems to be unused
        )
        spec = AnalysisSpec.default_for_experiment(dummy_experiment)
        spec.merge_outcome(self.spec)
        conf = spec.resolve(dummy_experiment)
        Analysis("no project", "no dataset", conf).validate()
Example #10
def test_regression_20200320():
    experiment_json = r"""
        {
          "experiment_url": "https://experimenter.services.mozilla.com/experiments/impact-of-level-2-etp-on-a-custom-distribution/",
          "type": "pref",
          "name": "Impact of Level 2 ETP on a Custom Distribution",
          "slug": "impact-of-level-2-etp-on-a-custom-distribution",
          "public_name": "Impact of Level 2 ETP",
          "status": "Live",
          "start_date": 1580169600000,
          "end_date": 1595721600000,
          "proposed_start_date": 1580169600000,
          "proposed_enrollment": null,
          "proposed_duration": 180,
          "normandy_slug": "pref-impact-of-level-2-etp-on-a-custom-distribution-release-72-80-bug-1607493",
          "normandy_id": 906,
          "other_normandy_ids": [],
          "variants": [
            {
              "description": "",
              "is_control": true,
              "name": "treatment",
              "ratio": 100,
              "slug": "treatment",
              "value": "true",
              "addon_release_url": null,
              "preferences": []
            }
          ]
        }
    """  # noqa
    experiment = ExperimentV1.from_dict(
        json.loads(experiment_json)).to_experiment()
    config = AnalysisSpec().resolve(experiment)
    analysis = Analysis("test", "test", config)
    with pytest.raises(NoEnrollmentPeriodException):
        analysis.run(current_date=dt.datetime(2020, 3, 19, tzinfo=pytz.utc),
                     dry_run=True)
Example #11
    def validate(self, experiment: jetstream.experimenter.Experiment) -> None:
        spec = AnalysisSpec.default_for_experiment(experiment)
        spec.merge(self.spec)
        conf = spec.resolve(experiment)
        Analysis("no project", "no dataset", conf).validate()
Example #12
def test_is_high_population_check(experiments):
    x = experiments[3]
    config = AnalysisSpec.default_for_experiment(x).resolve(x)

    with pytest.raises(HighPopulationException):
        Analysis("spam", "eggs", config).check_runnable()