Example No. 1
    def testCalculateProgramMetricCombinations(self):
        """Tests the CalculateProgramMetricCombinations DoFn."""

        fake_person = StatePerson.new_with_defaults(
            person_id=123,
            gender=Gender.MALE,
            birthdate=date(1970, 1, 1),
            residency_status=ResidencyStatus.PERMANENT)

        program_events = [
            ProgramReferralEvent(state_code='US_TX',
                                 event_date=date(2011, 4, 3),
                                 program_id='program')
        ]

        # Each event will have an output for each methodology type
        expected_population_metric_count = len(program_events) * 2

        expected_combination_counts = \
            {'referrals': expected_population_metric_count}

        test_pipeline = TestPipeline()

        output = (test_pipeline
                  | beam.Create([(fake_person, program_events)])
                  | 'Calculate Program Metrics' >> beam.ParDo(
                      pipeline.CalculateProgramMetricCombinations(), None, -1,
                      ALL_METRIC_INCLUSIONS_DICT))

        assert_that(
            output,
            AssertMatchers.count_combinations(expected_combination_counts),
            'Assert number of metrics is expected value')

        test_pipeline.run()
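
The assertion step in this and the later examples relies on an AssertMatchers.count_combinations helper that is not shown on this page. Below is a minimal sketch of what such a matcher might look like, assuming each output element is a (combination, value) pair whose combination dictionary names its metric under a 'metric_type' key ('referrals', 'participation'); the element shape is inferred from how the tests use the helper, not taken from the actual implementation.

from typing import Any, Callable, Dict, Iterable, Tuple

from apache_beam.testing.util import BeamAssertException


class AssertMatchers:
    """Matchers for use with apache_beam.testing.util.assert_that."""

    @staticmethod
    def count_combinations(
            expected_combination_counts: Dict[str, int]
    ) -> Callable[[Iterable[Tuple[Dict[str, Any], Any]]], None]:
        """Builds a matcher that tallies output combinations per metric type
        and compares the tallies against the expected counts."""

        def _count_combinations(output):
            actual_counts = {metric: 0 for metric in expected_combination_counts}

            for combination, _value in output:
                # Assumption: the combination dictionary identifies its metric
                # type as a plain string under the 'metric_type' key.
                metric_type = combination.get('metric_type')
                if metric_type in actual_counts:
                    actual_counts[metric_type] += 1

            if actual_counts != expected_combination_counts:
                raise BeamAssertException(
                    f'Expected combination counts {expected_combination_counts}, '
                    f'found {actual_counts}')

        return _count_combinations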
Example No. 2
    def testCalculateProgramMetricCombinations_NoReferrals(self):
        """Tests the CalculateProgramMetricCombinations when there are
        no supervision months. This should never happen because any person
        without program events is dropped entirely from the pipeline."""
        fake_person = StatePerson.new_with_defaults(
            person_id=123, gender=Gender.MALE,
            birthdate=date(1970, 1, 1),
            residency_status=ResidencyStatus.PERMANENT)

        test_pipeline = TestPipeline()

        inputs = [(self.fake_person_id, {
            'person_events': [(fake_person, [])],
            'person_metadata': [self.person_metadata]
        })]

        output = (test_pipeline
                  | beam.Create(inputs)
                  | beam.ParDo(ExtractPersonEventsMetadata())
                  | 'Calculate Program Metrics' >>
                  beam.ParDo(pipeline.CalculateProgramMetricCombinations(),
                             None, -1, ALL_METRIC_INCLUSIONS_DICT)
                  )

        assert_that(output, equal_to([]))

        test_pipeline.run()
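
Example No. 2 (and Examples No. 4 and 6 below) first runs the keyed input through ExtractPersonEventsMetadata before calculating metrics. A minimal sketch of a DoFn with that role is shown below, assuming the (person_id, {'person_events': ..., 'person_metadata': ...}) input shape built in these tests and that downstream steps expect (person, events, metadata) tuples; this is an illustration inferred from the test inputs, not the pipeline's actual implementation.

import apache_beam as beam


class ExtractPersonEventsMetadata(beam.DoFn):
    """Hypothetical sketch of the unpacking step used in these tests."""

    def process(self, element):
        _person_id, grouped = element

        # The tests build single-element lists under each key; drop the
        # element entirely if either side is missing or empty.
        person_events = grouped.get('person_events')
        person_metadata = grouped.get('person_metadata')

        if not person_events or not person_metadata:
            return

        metadata = person_metadata[0]

        for person, events in person_events:
            yield person, events, metadata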
Example No. 3
    def testCalculateProgramMetricCombinations_NoInput(self):
        """Tests the CalculateProgramMetricCombinations when there is
        no input to the function."""

        test_pipeline = TestPipeline()

        output = (test_pipeline
                  | beam.Create([])
                  | 'Calculate Program Metrics' >> beam.ParDo(
                      pipeline.CalculateProgramMetricCombinations(), None, -1,
                      ALL_METRIC_INCLUSIONS_DICT))

        assert_that(output, equal_to([]))

        test_pipeline.run()
Example No. 4
    def testCalculateProgramMetricCombinations(self):
        """Tests the CalculateProgramMetricCombinations DoFn."""

        fake_person = StatePerson.new_with_defaults(
            state_code='US_XX',
            person_id=123, gender=Gender.MALE,
            birthdate=date(1970, 1, 1),
            residency_status=ResidencyStatus.PERMANENT)

        program_events = [
            ProgramReferralEvent(
                state_code='US_XX',
                event_date=date(2011, 4, 3),
                program_id='program',
                participation_status=StateProgramAssignmentParticipationStatus.IN_PROGRESS
            ),
            ProgramParticipationEvent(
                state_code='US_XX',
                event_date=date(2011, 6, 3),
                program_id='program'
            )]

        # Each event will have an output for each methodology type
        expected_metric_count = 2

        expected_combination_counts = \
            {'referrals': expected_metric_count,
             'participation': expected_metric_count}

        test_pipeline = TestPipeline()

        inputs = [(self.fake_person_id, {
            'person_events': [(fake_person, program_events)],
            'person_metadata': [self.person_metadata]
        })]

        output = (test_pipeline
                  | beam.Create(inputs)
                  | beam.ParDo(ExtractPersonEventsMetadata())
                  | 'Calculate Program Metrics' >>
                  beam.ParDo(pipeline.CalculateProgramMetricCombinations(),
                             None, -1, ALL_METRIC_INCLUSIONS_DICT)
                  )

        assert_that(output, AssertMatchers.count_combinations(expected_combination_counts),
                    'Assert number of metrics is expected value')

        test_pipeline.run()
Example No. 5
    def testCalculateProgramMetricCombinations(self):
        """Tests the CalculateProgramMetricCombinations DoFn."""

        fake_person = StatePerson.new_with_defaults(
            person_id=123,
            gender=Gender.MALE,
            birthdate=date(1970, 1, 1),
            residency_status=ResidencyStatus.PERMANENT)

        program_events = [
            ProgramReferralEvent(state_code='US_TX',
                                 event_date=date(2011, 4, 3),
                                 program_id='program')
        ]

        # Get the number of combinations of person-event characteristics.
        num_combinations = len(
            calculator.characteristic_combinations(fake_person,
                                                   program_events[0],
                                                   ALL_INCLUSIONS_DICT))
        assert num_combinations > 0

        # Each characteristic combination will be tracked for each of the
        # events and the two methodology types
        expected_population_metric_count = \
            num_combinations * len(program_events) * 2

        expected_combination_counts = \
            {'referrals': expected_population_metric_count}

        test_pipeline = TestPipeline()

        output = (test_pipeline
                  | beam.Create([(fake_person, program_events)])
                  | 'Calculate Program Metrics' >> beam.ParDo(
                      pipeline.CalculateProgramMetricCombinations(), -1,
                      ALL_INCLUSIONS_DICT).with_outputs('referrals'))

        assert_that(
            output.referrals,
            AssertMatchers.count_combinations(expected_combination_counts),
            'Assert number of metrics is expected value')

        test_pipeline.run()
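
Example No. 5 targets an older interface where the DoFn emitted results on a tagged output ('referrals') that the test reads back as output.referrals. For readers unfamiliar with tagged outputs, here is a small standalone sketch of the Beam mechanism; the DoFn and elements are invented for illustration and are not part of the pipeline under test.

import apache_beam as beam
from apache_beam import pvalue


class TagByMetric(beam.DoFn):
    """Toy DoFn: routes each (metric_type, value) pair to a tagged output."""

    def process(self, element):
        metric_type, value = element
        # TaggedOutput sends the element to the named output instead of the
        # main output; the tag must match one declared in with_outputs().
        yield pvalue.TaggedOutput(metric_type, value)


with beam.Pipeline() as p:
    results = (
        p
        | beam.Create([('referrals', 1), ('participation', 2)])
        | beam.ParDo(TagByMetric()).with_outputs('referrals', 'participation'))

    # Each tagged output is its own PCollection, accessed by attribute.
    _ = results.referrals | 'PrintReferrals' >> beam.Map(print)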
Example No. 6
    def testCalculateProgramMetricCombinations(self):
        """Tests the CalculateProgramMetricCombinations DoFn."""

        fake_person = StatePerson.new_with_defaults(
            state_code="US_XX",
            person_id=123,
            gender=Gender.MALE,
            birthdate=date(1970, 1, 1),
            residency_status=ResidencyStatus.PERMANENT,
        )

        program_events = [
            ProgramReferralEvent(
                state_code="US_XX",
                event_date=date(2011, 4, 3),
                program_id="program",
                participation_status=StateProgramAssignmentParticipationStatus.IN_PROGRESS,
            ),
            ProgramParticipationEvent(
                state_code="US_XX", event_date=date(2011, 6, 3), program_id="program"
            ),
        ]

        expected_metric_count = 1

        expected_combination_counts = {
            "referrals": expected_metric_count,
            "participation": expected_metric_count,
        }

        test_pipeline = TestPipeline()

        inputs = [
            (
                self.fake_person_id,
                {
                    "person_events": [(fake_person, program_events)],
                    "person_metadata": [self.person_metadata],
                },
            )
        ]

        output = (
            test_pipeline
            | beam.Create(inputs)
            | beam.ParDo(ExtractPersonEventsMetadata())
            | "Calculate Program Metrics"
            >> beam.ParDo(
                pipeline.CalculateProgramMetricCombinations(),
                None,
                -1,
                ALL_METRIC_INCLUSIONS_DICT,
            )
        )

        assert_that(
            output,
            AssertMatchers.count_combinations(expected_combination_counts),
            "Assert number of metrics is expected value",
        )

        test_pipeline.run()
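
A final note on the extra arguments: in every example, the positional arguments after the DoFn instance (None, -1, ALL_METRIC_INCLUSIONS_DICT) are forwarded by Beam to the DoFn's process method after the element itself. The toy sketch below illustrates that mechanism with invented parameter names, since the real process signature is not shown on this page.

import apache_beam as beam


class AddOffset(beam.DoFn):
    """Toy DoFn: extra ParDo arguments arrive as extra process() parameters."""

    def process(self, element, offset, label):
        yield (label, element + offset)


with beam.Pipeline() as p:
    _ = (
        p
        | beam.Create([1, 2, 3])
        # The arguments after the DoFn instance are passed to process()
        # following the element, just like None/-1/ALL_METRIC_INCLUSIONS_DICT
        # in the tests above.
        | beam.ParDo(AddOffset(), 10, 'offset_applied')
        | beam.Map(print))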