def setUp(self):
    # Build a data set with no registrations, then run both report-generation tasks.
    create_test_data.create(num_registrations=0, num_registration_dates=0)
    tasks.election_day()
    tasks.registrations()
    # Create a user and mark it as Django staff for use in the tests.
    self.staff_user = UserFactory()
    self.staff_user.is_staff = True
    self.staff_user.save()
Example #2
def setUp(self):
    # Build NUM_REGISTRATIONS registrations plus a center and an office that
    # no registration uses, then run both report-generation tasks.
    create_test_data.create(num_registrations=NUM_REGISTRATIONS)
    self.unused_center = RegistrationCenterFactory()
    self.unused_office = OfficeFactory()
    tasks.election_day()
    tasks.registrations()
    self.staff_user = UserFactory()
    self.staff_user.is_staff = True
    self.staff_user.save()
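Both setUp methods above call tasks.election_day() and tasks.registrations() as plain functions; the docstring in Example #5 below confirms these are Celery tasks being run directly. Calling a Celery task function this way executes its body synchronously in the test process, so no broker or worker is needed. A minimal sketch of that pattern, using a hypothetical task rather than the project's own:

from celery import shared_task

@shared_task
def regenerate_report():
    # Hypothetical stand-in for tasks.election_day() / tasks.registrations():
    # rebuild a report from whatever is currently in the test database.
    return 'report regenerated'

# In a test, the decorated function can simply be called; the body runs
# synchronously and returns its result directly.
assert regenerate_report() == 'report regenerated'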
Example #3
def setUp(self):
    create_test_data.create(num_registrations=NUM_REGISTRATIONS,
                            num_copy_centers=NUM_COPY_CENTERS,
                            num_no_reg_centers=NUM_NO_REG_CENTERS)
    tasks.election_day()
    tasks.registrations()
    # Pre-set an HTTP Basic auth header on the Django test client and register
    # the test credentials with the reporting views.
    credentials = base64.b64encode(TEST_USERNAME + ':' + TEST_PASSWORD)
    self.client.defaults['HTTP_AUTHORIZATION'] = 'Basic ' + credentials
    views.REPORT_USER_DB[TEST_USERNAME] = TEST_PASSWORD
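A portability note on the credential line above: it concatenates str objects and passes the result straight to base64.b64encode(), which only works on Python 2, where str is a byte string. On Python 3, b64encode() takes and returns bytes, so the same header needs explicit encoding and decoding. A sketch of a Python 3 equivalent, with placeholder credential values:

import base64

TEST_USERNAME = 'reporter'   # placeholder values, for illustration only
TEST_PASSWORD = 'secret'

credentials = base64.b64encode(
    (TEST_USERNAME + ':' + TEST_PASSWORD).encode('utf-8')).decode('ascii')
auth_header = 'Basic ' + credentials    # usable as HTTP_AUTHORIZATION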
Example #4
def setUp(self):
    create_test_data.create(
        num_registrations=NUM_REGISTRATIONS,
        num_copy_centers=NUM_COPY_CENTERS,
        num_no_reg_centers=NUM_NO_REG_CENTERS,
    )
    tasks.election_day()
    tasks.registrations()
    credentials = base64.b64encode(TEST_USERNAME + ":" + TEST_PASSWORD)
    self.client.defaults["HTTP_AUTHORIZATION"] = "Basic " + credentials
    views.REPORT_USER_DB[TEST_USERNAME] = TEST_PASSWORD
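From these two snippets, views.REPORT_USER_DB looks like a plain username-to-password mapping that the reporting views check Basic auth credentials against; that is an inference from the setUp code, not something shown here. A generic sketch of how such a check might work, with hypothetical names throughout:

import base64

REPORT_USER_DB = {}  # hypothetical username -> password store

def basic_auth_ok(http_authorization):
    # Expect a header of the form 'Basic <base64(username:password)>'.
    scheme, _, encoded = http_authorization.partition(' ')
    if scheme != 'Basic':
        return False
    username, _, password = base64.b64decode(encoded).decode('utf-8').partition(':')
    return REPORT_USER_DB.get(username) == password

REPORT_USER_DB['reporter'] = 'secret'
header = 'Basic ' + base64.b64encode(b'reporter:secret').decode('ascii')
assert basic_auth_ok(header)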
Example #5
    def test(self):
        """ This is the one test case in this class; it performs end-to-end
        testing on reporting-api and vr-dashboard, using the following
        steps:

        1. create basic objects like RegistrationCenter (in setUp())
        2. initialize expected and actual stats dictionaries
        3. create different types of data, updating the expected stats
           dictionary to indicate what should appear on the dashboard
        4. run Celery tasks (directly) to regenerate reports based on the
           data created
        5. fetch and log the JSON reports
        6. scrape dashboard screens to get the actual stats reported
        7. compare expected and actual stats
        """
        expected_stats = {
            'by_center': {
                self.rc_1.center_id: {
                    # nothing yet
                },
                self.rc_2.center_id: {
                    # nothing yet
                },
                self.rc_3.center_id: {
                    # nothing yet
                },
                self.rc_4.center_id: {
                    # nothing yet
                },
                self.copy_of_rc_1.center_id: {
                    # nothing yet
                },
                self.rc_5.center_id: {
                    # nothing yet
                }
            },
            'by_office': {
                self.rc_1.office_id: {
                    # nothing yet
                },
                self.rc_2.office_id: {
                    # nothing yet
                },
                self.rc_3.office_id: {
                    # nothing yet
                },
                self.rc_4.office_id: {
                    # nothing yet
                },
                self.copy_of_rc_1.office_id: {
                    # nothing yet
                },
            },
            'summary': deepcopy(EMPTY_SUMMARY),
            'message_stats': {},
            'phone_history': {},
        }
        actual_stats = deepcopy(expected_stats)

        self._describe_infra()
        self._create_election_day_data(expected_stats)
        self._create_registrations(expected_stats)
        self._create_sms_messages(expected_stats)

        # Regenerate reports based on current database contents

        # Make the election day report code think it is now 3:35 p.m. on election day (just
        # after the period 2 time) so that it will flag missing period 1 and 2 reports.
        middle_of_election_day = self.election.polling_start_time.replace(hour=15, minute=35)
        with patch.object(reports, 'get_effective_reminder_time') as mock_reminder_time:
            mock_reminder_time.return_value = middle_of_election_day
            tasks.election_day()
            mock_reminder_time.assert_called()

        tasks.registrations()

        # Log the JSON reports to help with debugging
        credentials = base64.b64encode(self.reporting_user + ':' + self.reporting_password)
        auth_headers = {
            'HTTP_AUTHORIZATION': 'Basic ' + credentials
        }
        for report_rel_url in [test_reports.REGISTRATIONS_REL_URI,
                               test_reports.ELECTION_DAY_LOG_REL_URI,
                               test_reports.ELECTION_DAY_REPORT_REL_URI]:
            url = test_reports.BASE_URI + report_rel_url
            self._request(url, **auth_headers)

        # Scrape the dashboard screens
        self._read_dashboard(actual_stats)

        # Compare expected and actual stats
        logger.info('Expected:')
        logger.info(expected_stats)
        logger.info('Actual:')
        logger.info(actual_stats)
        # Compare some slices of the dictionaries first to narrow in on any problem
        self.assertDictEqual(expected_stats['summary'], actual_stats['summary'])
        self.assertDictEqual(expected_stats['by_center'], actual_stats['by_center'])
        self.assertDictEqual(expected_stats['by_office'], actual_stats['by_office'])
        # Everything
        self.assertDictEqual(expected_stats, actual_stats)
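The one piece of the test worth isolating is how it controls time: instead of freezing the clock globally, it patches reports.get_effective_reminder_time so the election-day task believes it is 3:35 p.m. on election day. The same unittest.mock pattern, reduced to a self-contained sketch with a hypothetical stand-in for the project's reports module:

from datetime import datetime
from types import SimpleNamespace
from unittest.mock import patch

# Hypothetical stand-in for the project's `reports` module.
reports = SimpleNamespace(get_effective_reminder_time=lambda: datetime.now())

with patch.object(reports, 'get_effective_reminder_time') as mock_time:
    # Pretend it is 3:35 p.m. on a fixed election day.
    mock_time.return_value = datetime(2024, 6, 25, 15, 35)
    assert reports.get_effective_reminder_time() == datetime(2024, 6, 25, 15, 35)
    mock_time.assert_called()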