Example #1
    def test_server_error(self, app, caplog):
        """Logs unexpected errors."""
        with capture_app_logs(app):
            canvas_error = MockResponse(401, {}, '{"message": "Unauthorized."}')
            with register_mock(dispatcher.dispatch, canvas_error):
                response = dispatcher.dispatch('create_canvas_schema')
                assert '401 Client Error: Unauthorized' in caplog.text
                assert not response
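The test above registers a canned 401 response against dispatcher.dispatch and asserts both that the error text lands in the captured logs and that the call returns a falsy value. As a rough sketch of the response stub it relies on, assuming MockResponse is a simple namedtuple-style helper with status, headers, and text fields (the project's real test utility may differ):

    # Hypothetical sketch only; the project's actual MockResponse helper may differ.
    from collections import namedtuple

    # Fields inferred from the call MockResponse(401, {}, '{"message": "Unauthorized."}').
    MockResponse = namedtuple('MockResponse', ['status', 'headers', 'text'])

    canvas_error = MockResponse(401, {}, '{"message": "Unauthorized."}')
    assert canvas_error.status == 401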
Example #2
    def run(self, term_id=None):
        job_id = self.generate_job_id()
        if not term_id:
            term_id = current_term_id()
        if app.config['TEST_CANVAS_COURSE_IDS']:
            canvas_course_ids = app.config['TEST_CANVAS_COURSE_IDS']
        else:
            canvas_course_ids = [
                row['canvas_course_id']
                for row in get_enrolled_canvas_sites_for_term(term_id)
            ]
        app.logger.info(
            f'Starting Canvas grade change log import job {job_id} for term {term_id}, {len(canvas_course_ids)} course sites...'
        )

        success_count = 0
        failure_count = 0
        for index, course_id in enumerate(canvas_course_ids, start=1):
            path = f'/api/v1/audit/grade_change/courses/{course_id}'
            s3_key = f'{get_s3_canvas_api_path()}/grade_change_log/grade_change_log_{course_id}'
            create_canvas_api_import_status(
                job_id=job_id,
                term_id=term_id,
                course_id=course_id,
                table_name='grade_change_log',
            )
            app.logger.info(
                f'Fetching Canvas grade change log for course id {course_id}, term {term_id} ({index} of {len(canvas_course_ids)})',
            )
            response = dispatch(
                'import_canvas_api_data',
                data={
                    'course_id': course_id,
                    'path': path,
                    's3_key': s3_key,
                    'job_id': job_id,
                },
            )
            if not response:
                app.logger.error(
                    f'Canvas grade change log import failed for course id {course_id}.'
                )
                update_canvas_api_import_status(
                    job_id=job_id,
                    course_id=course_id,
                    status='error',
                )
                failure_count += 1
            else:
                success_count += 1

        return (
            f'Canvas grade change log import completed for term {term_id}: {success_count} succeeded, '
            f'{failure_count} failed.')
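For context, run() here is a background-job entry point: it resolves the term, builds the course-site list (optionally overridden by TEST_CANVAS_COURSE_IDS), dispatches one import per course, and tallies successes and failures into the returned summary string. A hedged sketch of how such a job might be smoke-tested, assuming a hypothetical class name ImportCanvasGradeChangeLog, a hypothetical term id, and a pytest app fixture (none of which appear above):

    # Hypothetical sketch; the job's actual class name, term id, and fixtures are not shown above.
    def test_grade_change_import_smoke(app):
        # Limit the run to a single known course site via the test override honored by run().
        app.config['TEST_CANVAS_COURSE_IDS'] = [1234567]
        result = ImportCanvasGradeChangeLog().run(term_id='2238')
        assert 'succeeded' in result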
Example #3
    def run(self, cleanup=True):
        job_id = self.generate_job_id()
        app.logger.info(f'Starting Canvas snapshot resync job... (id={job_id})')
        md = metadata.get_failures_from_last_sync()
        if not md['failures']:
            return f"No failures found for job_id {md['job_id']}, skipping resync."
        app.logger.info(f"Found {len(md['failures'])} failures for job_id {md['job_id']}, attempting resync.")

        failures = 0
        successes = 0

        for failure in md['failures']:
            if cleanup and failure['destination_url']:
                destination_key = failure['destination_url'].split(app.config['LOCH_S3_BUCKET'] + '/')[1]
                if s3.delete_objects([destination_key]):
                    metadata.delete_canvas_snapshots([destination_key])
                else:
                    app.logger.error(f"Could not delete failed snapshot from S3 (url={failure['destination_url']})")
            metadata.create_canvas_sync_status(
                job_id=job_id,
                filename=failure['filename'],
                canvas_table=failure['canvas_table'],
                # The original signed source URL will remain valid if the resync job is run within an hour of the sync job.
                # TODO Add logic to fetch a new signed URL from the Canvas Data API for older jobs.
                source_url=failure['source_url'],
            )

            # Regenerate the S3 key, since the failed job may not have progressed far enough to store a destination URL in its metadata.
            if failure['canvas_table'] == 'requests':
                key_components = [berkeley.s3_canvas_data_path_current_term(), failure['canvas_table'], failure['filename']]
            else:
                key_components = [get_s3_canvas_daily_path(), failure['canvas_table'], failure['filename']]
            key = '/'.join(key_components)
            response = dispatch('sync_file_to_s3', data={'canvas_sync_job_id': job_id, 'url': failure['source_url'], 'key': key})

            if not response:
                app.logger.error('Failed to dispatch S3 resync of snapshot ' + failure['filename'])
                metadata.update_canvas_sync_status(job_id, key, 'error', details=f'Failed to dispatch: {response}')
                failures += 1
            else:
                app.logger.info('Dispatched S3 resync of snapshot ' + failure['filename'])
                successes += 1

        return f'Canvas snapshot resync job dispatched to workers ({successes} successful dispatches, {failures} failures).'
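One step above deserves a note: because a failed sync may never have progressed far enough to record a destination URL, the resync rebuilds each S3 key from scratch, routing 'requests' files under the current-term path and everything else under the daily path. A small sketch of that branch pulled out as a standalone helper (the helper name is illustrative only):

    # Illustrative helper; mirrors the key-regeneration branch in the resync loop above.
    def rebuild_s3_key(failure, requests_base_path, daily_base_path):
        """Rebuild the destination S3 key for a failed snapshot."""
        base = requests_base_path if failure['canvas_table'] == 'requests' else daily_base_path
        return '/'.join([base, failure['canvas_table'], failure['filename']])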
Example #4
    def test_dispatch_fixture(self, app):
        """Returns fixture data."""
        response = dispatcher.dispatch('create_canvas_schema')
        assert response['status'] == 'started'
Example #5
    def run(self, cleanup=True):
        job_id = self.generate_job_id()
        app.logger.info(f'Starting Canvas snapshot sync job... (id={job_id})')

        snapshot_response = canvas_data.get_snapshots()
        if not snapshot_response:
            raise BackgroundJobError(
                'Error retrieving Canvas data snapshots, aborting job.')
        snapshots = snapshot_response.get('files', [])

        def should_sync(snapshot):
            return snapshot['table'] == 'requests' and snapshot['partial'] is False

        snapshots_to_sync = [s for s in snapshots if should_sync(s)]
        app.logger.info(
            f'Will sync {len(snapshots_to_sync)} of {len(snapshots)} available files from Canvas Data.'
        )

        success = 0
        failure = 0

        for snapshot in snapshots_to_sync:
            metadata.create_canvas_sync_status(
                job_id=job_id,
                filename=snapshot['filename'],
                canvas_table=snapshot['table'],
                source_url=snapshot['url'],
            )

            key_components = [
                app.config['LOCH_S3_CANVAS_DATA_PATH_HISTORICAL'],
                snapshot['table'], snapshot['filename']
            ]

            key = '/'.join(key_components)
            response = dispatch('sync_file_to_s3',
                                data={
                                    'canvas_sync_job_id': job_id,
                                    'url': snapshot['url'],
                                    'key': key
                                })

            if not response:
                app.logger.error('Failed to dispatch S3 sync of snapshot ' +
                                 snapshot['filename'])
                metadata.update_canvas_sync_status(
                    job_id,
                    key,
                    'error',
                    details=f'Failed to dispatch: {response}')
                failure += 1
            else:
                app.logger.info('Dispatched S3 sync of snapshot ' +
                                snapshot['filename'])
                success += 1

        if cleanup:
            app.logger.info('Will remove obsolete snapshots from S3.')
            current_snapshot_filenames = [s['filename'] for s in snapshots_to_sync]
            requests_prefix = app.config['LOCH_S3_CANVAS_DATA_PATH_HISTORICAL'] + '/requests'
            delete_result = s3.delete_objects_with_prefix(
                requests_prefix, whitelist=current_snapshot_filenames)
            if not delete_result:
                app.logger.error('Cleanup of obsolete snapshots failed.')
        return f'Canvas snapshot sync job dispatched to workers ({success} successful dispatches, {failure} failures).'
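The should_sync predicate in this job keeps only complete (non-partial) snapshots of the requests table, which are then filed under the historical path. A quick sketch exercising that predicate against hand-built snapshot dicts (sample data only, not taken from Canvas Data):

    # Sample snapshot dicts for illustration; real Canvas Data entries carry more fields.
    def should_sync(snapshot):
        return snapshot['table'] == 'requests' and snapshot['partial'] is False

    assert should_sync({'table': 'requests', 'partial': False})
    assert not should_sync({'table': 'requests', 'partial': True})
    assert not should_sync({'table': 'enrollment_dim', 'partial': False})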
Example #6
    def run(self, cleanup=True):
        job_id = self.generate_job_id()
        app.logger.info(f'Starting Canvas snapshot sync job... (id={job_id})')

        snapshot_response = canvas_data.get_snapshots()
        if not snapshot_response:
            raise BackgroundJobError(
                'Error retrieving Canvas data snapshots, aborting job.')
        snapshots = snapshot_response.get('files', [])

        def should_sync(snapshot):
            # For tables other than requests, sync all snapshots.
            # For the requests table, sync snapshots that are partial or later than the configured cutoff date.
            def after_cutoff_date(url):
                match = re.search(r'requests/(20\d{6})', url)
                return match is not None and match[1] >= app.config['LOCH_CANVAS_DATA_REQUESTS_CUTOFF_DATE']

            return snapshot['table'] != 'requests' or snapshot['partial'] is True or after_cutoff_date(snapshot['url'])

        snapshots_to_sync = [s for s in snapshots if should_sync(s)]
        app.logger.info(
            f'Will sync {len(snapshots_to_sync)} of {len(snapshots)} available files from Canvas Data.'
        )

        success = 0
        failure = 0

        for snapshot in snapshots_to_sync:
            metadata.create_canvas_sync_status(
                job_id=job_id,
                filename=snapshot['filename'],
                canvas_table=snapshot['table'],
                source_url=snapshot['url'],
            )
            if snapshot['table'] == 'requests':
                key_components = [
                    berkeley.s3_canvas_data_path_current_term(),
                    snapshot['table'], snapshot['filename']
                ]
            else:
                key_components = [
                    get_s3_canvas_daily_path(), snapshot['table'],
                    snapshot['filename']
                ]

            key = '/'.join(key_components)
            response = dispatch('sync_file_to_s3',
                                data={
                                    'canvas_sync_job_id': job_id,
                                    'url': snapshot['url'],
                                    'key': key
                                })

            if not response:
                app.logger.error('Failed to dispatch S3 sync of snapshot ' +
                                 snapshot['filename'])
                metadata.update_canvas_sync_status(
                    job_id,
                    key,
                    'error',
                    details=f'Failed to dispatch: {response}')
                failure += 1
            else:
                app.logger.info('Dispatched S3 sync of snapshot ' +
                                snapshot['filename'])
                success += 1

        if cleanup:
            app.logger.info('Will remove obsolete snapshots from S3.')
            current_snapshot_filenames = [s['filename'] for s in snapshots_to_sync]
            requests_prefix = berkeley.s3_canvas_data_path_current_term() + '/requests'
            delete_result = s3.delete_objects_with_prefix(
                requests_prefix, whitelist=current_snapshot_filenames)
            if not delete_result:
                app.logger.error('Cleanup of obsolete snapshots failed.')
        return f'Canvas snapshot sync job dispatched to workers ({success} successful dispatches, {failure} failures).'
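The cutoff check leans on the fact that Canvas Data request URLs embed a YYYYMMDD segment, so a lexicographic string comparison doubles as a date comparison. A small sketch exercising that logic with invented URLs and an invented cutoff (neither comes from the job above):

    import re

    # Illustrative only: the URLs and cutoff date below are made up for the example.
    def after_cutoff_date(url, cutoff='20230801'):
        match = re.search(r'requests/(20\d{6})', url)
        return match is not None and match[1] >= cutoff

    assert after_cutoff_date('https://host.example/requests/20230915/part-00000.gz')
    assert not after_cutoff_date('https://host.example/requests/20230101/part-00000.gz')
    assert not after_cutoff_date('https://host.example/enrollment_dim/20230915/part-00000.gz')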