Example #1
def get_dropbox_client(scheduled_process: ScheduledProcess) -> dropbox.Dropbox:
    """
    The Dropbox refresh token we store locally (after linking the user's Dropbox account) never expires. This method
    returns a new, authenticated Dropbox client instance that can be used for the (short) lifetime of its access token.
    """
    dropbox_settings = DropboxSettings.get_solo()
    dbx = dropbox.Dropbox(oauth2_refresh_token=dropbox_settings.refresh_token,
                          app_key=settings.DSMRREADER_DROPBOX_APP_KEY)

    try:
        dbx.refresh_access_token()
        dbx.check_user()  # No-op, just to verify the client/session.
    except Exception as error:
        logger.error(' - Dropbox error: %s', error)

        # Network errors should NOT reset the client-side app token (see further below). Only API errors should do so.
        if not isinstance(error, dropbox.exceptions.DropboxException):
            scheduled_process.delay(minutes=1)
            raise

        logger.error(' - Removing Dropbox credentials due to API failure')
        message = _(
            "Unable to authenticate with Dropbox, removing credentials. Error: {}"
        ).format(error)  # Format AFTER translation lookup, so the catalog string still matches.
        dsmr_frontend.services.display_dashboard_message(message=message)
        DropboxSettings.objects.update(refresh_token=None)  # Does not trigger auto disable.
        scheduled_process.disable()
        raise

    logger.info('Dropbox: Auth/user check OK')
    return dbx
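
For context, a minimal usage sketch of the helper above. The example_usage() function is an illustrative assumption, not part of the project; users_get_current_account() is a standard method of the official Dropbox SDK client.

def example_usage(scheduled_process: ScheduledProcess) -> None:
    try:
        dbx = get_dropbox_client(scheduled_process=scheduled_process)
    except Exception:
        # The helper already delayed or disabled the scheduled process.
        return

    # Any authenticated SDK call can now be made for the access token's lifetime.
    account = dbx.users_get_current_account()
    logger.info('Linked Dropbox account: %s', account.email)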
Example #2
def run(scheduled_process: ScheduledProcess) -> None:
    dropbox_settings = DropboxSettings.get_solo()

    if not dropbox_settings.refresh_token:
        # Should not happen; disable as a safe fallback.
        scheduled_process.disable()
        return

    dropbox_client = get_dropbox_client(scheduled_process=scheduled_process)
    backup_directory = dsmr_backup.services.backup.get_backup_directory()

    # Sync each file, recursively.
    for current_file in list_files_in_dir(directory=backup_directory):
        if not should_sync_file(current_file):
            continue

        sync_file(scheduled_process=scheduled_process,
                  dropbox_client=dropbox_client,
                  local_root_dir=backup_directory,
                  abs_file_path=current_file)

    scheduled_process.delay(hours=settings.DSMRREADER_DROPBOX_SYNC_INTERVAL)
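
The run() above leans on two helpers defined elsewhere in the project. A minimal sketch of what they might look like, assuming a plain recursive directory walk and a skip rule for empty or hidden files (the real codebase's rules may differ):

import os
from typing import Iterator

def list_files_in_dir(directory: str) -> Iterator[str]:
    # Walk the backup directory recursively, yielding absolute file paths.
    for root, _dirs, file_names in os.walk(directory):
        for current_file_name in file_names:
            yield os.path.join(root, current_file_name)

def should_sync_file(abs_file_path: str) -> bool:
    # Skip empty files and hidden files; sync everything else.
    if os.stat(abs_file_path).st_size == 0:
        return False
    return not os.path.basename(abs_file_path).startswith('.')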
Example #3
def run(scheduled_process: ScheduledProcess) -> None:
    retention_settings = RetentionSettings.get_solo()

    if retention_settings.data_retention_in_hours == RetentionSettings.RETENTION_NONE:
        scheduled_process.disable()  # Changing the retention settings in the admin will re-activate it again.
        return

    # These querysets should be rotated under retention. The dict value is the datetime field used for filtering.
    ITEM_COUNT_PER_HOUR = 2  # Number of items kept per hour (the first and last, see keeper_pks below).
    MODELS_TO_CLEANUP = {
        DsmrReading.objects.processed(): 'timestamp',
        ElectricityConsumption.objects.all(): 'read_at',
        GasConsumption.objects.all(): 'read_at',
    }

    retention_date = timezone.now() - timezone.timedelta(hours=retention_settings.data_retention_in_hours)
    data_to_clean_up = False

    # We need to force UTC here, to avoid AmbiguousTimeError exceptions on DST changes.
    timezone.activate(pytz.UTC)

    for base_queryset, datetime_field in MODELS_TO_CLEANUP.items():
        hours_to_cleanup = base_queryset.filter(
            **{'{}__lt'.format(datetime_field): retention_date}
        ).annotate(
            item_hour=TruncHour(datetime_field)
        ).values('item_hour').annotate(
            item_count=Count('id')
        ).order_by().filter(  # The empty order_by() clears default ordering, keeping the GROUP BY on item_hour only.
            item_count__gt=ITEM_COUNT_PER_HOUR
        ).order_by('item_hour').values_list(
            'item_hour', flat=True
        )[:settings.DSMRREADER_RETENTION_MAX_CLEANUP_HOURS_PER_RUN]

        hours_to_cleanup = list(hours_to_cleanup)  # Force evaluation.

        if not hours_to_cleanup:
            continue

        data_to_clean_up = True

        for current_hour in hours_to_cleanup:
            # Fetch all data per hour.
            data_set = base_queryset.filter(
                **{
                    '{}__gte'.format(datetime_field): current_hour,
                    '{}__lt'.format(datetime_field): current_hour + timezone.timedelta(hours=1),
                }
            )

            # Extract the first/last item, so we can exclude them below.
            # NOTE: Want to alter this? Please update ITEM_COUNT_PER_HOUR above as well!
            keeper_pks = [
                data_set.order_by(datetime_field)[0].pk,
                data_set.order_by('-{}'.format(datetime_field))[0].pk
            ]

            # Now drop all others.
            logger.debug('Retention: Cleaning up: %s (%s)', current_hour, data_set[0].__class__.__name__)
            data_set.exclude(pk__in=keeper_pks).delete()

    timezone.deactivate()

    # Nothing to clean up, so delay for a while.
    if not data_to_clean_up:
        scheduled_process.delay(hours=12)
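
The crux of the retention run is the hour-bucketing queryset: group rows into hour buckets older than the cutoff, keep only buckets holding more rows than the two keepers, and cap the batch per run. A condensed, generic sketch of that pattern (the function name and parameters are illustrative, not project API):

from django.db.models import Count
from django.db.models.functions import TruncHour

def find_crowded_hours(queryset, datetime_field: str, cutoff, max_hours: int):
    # Return up to `max_hours` hour buckets (oldest first) before `cutoff`
    # that contain more rows than the two we keep per hour.
    return list(
        queryset.filter(**{'{}__lt'.format(datetime_field): cutoff})
        .annotate(item_hour=TruncHour(datetime_field))
        .values('item_hour')
        .annotate(item_count=Count('id'))
        .order_by()  # Clear default ordering so the GROUP BY stays on item_hour only.
        .filter(item_count__gt=2)
        .order_by('item_hour')
        .values_list('item_hour', flat=True)[:max_hours]
    )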