Example #1
def run(scheduled_process: ScheduledProcess) -> None:
    """ Checks whether a new backup should be created. Creates one if needed as well. """

    # Create a partial, minimal backup first. Since the archive grows and takes up disk space, only create one per week.
    today = timezone.localtime(timezone.now()).date()

    if today.isoweekday() == 1:
        create_partial(folder=os.path.join(get_backup_directory(), 'archive',
                                           formats.date_format(today, 'Y'),
                                           formats.date_format(today, 'm')),
                       models_to_backup=(DayStatistics, ))

    # Now create full.
    create_full(folder=get_backup_directory())

    # Schedule tomorrow, for the time specified.
    backup_settings = BackupSettings.get_solo()
    next_backup_timestamp = timezone.now() + timezone.timedelta(days=1)
    next_backup_timestamp = timezone.localtime(next_backup_timestamp)

    next_backup_timestamp = next_backup_timestamp.replace(
        hour=backup_settings.backup_time.hour,
        minute=backup_settings.backup_time.minute,
        second=0,
        microsecond=0)

    scheduled_process.reschedule(next_backup_timestamp)
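All four examples in this listing drive the same ScheduledProcess interface. To exercise them outside DSMR-reader, a minimal in-memory stand-in is enough. The sketch below is hypothetical (the real ScheduledProcess is a Django model) and the exact delay/postpone semantics are assumptions inferred from how the examples call them:

from datetime import datetime, timedelta, timezone as dt_timezone


class FakeScheduledProcess:
    """Hypothetical stand-in for dsmr_backend's ScheduledProcess model."""

    def __init__(self) -> None:
        self.planned = datetime.now(dt_timezone.utc)

    def delay(self, **kwargs) -> None:
        # Retry relative to now, e.g. delay(minutes=5) or delay(hours=1).
        self.planned = datetime.now(dt_timezone.utc) + timedelta(**kwargs)

    def postpone(self, **kwargs) -> None:
        # Assumed: push forward relative to the previously planned moment.
        self.planned += timedelta(**kwargs)

    def reschedule(self, planned_at: datetime) -> None:
        # Set an absolute next-run moment.
        self.planned = planned_at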
Example #2
def run(scheduled_process: ScheduledProcess) -> None:
    """ Analyzes daily consumption and statistics to determine whether new analysis is required. """
    if not is_data_available():
        logger.debug('Stats: No data available')
        scheduled_process.delay(hours=1)
        return

    now = timezone.localtime(timezone.now())
    target_day = get_next_day_to_generate()
    next_day = target_day + timezone.timedelta(days=1)

    # Skip current day, wait until midnight.
    if target_day >= now.date():
        logger.debug('Stats: Waiting for day to pass: %s', target_day)
        scheduled_process.reschedule(
            timezone.make_aware(timezone.datetime.combine(next_day, time.min)))
        return

    # All readings of the day must be processed.
    unprocessed_readings = DsmrReading.objects.unprocessed().filter(
        timestamp__date=target_day).exists()

    if unprocessed_readings:
        logger.debug('Stats: Found unprocessed readings for: %s', target_day)
        scheduled_process.delay(minutes=5)
        return

    # Ensure we have consumption data for the day.
    consumption_found = ElectricityConsumption.objects.filter(
        read_at__date=target_day).exists()

    if not consumption_found:
        logger.debug('Stats: Missing consumption data for: %s', target_day)
        scheduled_process.delay(hours=1)
        return

    # If we recently supported gas, make sure we've received a gas reading on the next day (or later).
    recently_gas_read = GasConsumption.objects.filter(
        read_at__date__gte=target_day - timezone.timedelta(days=1)).exists()

    # Unless it was disabled.
    gas_capability = dsmr_backend.services.backend.get_capability(
        Capability.GAS)

    if gas_capability and recently_gas_read and not GasConsumption.objects.filter(
            read_at__date__gte=next_day).exists():
        logger.debug('Stats: Waiting for first gas reading on the next day...')
        scheduled_process.delay(minutes=5)
        return

    create_statistics(target_day=target_day)

    # We keep trying until we've caught up to the current day (which the check above will then park until midnight).
    scheduled_process.delay(seconds=1)
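The combine(next_day, time.min) idiom above builds midnight at the start of the day after target_day, which make_aware then turns into a timezone-aware reschedule target. A standard-library equivalent of the same idea (the Europe/Amsterdam zone is an assumption; DSMR-reader reads Dutch smart meters):

from datetime import date, datetime, time, timedelta
from zoneinfo import ZoneInfo


def midnight_after(day: date, tz: str = 'Europe/Amsterdam') -> datetime:
    """Aware datetime for 00:00 at the start of the day following `day`."""
    return datetime.combine(day + timedelta(days=1), time.min,
                            tzinfo=ZoneInfo(tz))

# E.g. waiting for 2023-06-01 to pass: reschedule at 2023-06-02 00:00 local.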
Example #3
def run(scheduled_process: ScheduledProcess) -> None:
    mindergas_settings = MinderGasSettings.get_solo()

    # Only run when an API token is set.
    if not mindergas_settings.auth_token:
        mindergas_settings.update(export=False)  # Should also disable the scheduled process.
        return

    # Pointless without any gas data.
    if not dsmr_backend.services.backend.get_capability(Capability.GAS):
        scheduled_process.delay(hours=1)
        return

    try:
        export()
    except Exception as error:
        logger.exception(error)

        scheduled_process.delay(hours=1)
        # Format outside _() so the translatable string remains a constant msgid.
        dsmr_frontend.services.display_dashboard_message(message=_(
            'Failed to export to MinderGas: {}'
        ).format(error))
        return

    # Reschedule somewhere between 03:15 and 05:59 the next day.
    midnight = timezone.localtime(timezone.make_aware(
        timezone.datetime.combine(timezone.now(), time.min)
    ))
    next_midnight = midnight + timezone.timedelta(
        hours=dsmr_backend.services.backend.hours_in_day(
            day=timezone.now().date()
        )
    )
    scheduled_process.reschedule(
        next_midnight + timezone.timedelta(
            hours=random.randint(3, 5),
            minutes=random.randint(15, 59)
        )
    )
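Note the hours_in_day() indirection: on DST transition days a day lasts 23 or 25 hours, so blindly adding 24 hours to midnight would drift. The final jitter then lands the export somewhere between 03:15 and 05:59, spreading the load on the MinderGas API across users. The same jitter, isolated for clarity:

import random
from datetime import datetime, timedelta


def random_export_moment(next_midnight: datetime) -> datetime:
    # randint(3, 5) hours plus randint(15, 59) minutes after midnight:
    # earliest 03:15:00, latest 05:59:00.
    return next_midnight + timedelta(hours=random.randint(3, 5),
                                     minutes=random.randint(15, 59))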
Example #4
def run_quarter_hour_peaks(scheduled_process: ScheduledProcess) -> None:
    """ Calculates the quarter-hour peak consumption. For background info see issue #1084 ."""
    MINUTE_INTERVAL = 15

    # Just start with whatever time this process was scheduled at, as the process
    # is incremental and will fix data gaps (see further below).
    fuzzy_start = scheduled_process.planned.replace(second=0, microsecond=0)

    # The fuzzy start should be just past the quarter we target. E.g. fuzzy start = currently 14:34.
    logger.debug('Quarter hour peaks: Using %s as fuzzy start',
                 timezone.localtime(fuzzy_start))

    # Rewind at least 15 minutes. E.g. currently 14:34 -> 14:19 (rewind_minutes = 15)
    rewind_minutes = MINUTE_INTERVAL

    # Snap to xx:00, xx:15, xx:30 or xx:45. E.g. 14:19 -> 14:15, since 19 % 15 = 4 (rewind_minutes = 15 + 4).
    rewind_minutes += (fuzzy_start - timezone.timedelta(minutes=rewind_minutes)
                       ).minute % MINUTE_INTERVAL

    # E.g. Fuzzy start was 14:34. Now we start/end at 14:15/14:30.
    start = fuzzy_start - timezone.timedelta(minutes=rewind_minutes)
    end = start + timezone.timedelta(minutes=MINUTE_INTERVAL)

    # Do NOT continue until we've received new readings AFTER the targeted end. This ensures we do not miss any
    # readings and it also pauses the "self-healing" implementation below while a data gap is still ongoing.
    # Waiting here for long only happens during data gaps or directly after new installations (edge cases).
    if not DsmrReading.objects.filter(timestamp__gte=end).exists():
        logger.debug(
            'Quarter hour peaks: Ready but awaiting any new readings after %s, postponing for a bit...',
            timezone.localtime(end),
        )

        # Assumes new readings will arrive shortly (for most users/setups)
        scheduled_process.postpone(seconds=5)
        return

    quarter_hour_readings = DsmrReading.objects.filter(timestamp__gte=start,
                                                       timestamp__lte=end)

    # Only happens for data gaps or directly after new installations (edge cases). This will keep pushing forward.
    if len(quarter_hour_readings) < 2:
        logger.warning(
            'Quarter hour peaks: Ready but not enough readings found between %s - %s, skipping quarter...',
            timezone.localtime(start),
            timezone.localtime(end),
        )
        scheduled_process.postpone(minutes=MINUTE_INTERVAL)
        return

    first_reading = quarter_hour_readings.first()
    last_reading = quarter_hour_readings.last()
    logger.debug(
        'Quarter hour peaks: Quarter %s - %s resulted in readings %s - %s',
        timezone.localtime(start),
        timezone.localtime(end),
        timezone.localtime(first_reading.timestamp),
        timezone.localtime(last_reading.timestamp),
    )

    # Do not create duplicate data.
    existing_data = QuarterHourPeakElectricityConsumption.objects.filter(
        read_at_start__gte=start,
        read_at_start__lte=end,
    ).exists()

    if existing_data:
        logger.debug(
            'Quarter hour peaks: Ready but quarter already processed, rescheduling for next quarter...'
        )
        scheduled_process.reschedule(
            planned_at=end + timezone.timedelta(minutes=MINUTE_INTERVAL))
        return

    # Calculate quarter data.
    total_delivered_start = first_reading.electricity_delivered_1 + first_reading.electricity_delivered_2
    total_delivered_end = last_reading.electricity_delivered_1 + last_reading.electricity_delivered_2
    avg_delivered_in_quarter = total_delivered_end - total_delivered_start
    logger.debug(
        'Quarter hour peaks: Calculating for %s - %s',
        timezone.localtime(first_reading.timestamp),
        timezone.localtime(last_reading.timestamp),
    )

    new_instance = QuarterHourPeakElectricityConsumption.objects.create(
        # Using the reading timestamps ensures we can indicate gaps or lag in the reading input.
        # E.g. due to backend/datalogger process sleep or simply v4 meters emitting a reading only once per 10 seconds.
        read_at_start=first_reading.timestamp,
        read_at_end=last_reading.timestamp,
        # avg_delivered_in_quarter is the kWh delivered during these 15 minutes; multiplying by 4 maps it to the
        # average power in kW over the quarter, i.e. the hour-based peak figure.
        average_delivered=avg_delivered_in_quarter * 4)
    logger.debug('Quarter hour peaks: Created %s', new_instance)

    # Reschedule around the next moment we can expect to process the next quarter. Also works retroactively/with gaps.
    scheduled_process.reschedule(planned_at=new_instance.read_at_end +
                                 timezone.timedelta(minutes=MINUTE_INTERVAL))
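The rewind arithmetic at the top is the subtle part, so here it is isolated as a pure function, verified against the 14:34 example from the comments (naive datetimes for brevity):

from datetime import datetime, timedelta

MINUTE_INTERVAL = 15


def quarter_bounds(fuzzy_start: datetime) -> tuple[datetime, datetime]:
    """Snap a moment back to the most recent fully elapsed quarter."""
    fuzzy_start = fuzzy_start.replace(second=0, microsecond=0)
    rewind_minutes = MINUTE_INTERVAL
    rewind_minutes += (fuzzy_start - timedelta(minutes=rewind_minutes)).minute % MINUTE_INTERVAL
    start = fuzzy_start - timedelta(minutes=rewind_minutes)
    return start, start + timedelta(minutes=MINUTE_INTERVAL)

# 14:34 -> rewind 15 = 14:19, then 19 % 15 = 4 extra -> quarter 14:15 / 14:30.
assert quarter_bounds(datetime(2023, 1, 1, 14, 34)) == (
    datetime(2023, 1, 1, 14, 15), datetime(2023, 1, 1, 14, 30))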