def check_day_statistics_generation(**kwargs) -> Optional[MonitoringStatusIssue]:
    """Flag when day statistics generation lags too far behind (or never ran)."""
    from dsmr_stats.models.statistics import DayStatistics

    try:
        most_recent_day = DayStatistics.objects.all().order_by('-day')[0].day
    except IndexError:
        return MonitoringStatusIssue(
            __name__,
            _('No day statistics found'),
            timezone.now()
        )

    allowed_lag = timezone.timedelta(
        days=settings.DSMRREADER_STATUS_ALLOWED_DAY_STATISTICS_LAGG_IN_DAYS
    )

    if most_recent_day > timezone.now().date() - allowed_lag:
        return None

    # Report the issue as starting at the very end of the last generated day.
    issue_since = timezone.make_aware(timezone.datetime(
        year=most_recent_day.year,
        month=most_recent_day.month,
        day=most_recent_day.day,
        hour=23,
        minute=59
    ))
    return MonitoringStatusIssue(
        __name__,
        _('Day statistics are lagging behind'),
        issue_since
    )
def check_influxdb_measurements_queue(**kwargs):
    """Flag when the outgoing InfluxDB measurement queue exceeds the configured limit."""
    from dsmr_influxdb.models import InfluxdbMeasurement

    queued = InfluxdbMeasurement.objects.count()

    if queued < settings.DSMRREADER_INFLUXDB_MAX_MEASUREMENTS_IN_QUEUE:
        return

    # Mark the issue as starting at the oldest queued measurement.
    oldest = InfluxdbMeasurement.objects.all().order_by('time')[0]
    return MonitoringStatusIssue(
        __name__,
        _('Too many outgoing InfluxDB measurements queued for transit'),
        oldest.time
    )
def check_mqtt_messages_queue(**kwargs):
    """Flag when the outgoing MQTT message queue exceeds the configured limit."""
    from dsmr_mqtt.models.queue import Message

    queued = Message.objects.count()

    if queued < settings.DSMRREADER_MQTT_MAX_MESSAGES_IN_QUEUE:
        return

    return MonitoringStatusIssue(
        __name__,
        _('Too many outgoing MQTT messages queued for transit'),
        timezone.now()
    )
def check_unprocessed_readings(**kwargs):
    """Flag when too many DSMR readings are still awaiting processing."""
    from dsmr_datalogger.models.reading import DsmrReading

    pending = DsmrReading.objects.unprocessed().count()

    if pending <= settings.DSMRREADER_STATUS_MAX_UNPROCESSED_READINGS:
        return

    # Mark the issue as starting at the oldest unprocessed reading.
    oldest = DsmrReading.objects.unprocessed().order_by('timestamp')[0]
    return MonitoringStatusIssue(
        __name__,
        _('Too many unprocessed readings: {}').format(pending),
        oldest.timestamp
    )
def postgresql_check_database_size(**kwargs):  # pragma: nocover
    """Flag when the PostgreSQL database grows beyond the configured warning size."""
    import dsmr_backend.services.backend

    pretty_size, bytes_size = dsmr_backend.services.backend.postgresql_total_database_size()

    if bytes_size < settings.DSMRREADER_STATUS_WARN_OVER_EXCESSIVE_DATABASE_SIZE:
        return

    return MonitoringStatusIssue(
        __name__,
        _('Database growing large: {}, consider data cleanup (if not already enabled)').format(pretty_size),
        timezone.now()
    )
def check_reading_count(**kwargs):  # pragma: nocover
    """Flag when the (approximate) stored reading count exceeds the warning threshold."""
    import dsmr_datalogger.services.datalogger

    approximate_count = dsmr_datalogger.services.datalogger.postgresql_approximate_reading_count()

    if approximate_count < settings.DSMRREADER_STATUS_WARN_OVER_EXCESSIVE_READING_COUNT:
        return

    return MonitoringStatusIssue(
        __name__,
        _('Approximately {} readings stored, consider data cleanup (if not already enabled)').format(
            approximate_count
        ),
        timezone.now()
    )
def test_get(self, status_mock):
    """The monitoring endpoint should expose each issue's source, description and timestamp."""
    issue = MonitoringStatusIssue(
        'source',
        'description',
        timezone.datetime(2020, 1, 15, 12, 34, 56, 0, pytz.UTC)
    )
    status_mock.return_value = [issue]

    result = self._request('application-monitoring')

    self.assertEqual(result['problems'], 1)

    first_detail = result['details'][0]
    self.assertEqual(first_detail['source'], 'source')
    self.assertEqual(first_detail['description'], 'description')
    # Timestamp is serialized in local time (UTC+1 in January).
    self.assertEqual(first_detail['since'], '2020-01-15T13:34:56+01:00')
def check_recent_readings(**kwargs):
    """Flag when no reading has been received recently (or ever).

    Returns None when a reading arrived within the allowed offset, otherwise a
    MonitoringStatusIssue dated at the latest reading (or "now" when there are
    no readings at all).
    """
    from dsmr_datalogger.models.reading import DsmrReading

    try:
        # Indexing a queryset only raises IndexError when empty; DoesNotExist
        # is raised by .get() and could never trigger here, so the previously
        # caught DsmrReading.DoesNotExist was dead code (removed for
        # consistency with check_day_statistics_generation).
        latest_reading = DsmrReading.objects.all().order_by('-timestamp')[0]
    except IndexError:
        return MonitoringStatusIssue(
            __name__,
            _('Waiting for the first reading ever'),
            timezone.now()
        )

    max_slack = timezone.now() - timezone.timedelta(
        minutes=settings.DSMRREADER_STATUS_READING_OFFSET_MINUTES
    )

    if latest_reading.timestamp > max_slack:
        return

    return MonitoringStatusIssue(
        __name__,
        _('No recent readings received'),
        latest_reading.timestamp
    )
def check_pvoutput_sync(**kwargs):
    """Flag when the scheduled PVOutput export is lagging behind (if enabled)."""
    from dsmr_pvoutput.models.settings import PVOutputAddStatusSettings

    pvoutput_settings = PVOutputAddStatusSettings.get_solo()

    # Nothing to monitor when export is disabled.
    if not pvoutput_settings.export:
        return

    allowed_lag = timezone.timedelta(
        minutes=settings.DSMRREADER_STATUS_ALLOWED_SCHEDULED_PROCESS_LAGG_IN_MINUTES
    )

    if pvoutput_settings.next_export > timezone.now() - allowed_lag:
        return

    return MonitoringStatusIssue(
        __name__,
        _('Waiting for the next PVOutput export to be executed'),
        pvoutput_settings.next_export
    )
def check_dropbox_sync(**kwargs):
    """Flag when the scheduled Dropbox sync is lagging behind (if configured)."""
    from dsmr_backup.models.settings import DropboxSettings

    dropbox_settings = DropboxSettings.get_solo()

    # Nothing to monitor without an access token configured.
    if not dropbox_settings.access_token:
        return

    allowed_lag = timezone.timedelta(
        minutes=settings.DSMRREADER_STATUS_ALLOWED_SCHEDULED_PROCESS_LAGG_IN_MINUTES
    )

    if dropbox_settings.next_sync > timezone.now() - allowed_lag:
        return

    return MonitoringStatusIssue(
        __name__,
        _('Waiting for the next Dropbox sync to be executed'),
        dropbox_settings.next_sync
    )
def check_scheduled_processes(**kwargs):
    """Return one issue for every active scheduled process running behind schedule."""
    from dsmr_backend.models.schedule import ScheduledProcess

    deadline = timezone.now() - timezone.timedelta(
        minutes=settings.DSMRREADER_STATUS_ALLOWED_SCHEDULED_PROCESS_LAGG_IN_MINUTES
    )

    return [
        MonitoringStatusIssue(
            __name__,
            _('Process behind schedule: {}').format(process.name),
            process.planned
        )
        for process in ScheduledProcess.objects.filter(active=True, planned__lt=deadline)
    ]
def test_request_monitoring_status_coverage(self, signal_mock):
    """Exercise handling of malformed and valid signal responses alike."""
    # All types of edge cases.
    valid_issue = MonitoringStatusIssue('', '', timezone.now())
    signal_mock.return_value = (
        ['x', None],  # receiver returned nothing
        ['y', list('~')],  # unexpected list payload
        ['z', Exception()],  # receiver raised
        ['!', valid_issue],  # well-formed issue
    )

    dsmr_backend.services.backend.request_monitoring_status()