def test_task_calls_backend(self, mock_task, mock_backend):
    # Given
    itservice = factories.ITServiceFactory(is_main=True, backend_id='VALID')
    min_dt = datetime.date.today().replace(day=10) - relativedelta(months=2)
    max_dt = datetime.date.today().replace(day=10) - relativedelta(months=1)
    mock_backend().get_sla_range.return_value = min_dt, max_dt

    # When
    pull_sla(itservice.host.uuid)

    # Then
    mock_backend().get_sla_range.assert_called_once_with(itservice.backend_id)

    month1_beginning = min_dt.replace(day=1)
    month2_beginning = min_dt.replace(day=1) + relativedelta(months=+1)
    mock_task.delay.assert_has_calls([
        mock.call(itservice.pk, format_period(min_dt),
                  datetime_to_timestamp(month1_beginning),
                  datetime_to_timestamp(month2_beginning)),
        mock.call(itservice.pk, format_period(max_dt),
                  datetime_to_timestamp(month2_beginning),
                  datetime_to_timestamp(max_dt)),
    ])
def get_response(self):
    response = self.client.get(reverse('stats_quota_timeline'), data={
        'aggregate': 'project',
        'uuid': self.project.uuid.hex,
        'item': 'vcpu',
        'from': core_utils.datetime_to_timestamp(timezone.now() - timedelta(minutes=1)),
        'to': core_utils.datetime_to_timestamp(timezone.now() + timedelta(minutes=1)),
    })
    return response
def test_alert_list_can_be_filtered_by_created_date(self):
    project = structure_factories.ProjectFactory(customer=self.customer)
    alert1 = factories.AlertFactory(scope=project, created=timezone.now() - timedelta(days=1))
    alert2 = factories.AlertFactory(scope=project, created=timezone.now() - timedelta(days=3))

    self.client.force_authenticate(self.owner)
    response = self.client.get(factories.AlertFactory.get_list_url(), data={
        'created_from': core_utils.datetime_to_timestamp(timezone.now() - timedelta(days=2)),
        'created_to': core_utils.datetime_to_timestamp(timezone.now()),
    })

    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertIn(alert1.uuid.hex, [a['uuid'] for a in response.data])
    self.assertNotIn(alert2.uuid.hex, [a['uuid'] for a in response.data])
class ItemsAggregatedValuesSerializer(serializers.Serializer):
    """ Validate input parameters for the items_aggregated_values action. """
    start = serializers.IntegerField(
        default=lambda: datetime_to_timestamp(timezone.now() - timedelta(hours=1)))
    end = serializers.IntegerField(
        default=lambda: datetime_to_timestamp(timezone.now()))
    method = serializers.ChoiceField(default='MAX', choices=('MIN', 'MAX'))

    def validate(self, data):
        """ Check that the start is before the end. """
        if 'start' in data and 'end' in data and data['start'] >= data['end']:
            raise serializers.ValidationError("End must occur after start")
        return data
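# A minimal usage sketch of the serializer above; the timestamp values are
# hypothetical, chosen only to exercise the start/end validation and the
# defaults.
serializer = ItemsAggregatedValuesSerializer(data={'start': 200, 'end': 100})
assert not serializer.is_valid()   # "End must occur after start"

serializer = ItemsAggregatedValuesSerializer(data={'start': 100, 'end': 200})
assert serializer.is_valid()
assert serializer.validated_data['method'] == 'MAX'   # default is applied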
def setUp(self):
    # customers
    self.old_customer = factories.CustomerFactory(created=timezone.now() - timedelta(days=10))
    self.new_customer = factories.CustomerFactory(created=timezone.now() - timedelta(days=1))
    # projects
    self.old_projects = factories.ProjectFactory.create_batch(
        3, created=timezone.now() - timedelta(days=10), customer=self.old_customer)
    self.new_projects = factories.ProjectFactory.create_batch(
        3, created=timezone.now() - timedelta(days=1), customer=self.new_customer)
    # users
    self.staff = factories.UserFactory(is_staff=True)
    self.old_customer_owner = factories.UserFactory()
    self.old_customer.add_user(self.old_customer_owner, models.CustomerRole.OWNER)
    self.all_projects_admin = factories.UserFactory()
    for p in self.old_projects + self.new_projects:
        p.add_user(self.all_projects_admin, models.ProjectRole.ADMINISTRATOR)

    self.url = reverse('stats_creation_time')
    self.default_data = {
        'from': core_utils.datetime_to_timestamp(timezone.now() - timedelta(days=12)),
        'datapoints': 2,
    }
def create_report_archive(log_directory, interval):
    """ Create a tar.gz archive from the files in a directory, filtered by time delta.

    :param log_directory: directory with log files, for example /var/log/waldur/
    :param interval: time delta, for example datetime.timedelta(days=7);
                     files older than the specified interval are filtered out
    :return: ContentFile with the gzipped archive
    """
    today = datetime.datetime.today()
    cutoff = datetime_to_timestamp(today - interval)

    log_filenames = []
    for log_file in os.listdir(log_directory):
        full_path = os.path.join(log_directory, log_file)
        stat = os.stat(full_path)
        # st_mtime is a Unix timestamp in seconds, directly comparable to the cutoff
        if stat.st_mtime > cutoff:
            log_filenames.append(full_path)

    stream = BytesIO()
    with tarfile.open(fileobj=stream, mode='w:gz') as archive:
        for filename in log_filenames:
            archive.add(filename)
    return ContentFile(stream.getvalue())
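# A minimal sketch of calling the helper; the directory, interval and output
# path are illustrative assumptions. The returned ContentFile behaves like a
# Django File, so it can be saved to a FileField or written out directly.
archive = create_report_archive('/var/log/waldur/', datetime.timedelta(days=7))
with open('/tmp/report.tar.gz', 'wb') as f:
    f.write(archive.read())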
def get_trigger_request(self, query):
    request = {
        'selectHosts': 1,
        'active': 1,
        'expandComment': 1,
        'expandDescription': 1,
        'expandExpression': 1,
    }
    if 'host_id' in query:
        request['hostids'] = query['host_id']
    if 'host_name' in query:
        request['host'] = query['host_name']
    if 'changed_after' in query:
        request['lastChangeSince'] = datetime_to_timestamp(query['changed_after'])
    if 'changed_before' in query:
        request['lastChangeTill'] = datetime_to_timestamp(query['changed_before'])
    if 'min_priority' in query:
        request['min_severity'] = query['min_priority']
    if 'priority' in query and len(query['priority']) > 0:
        request['filter'] = {'priority': list(query['priority'])}
    if 'acknowledge_status' in query:
        acknowledge_status = query['acknowledge_status']
        Status = models.Trigger.AcknowledgeStatus
        status_mapping = {
            Status.SOME_EVENTS_UNACKNOWLEDGED: 'withUnacknowledgedEvents',
            Status.LAST_EVENT_UNACKNOWLEDGED: 'withLastEventUnacknowledged',
            Status.ALL_EVENTS_ACKNOWLEDGED: 'withAcknowledgedEvents',
        }
        if acknowledge_status in status_mapping:
            key = status_mapping[acknowledge_status]
            request[key] = 1
    if 'value' in query:
        request.setdefault('filter', {})
        request['filter']['value'] = query['value']
    return request
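# A small illustration (hypothetical query values) of how a filter query maps
# onto Zabbix trigger.get parameters; `backend` stands in for an instance of
# the class that defines get_trigger_request.
query = {
    'host_id': '10105',
    'changed_after': datetime.datetime(2017, 1, 1),
    'min_priority': 2,
}
request = backend.get_trigger_request(query)
# In addition to the five defaults, request now contains:
#   'hostids': '10105'
#   'lastChangeSince': datetime_to_timestamp(datetime.datetime(2017, 1, 1))
#   'min_severity': 2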
def get_item_stats(self, hostid, item, points):
    if item.value_type == models.Item.ValueTypes.FLOAT:
        history_table = 'history'
        trend_table = 'trends'
    elif item.value_type == models.Item.ValueTypes.INTEGER:
        history_table = 'history_uint'
        trend_table = 'trends_uint'
    else:
        raise ZabbixBackendError(
            'Cannot get statistics for non-numerical item %s' % item.key)

    history_retention_days = item.history
    history_delay_seconds = item.delay or self.HISTORY_DELAY_SECONDS
    trend_delay_seconds = self.TREND_DELAY_SECONDS

    # Before this timestamp only aggregated trend data is available;
    # after it, raw history data can be used.
    trends_start_date = datetime_to_timestamp(
        timezone.now() - timedelta(days=history_retention_days))

    points = points[::-1]
    history_cursor = self._get_history(
        item.key, hostid, history_table,
        points[-1] - history_delay_seconds, points[0])
    trends_cursor = self._get_history(
        item.key, hostid, trend_table,
        points[-1] - trend_delay_seconds, points[0])

    values = []
    if points[0] > trends_start_date:
        next_value = history_cursor.fetchone()
    else:
        next_value = trends_cursor.fetchone()

    for end, start in zip(points[:-1], points[1:]):
        if start > trends_start_date:
            interval = history_delay_seconds
        else:
            interval = trend_delay_seconds

        value = None
        while True:
            if next_value is None:
                break
            time, value = next_value
            if item.is_byte():
                value = self.b2mb(value)
            if time <= end:
                # Accept the row if it is close enough to the window end
                # or falls inside the window.
                if end - time < interval or time > start:
                    break
            else:
                # The row does not belong to the current window; fetch the next one.
                if start > trends_start_date:
                    next_value = history_cursor.fetchone()
                else:
                    next_value = trends_cursor.fetchone()
        values.append(value)

    return values[::-1]
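# An illustrative walk-through (made-up timestamps) of the merge above: for
# points [t0, t1, t2] in ascending order, reversing and pairing them yields
# the windows (t2, t1] and (t1, t0], processed newest first. Each window is
# matched against one row stream: raw 'history' rows while the window lies
# inside the history retention period, aggregated 'trends' rows otherwise.
# A row supplies the window's value when it lies within one collection
# interval of the window end or inside the window itself; windows without a
# matching row report None.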
def history(self, request, uuid=None):
    """
    Warning! This endpoint is deprecated. Please use the daily-quotas endpoint instead.

    Historical data endpoints can be made available for any objects (currently
    implemented for quotas and events count). The data is available at
    *<object_endpoint>/history/*, for example: */api/quotas/<uuid>/history/*.

    There are two ways to define datetime points for historical data.

    1. Send one or more *?point=<timestamp>* parameters. The response will
       contain historical data for each given point, in the same order.
    2. Send *?start=<timestamp>*, *?end=<timestamp>* and
       *?points_count=<integer>* parameters. The result will contain
       <points_count> points from <start> to <end>.

    Response format:

    .. code-block:: javascript

        [
            {
                "point": <timestamp>,
                "object": {<object_representation>}
            },
            {
                "point": <timestamp>,
                "object": {<object_representation>}
            },
            ...
        ]

    NB! There will be no "object" for a given point in the response if there
    is no data about the object for that timestamp.
    """
    mapped = {
        'start': request.query_params.get('start'),
        'end': request.query_params.get('end'),
        'points_count': request.query_params.get('points_count'),
        'point_list': request.query_params.getlist('point'),
    }
    history_serializer = HistorySerializer(data={k: v for k, v in mapped.items() if v})
    history_serializer.is_valid(raise_exception=True)

    quota = self.get_object()
    serializer = self.get_serializer(quota)
    serialized_versions = []
    for point_date in history_serializer.get_filter_data():
        serialized = {'point': datetime_to_timestamp(point_date)}
        version = Version.objects.get_for_object(quota).filter(
            revision__date_created__lte=point_date)
        if version.exists():
            # make a copy of the serialized data and update the fields
            # that are stored in the version
            version_object = version.first()._object_version.object
            serialized['object'] = serializer.data.copy()
            serialized['object'].update({
                f: getattr(version_object, f) for f in quota.get_version_fields()
            })
        serialized_versions.append(serialized)

    return response.Response(serialized_versions, status=status.HTTP_200_OK)
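# A sketch of the two query styles the endpoint accepts, written against
# DRF's test client; the UUID and timestamps are placeholders.
from rest_framework.test import APIClient

client = APIClient()
# 1. Explicit points; repeated 'point' parameters are preserved in order.
client.get('/api/quotas/<uuid>/history/'.replace('<uuid>', quota_uuid),
           data={'point': [1470000000, 1470003600]})
# 2. Evenly spaced points between start and end.
client.get('/api/quotas/<uuid>/history/'.replace('<uuid>', quota_uuid),
           data={'start': 1470000000, 'end': 1470089600, 'points_count': 3})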
def pull_sla(host_uuid):
    """ Pull SLAs for the given Zabbix host for all time of its existence in Zabbix. """
    try:
        host = Host.objects.get(uuid=host_uuid)
    except Host.DoesNotExist:
        logger.warning(
            'Unable to pull SLA for host with UUID %s, because it is gone', host_uuid)
        return

    try:
        itservice = ITService.objects.get(host=host, is_main=True)
    except ITService.DoesNotExist:
        logger.warning(
            'Unable to pull SLA for host with UUID %s, because IT service does not exist',
            host_uuid)
        return

    backend = itservice.get_backend()
    try:
        # Get the dates of the first and last service alarms
        min_dt, max_dt = backend.get_sla_range(itservice.backend_id)
    except ZabbixBackendError as e:
        logger.warning(
            'Unable to pull SLA for host with UUID %s because of database error: %s',
            host_uuid, e)
        return

    # Shift the date to the beginning of the month
    current_point = min_dt.replace(day=1)
    while current_point <= max_dt:
        period = format_period(current_point)
        start_time = core_utils.datetime_to_timestamp(current_point)
        current_point += relativedelta(months=+1)
        end_time = core_utils.datetime_to_timestamp(min(max_dt, current_point))
        update_itservice_sla.delay(itservice.pk, period, start_time, end_time)

    logger.debug('Successfully pulled SLA for host with UUID %s', host_uuid)
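# An illustration (hypothetical dates) of the month windows pull_sla enqueues:
# for min_dt = 2017-01-20 and max_dt = 2017-02-10 the loop schedules
#   update_itservice_sla(pk, '2017-01', ts(2017-01-01), ts(2017-02-01))
#   update_itservice_sla(pk, '2017-02', ts(2017-02-01), ts(2017-02-10))
# which is exactly what the test at the top of this section asserts.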
def test_endpoint_does_not_return_object_if_date(self):
    history_timestamp = core_utils.datetime_to_timestamp(timezone.now() - timedelta(hours=2))

    self.client.force_authenticate(self.owner)
    response = self.client.get(self.url, data={'point': history_timestamp})

    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertNotIn('object', response.data[0])
def setUp(self):
    super(EventsTest, self).setUp()
    today = datetime.date.today()
    timestamp = datetime_to_timestamp(today)
    period = format_period(today)
    ResourceSlaStateTransition.objects.create(
        scope=self.vm1, period=period, timestamp=timestamp, state=True)
    ResourceSlaStateTransition.objects.create(
        scope=self.vm2, period=period, timestamp=timestamp, state=False)
    self.url = reverse('resource-sla-state-transition-list')
def to_representation(self, value):
    for str_format in self.formats:
        try:
            date_time = datetime.strptime(value, str_format)
        except ValueError:
            pass
        else:
            return core_utils.datetime_to_timestamp(date_time)
    raise serializers.ValidationError(
        _('The value %s does not match any supported datetime format.') % value)
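# A minimal sketch of the field in action, assuming the surrounding class is
# StringTimestampField as the tests below suggest; the input strings are
# made up.
field = StringTimestampField(formats=('%Y-%m-%dT%H:%M:%S', '%Y-%m-%d'))
field.to_representation('2017-01-01T12:00:00')   # first format matches -> timestamp
field.to_representation('2017-01-01')            # second format matches -> timestamp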
def test_sla_is_populated(self):
    # Arrange
    issue = self.fixture.issue
    dt = timezone.now().replace(microsecond=0)
    ts = datetime_to_timestamp(dt) * 1000
    self.mocked_jira.issue.return_value.fields.field103.ongoingCycle.breachTime.epochMillis = ts

    # Act
    self.backend.update_issue_from_jira(issue)
    issue.refresh_from_db()

    # Assert
    self.assertEqual(issue.first_response_sla, dt)
def test_old_version_of_quota_is_available(self):
    old_usage = self.quota.usage
    self.quota.usage = self.quota.usage + 1
    self.quota.save()
    history_timestamp = core_utils.datetime_to_timestamp(timezone.now() - timedelta(minutes=30))

    self.client.force_authenticate(self.owner)
    response = self.client.get(self.url, data={'point': history_timestamp})

    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data[0]['point'], history_timestamp)
    self.assertEqual(response.data[0]['object']['usage'], old_usage)
def setUp(self):
    self.staff = structure_factories.UserFactory(is_staff=True)
    self.client.force_authenticate(self.staff)
    self.itservice = factories.ITServiceFactory()

    today = datetime.date.today()
    period = format_period(today)
    self.timestamp = datetime_to_timestamp(today)

    next_month = datetime.date.today() + relativedelta(months=1)
    self.next_month = format_period(next_month)

    self.history = models.SlaHistory.objects.create(
        itservice=self.itservice, period=period, value=100.0)
    self.events = models.SlaHistoryEvent.objects.create(
        history=self.history, timestamp=self.timestamp, state='U')
def setUp(self):
    self.datetime = utils.timeshift(days=-1)
    self.timestamp = utils.datetime_to_timestamp(self.datetime)
def datetime_to_elasticsearch_timestamp(self, dt):
    """ Elasticsearch expects timestamps in milliseconds. """
    return datetime_to_timestamp(dt) * 1000
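# For example, 2017-01-01T00:00:00 UTC is 1483228800 in seconds, so the
# helper above returns 1483228800000 for Elasticsearch.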
def to_representation(self, value):
    return utils.datetime_to_timestamp(value)
def setUp(self):
    self.field = StringTimestampField(formats=('%Y-%m-%dT%H:%M:%S',))
    self.datetime = core_utils.timeshift()
    self.datetime_str = self.datetime.strftime('%Y-%m-%dT%H:%M:%S')
    self.timestamp = core_utils.datetime_to_timestamp(self.datetime)