def test_get_instance_with_metadata(self):
    # Verify get_instance() returns a deleted instance's fields (including
    # deleted_at and deleted) when metadata is requested along with the row.
    launched_at = datetime.datetime.utcnow() - datetime.timedelta(
        minutes=5)
    launched_at = str(launched_at)
    terminated_at = str(datetime.datetime.utcnow())
    results = [
        self._fake_instance(launched_at=launched_at,
                            terminated_at=terminated_at,
                            deleted=True)
    ]
    metadata_results = self._fake_metadata()
    # Two DB round-trips are expected: the instance row, then its
    # system metadata.
    self.mock_for_query('nova', nova.GET_INSTANCE_QUERY % INSTANCE_ID_1,
                        results)
    self.mock_for_query('nova',
                        nova.GET_INSTANCE_SYSTEM_METADATA % INSTANCE_ID_1,
                        metadata_results)
    self.mox.ReplayAll()
    instance = self.client.get_instance('RegionOne', INSTANCE_ID_1,
                                        get_metadata=True)
    self.assertIsNotNone(instance)
    self.assertEqual(instance['id'], INSTANCE_ID_1)
    self.assertEqual(instance['instance_flavor_id'], INSTANCE_FLAVOR_ID_1)
    # Timestamp strings must come back converted to unix decimals.
    launched_at_dec = stackutils.str_time_to_unix(launched_at)
    self.assertEqual(instance['launched_at'], launched_at_dec)
    terminated_at_dec = stackutils.str_time_to_unix(terminated_at)
    self.assertEqual(instance['deleted_at'], terminated_at_dec)
    self.assertTrue(instance['deleted'])
    self.mox.VerifyAll()
def _get_filter_args(klass, request, custom_filters=None):
    """Build ORM filter kwargs from a request's query parameters.

    Supports an exact 'instance' (must be uuid-like) filter plus any
    '<field>_min'/'<field>_max' date-range parameters, which become
    '<field>__gte'/'<field>__lte' lookups. Keys listed in
    ``custom_filters`` are skipped here (handled elsewhere).

    :raises BadRequestException: for a non-uuid instance value or a
        range value that is not a parseable date.
    """
    filter_args = {}
    if 'instance' in request.GET:
        uuid = request.GET['instance']
        # Validate before recording the filter so malformed input never
        # ends up in filter_args (the original stored it first).
        if not utils.is_uuid_like(uuid):
            msg = "%s is not uuid-like" % uuid
            # Use the message= kwarg for consistency with every other
            # BadRequestException raised in this module.
            raise BadRequestException(message=msg)
        filter_args['instance'] = uuid

    for (key, value) in request.GET.items():
        if custom_filters and key in custom_filters:
            continue
        # Map the suffix to the ORM lookup; one shared code path for
        # both bounds instead of two duplicated branches.
        if key.endswith('_min'):
            lookup = '__gte'
        elif key.endswith('_max'):
            lookup = '__lte'
        else:
            continue
        field = key[0:-4]
        _check_has_field(klass, field)
        try:
            filter_args['%s%s' % (field, lookup)] = \
                utils.str_time_to_unix(value)
        except AttributeError:
            msg = "Range filters must be dates."
            raise BadRequestException(message=msg)
    return filter_args
def __init__(self, body, deployment, routing_key, json):
    """Parse a Glance notification body into typed attributes.

    When the payload is a dict, image fields are extracted and the
    timestamp strings (created_at, audit_period_beginning/ending) are
    converted to unix decimals. For any other payload (e.g. an error
    string) every derived attribute defaults to empty/None.
    """
    super(GlanceNotification, self).__init__(body, deployment,
                                             routing_key, json)
    if isinstance(self.payload, dict):
        self.properties = self.payload.get('properties', {})
        self.image_type = image_type.get_numeric_code(self.payload)
        self.status = self.payload.get('status', None)
        self.uuid = self.payload.get('id', None)
        self.size = self.payload.get('size', None)
        created_at = self.payload.get('created_at', None)
        # 'x and convert(x)' keeps None/'' as-is, converts otherwise.
        self.created_at = created_at and utils.str_time_to_unix(created_at)
        audit_period_beginning = self.payload.get(
            'audit_period_beginning', None)
        self.audit_period_beginning = audit_period_beginning and\
            utils.str_time_to_unix(audit_period_beginning)
        audit_period_ending = self.payload.get(
            'audit_period_ending', None)
        self.audit_period_ending = audit_period_ending and \
            utils.str_time_to_unix(audit_period_ending)
    else:
        # Non-dict payload: nothing to extract.
        self.properties = {}
        self.image_type = None
        self.status = None
        self.uuid = None
        self.size = None
        self.created_at = None
        self.audit_period_beginning = None
        self.audit_period_ending = None
def get_event_stats(request):
    """Return {'stats': {'count': N}} for raw events matching the filters.

    Optional query params: when_min/when_max (must be supplied together,
    and the span is capped at HARD_WHEN_RANGE_LIMIT seconds), an event
    name, and a service (defaults to 'nova').

    :raises BadRequestException: for missing/invalid parameters or
        unparseable dates.
    """
    try:
        filters = {}
        if 'when_min' in request.GET or 'when_max' in request.GET:
            # Either both bounds or neither — a half-open range is refused.
            if not ('when_min' in request.GET and
                    'when_max' in request.GET):
                msg = "When providing date range filters, " \
                      "a min and max are required."
                raise BadRequestException(message=msg)
            when_min = utils.str_time_to_unix(request.GET['when_min'])
            when_max = utils.str_time_to_unix(request.GET['when_max'])
            # Hard cap keeps a single count() query from scanning an
            # unbounded time span.
            if when_max - when_min > HARD_WHEN_RANGE_LIMIT:
                msg = "Date ranges may be no larger than %s seconds"
                raise BadRequestException(
                    message=msg % HARD_WHEN_RANGE_LIMIT)
            filters['when__lte'] = when_max
            filters['when__gte'] = when_min
        if 'event' in request.GET:
            filters['event'] = request.GET['event']
        service = request.GET.get("service", "nova")
        rawdata = _rawdata_factory(service)
        return {'stats': {'count': rawdata.filter(**filters).count()}}
    except (KeyError, TypeError):
        raise BadRequestException(message="Invalid/absent query parameter")
    except (ValueError, AttributeError):
        raise BadRequestException(message="Invalid format for date (Correct "
                                          "format should be %Y-%m-%d %H:%M:%S)")
def test_get_instance_with_metadata(self):
    """get_instance() with get_metadata=True returns the deleted
    instance's id, flavor, unix-decimal timestamps and deleted flag."""
    launch_time = str(datetime.datetime.utcnow() -
                      datetime.timedelta(minutes=5))
    terminate_time = str(datetime.datetime.utcnow())
    fake_rows = [self._fake_instance(launched_at=launch_time,
                                     terminated_at=terminate_time,
                                     deleted=True)]
    fake_metadata = self._fake_metadata()
    # Expect the instance query first, then the system-metadata query.
    self.mock_for_query('nova', nova.GET_INSTANCE_QUERY % INSTANCE_ID_1,
                        fake_rows)
    self.mock_for_query('nova',
                        nova.GET_INSTANCE_SYSTEM_METADATA % INSTANCE_ID_1,
                        fake_metadata)
    self.mox.ReplayAll()
    instance = self.client.get_instance('RegionOne', INSTANCE_ID_1,
                                        get_metadata=True)
    self.assertIsNotNone(instance)
    self.assertEqual(instance['id'], INSTANCE_ID_1)
    self.assertEqual(instance['instance_flavor_id'], INSTANCE_FLAVOR_ID_1)
    self.assertEqual(instance['launched_at'],
                     stackutils.str_time_to_unix(launch_time))
    self.assertEqual(instance['deleted_at'],
                     stackutils.str_time_to_unix(terminate_time))
    self.assertTrue(instance['deleted'])
    self.mox.VerifyAll()
def _process_exists(raw, body):
    """Create an InstanceExists record from a compute.instance.exists event.

    Matches the reported launched_at (with a 1-second window) against
    existing usage records; a delete record is looked up and linked only
    when the payload actually reports a deleted_at.
    """
    payload = body['payload']
    instance_id = payload['instance_id']
    launched_at = utils.str_time_to_unix(payload['launched_at'])
    # 1-second window absorbs sub-second truncation of launched_at.
    launched_range = (launched_at, launched_at+1)
    usage = STACKDB.get_instance_usage(instance=instance_id,
                                       launched_at__range=launched_range)
    values = {}
    values['message_id'] = body['message_id']
    values['instance'] = instance_id
    values['launched_at'] = launched_at
    beginning = utils.str_time_to_unix(payload['audit_period_beginning'])
    values['audit_period_beginning'] = beginning
    ending = utils.str_time_to_unix(payload['audit_period_ending'])
    values['audit_period_ending'] = ending
    values['instance_type_id'] = payload['instance_type_id']
    if usage:
        values['usage'] = usage
    values['raw'] = raw
    deleted_at = payload.get('deleted_at')
    if deleted_at and deleted_at != '':
        # We only want to pre-populate the 'delete' if we know this is in
        # fact an exist event for a deleted instance. Otherwise, there is a
        # chance we may populate it for a previous period's exist.
        # (Previously the delete was fetched and attached unconditionally.)
        delete = STACKDB.get_instance_delete(
            instance=instance_id, launched_at__range=launched_range)
        values['deleted_at'] = utils.str_time_to_unix(deleted_at)
        if delete:
            values['delete'] = delete
    exists = STACKDB.create_instance_exists(**values)
    STACKDB.save(exists)
def _process_exists(raw, body):
    """Create an InstanceExists record from a compute.instance.exists event.

    Usage records are matched by launched_at (1-second window); a delete
    record is looked up and linked only when the payload reports a
    deleted_at.
    """
    payload = body['payload']
    instance_id = payload['instance_id']
    launched_at = utils.str_time_to_unix(payload['launched_at'])
    # 1-second window absorbs sub-second truncation of launched_at.
    launched_range = (launched_at, launched_at+1)
    usage = STACKDB.get_instance_usage(instance=instance_id,
                                       launched_at__range=launched_range)
    values = {}
    values['message_id'] = body['message_id']
    values['instance'] = instance_id
    values['launched_at'] = launched_at
    beginning = utils.str_time_to_unix(payload['audit_period_beginning'])
    values['audit_period_beginning'] = beginning
    ending = utils.str_time_to_unix(payload['audit_period_ending'])
    values['audit_period_ending'] = ending
    values['instance_type_id'] = payload['instance_type_id']
    if usage:
        values['usage'] = usage
    values['raw'] = raw
    deleted_at = payload.get('deleted_at')
    if deleted_at and deleted_at != '':
        # We only want to pre-populate the 'delete' if we know this is in fact
        # an exist event for a deleted instance. Otherwise, there is a
        # chance we may populate it for a previous period's exist.
        delete = STACKDB.get_instance_delete(
            instance=instance_id, launched_at__range=launched_range)
        deleted_at = utils.str_time_to_unix(deleted_at)
        values['deleted_at'] = deleted_at
        if delete:
            values['delete'] = delete
    exists = STACKDB.create_instance_exists(**values)
    STACKDB.save(exists)
def _process_exists(raw):
    """Create an InstanceExists record by re-parsing raw.json.

    raw.json holds a [routing_key, body] pair; the body (index 1)
    supplies the payload and message_id.
    """
    notif = json.loads(raw.json)
    payload = notif[1]['payload']
    instance_id = payload['instance_id']
    launched_at = utils.str_time_to_unix(payload['launched_at'])
    # 1-second window to match usage/delete rows on launched_at.
    launched_range = (launched_at, launched_at+1)
    usage = STACKDB.get_instance_usage(instance=instance_id,
                                       launched_at__range=launched_range)
    delete = STACKDB.get_instance_delete(instance=instance_id,
                                         launched_at__range=launched_range)
    values = {}
    values['message_id'] = notif[1]['message_id']
    values['instance'] = instance_id
    values['launched_at'] = launched_at
    values['instance_type_id'] = payload['instance_type_id']
    if usage:
        values['usage'] = usage
    if delete:
        # NOTE(review): this attaches a delete even when the exist reports
        # no deleted_at; other versions of this function in the codebase
        # only do so for deleted instances — confirm which is intended.
        values['delete'] = delete
    values['raw'] = raw
    deleted_at = payload.get('deleted_at')
    if deleted_at and deleted_at != '':
        deleted_at = utils.str_time_to_unix(deleted_at)
        values['deleted_at'] = deleted_at
    exists = STACKDB.create_instance_exists(**values)
    STACKDB.save(exists)
def test_save_image_exists_with_delete_not_none(self):
    """save_exists() for a deleted image should link the matching delete
    record and store deleted_at as a unix decimal."""
    raw = self.mox.CreateMockAnything()
    delete = self.mox.CreateMockAnything()
    audit_period_beginning = "2013-05-20 17:31:57.939614"
    audit_period_ending = "2013-06-20 17:31:57.939614"
    size = 123
    uuid = "2df2ccf6-bc1b-4853-aab0-25fda346b3bb"
    deleted_at = "2013-06-20 14:31:57.939614"
    body = {
        "event_type": "image.upload",
        "timestamp": "2013-06-20 18:31:57.939614",
        "publisher_id": "glance-api01-r2961.global.preprod-ord.ohthree.com",
        "payload": {
            "created_at": str(DUMMY_TIME),
            "status": "saving",
            "audit_period_beginning": audit_period_beginning,
            "audit_period_ending": audit_period_ending,
            "properties": {
                "image_type": "snapshot",
                "instance_uuid": INSTANCE_ID_1,
            },
            "deleted_at": deleted_at,
            "size": size,
            "owner": TENANT_ID_1,
            "id": "2df2ccf6-bc1b-4853-aab0-25fda346b3bb",
        }
    }
    deployment = "1"
    routing_key = "glance_monitor.info"
    # Serialize the fixture the way the worker does; the previous
    # "'{[...]}' % body" interpolation produced an invalid JSON string
    # and shadowed the json module name.
    json_body = json.dumps([routing_key, body])
    self.mox.StubOutWithMock(db, 'create_image_exists')
    self.mox.StubOutWithMock(db, 'get_image_usage')
    self.mox.StubOutWithMock(db, 'get_image_delete')
    created_at_range = (DECIMAL_DUMMY_TIME, DECIMAL_DUMMY_TIME+1)
    db.get_image_usage(created_at__range=created_at_range,
                       uuid=uuid).AndReturn(None)
    db.get_image_delete(created_at__range=created_at_range,
                        uuid=uuid).AndReturn(delete)
    db.create_image_exists(
        created_at=utils.str_time_to_unix(str(DUMMY_TIME)),
        owner=TENANT_ID_1,
        raw=raw,
        audit_period_beginning=utils.str_time_to_unix(
            audit_period_beginning),
        audit_period_ending=utils.str_time_to_unix(audit_period_ending),
        size=size,
        uuid=uuid,
        usage=None,
        delete=delete,
        # deleted_at is already a str; no redundant str() wrapper needed.
        deleted_at=utils.str_time_to_unix(deleted_at)).AndReturn(raw)
    self.mox.ReplayAll()
    notification = GlanceNotification(body, deployment, routing_key,
                                      json_body)
    notification.save_exists(raw)
    self.mox.VerifyAll()
def _process_delete(raw, notification):
    """Get-or-create an InstanceDelete from a delete notification.

    Skipped entirely when the notification carries no launched_at, since
    the delete could not then be matched to a launch record.
    """
    if notification.launched_at and notification.launched_at != '':
        instance_id = notification.instance
        # NOTE(review): deleted_at is converted unguarded — assumes delete
        # notifications always carry a parseable deleted_at; confirm.
        deleted_at = utils.str_time_to_unix(notification.deleted_at)
        launched_at = utils.str_time_to_unix(notification.launched_at)
        values = {
            'instance': instance_id,
            'deleted_at': deleted_at,
            'launched_at': launched_at
        }
        (delete, new) = STACKDB.get_or_create_instance_delete(**values)
        # Always point the (possibly pre-existing) delete at this raw.
        delete.raw = raw
        STACKDB.save(delete)
def test_str_time_to_unix(self):
    """str_time_to_unix() parses all supported datetime string formats
    ('T' or space separator, optional 'Z', optional fraction) to the
    same unix decimal, and raises on garbage input."""
    self.assertEqual(
        stacktach_utils.str_time_to_unix("2013-05-15T11:51:11Z"),
        decimal.Decimal('1368618671'))
    self.assertEqual(
        stacktach_utils.str_time_to_unix("2013-05-15T11:51:11.123Z"),
        decimal.Decimal('1368618671.123'))
    self.assertEqual(
        stacktach_utils.str_time_to_unix("2013-05-15T11:51:11"),
        decimal.Decimal('1368618671'))
    self.assertEqual(
        stacktach_utils.str_time_to_unix("2013-05-15T11:51:11.123"),
        decimal.Decimal('1368618671.123'))
    self.assertEqual(
        stacktach_utils.str_time_to_unix("2013-05-15 11:51:11"),
        decimal.Decimal('1368618671'))
    self.assertEqual(
        stacktach_utils.str_time_to_unix("2013-05-15 11:51:11.123"),
        decimal.Decimal('1368618671.123'))
    # Invalid input must raise rather than return a timestamp.
    # (Removed a stray ", decimal.Decimal('1368618671')" copy-paste
    # leftover that silently built a throwaway tuple here.)
    with self.assertRaises(Exception):
        stacktach_utils.str_time_to_unix("invalid date")
def save_exists(self, raw):
    """Persist an ImageExists row for each image in the payload.

    Audit-period bounds and per-image timestamps are converted to unix
    decimals; existing usage (and, for deleted images, delete) records
    are linked by image uuid. Invalid payloads and images without a
    created_at are warned about and skipped.
    """
    if isinstance(self.payload, dict):
        audit_period_beginning = self.payload.get('audit_period_beginning',
                                                  None)
        # 'x and convert(x)' keeps None/'' as-is, converts otherwise.
        audit_period_beginning = audit_period_beginning and\
            utils.str_time_to_unix(audit_period_beginning)
        audit_period_ending = self.payload.get('audit_period_ending', None)
        audit_period_ending = audit_period_ending and \
            utils.str_time_to_unix(audit_period_ending)
        message_id = self.message_id
        images = self.payload.get('images', [])
    else:
        # Non-dict payload: warn and fall through with an empty image
        # list so nothing is written.
        stacklog.warn("Received exists with invalid payload "
                      "GlanceRawData(%s)" % raw.id)
        audit_period_beginning = None
        audit_period_ending = None
        images = []
    for image in images:
        created_at = image['created_at']
        created_at = created_at and utils.str_time_to_unix(created_at)
        uuid = image['id']
        deleted_at = image['deleted_at']
        deleted_at = deleted_at and utils.str_time_to_unix(deleted_at)
        if created_at:
            values = {
                'uuid': uuid,
                'audit_period_beginning': audit_period_beginning,
                'audit_period_ending': audit_period_ending,
                'owner': self.owner,
                'size': image['size'],
                'raw': raw,
                'message_id': message_id
            }
            usage = db.get_image_usage(uuid=uuid)
            values['usage'] = usage
            values['created_at'] = created_at
            if deleted_at:
                # Only deleted images get a linked delete record.
                delete = db.get_image_delete(uuid=uuid)
                values['delete'] = delete
                values['deleted_at'] = deleted_at
            db.create_image_exists(**values)
        else:
            stacklog.warn(
                "Ignoring exists without created_at. GlanceRawData(%s)"
                % raw.id)
def _process_exists(raw, notification):
    """Create an InstanceExists row from an exists notification.

    Requires a non-empty launched_at; usage (and, for deleted instances,
    delete) records are matched within a 1-second launched_at window.
    Exists without launched_at are warned about and skipped.
    """
    instance_id = notification.instance
    launched_at_str = notification.launched_at
    if launched_at_str is not None and launched_at_str != '':
        launched_at = utils.str_time_to_unix(notification.launched_at)
        # 1-second window absorbs sub-second truncation of launched_at.
        launched_range = (launched_at, launched_at + 1)
        usage = STACKDB.get_instance_usage(instance=instance_id,
                                           launched_at__range=launched_range)
        values = {}
        values['message_id'] = notification.message_id
        values['instance'] = instance_id
        values['launched_at'] = launched_at
        beginning = utils.str_time_to_unix(notification.audit_period_beginning)
        values['audit_period_beginning'] = beginning
        ending = utils.str_time_to_unix(notification.audit_period_ending)
        values['audit_period_ending'] = ending
        values['instance_type_id'] = notification.instance_type_id
        values['instance_flavor_id'] = notification.instance_flavor_id
        if usage:
            values['usage'] = usage
        values['raw'] = raw
        values['tenant'] = notification.tenant
        values['rax_options'] = notification.rax_options
        values['os_architecture'] = notification.os_architecture
        values['os_version'] = notification.os_version
        values['os_distro'] = notification.os_distro
        values['bandwidth_public_out'] = notification.bandwidth_public_out
        deleted_at = notification.deleted_at
        if deleted_at and deleted_at != '':
            # We only want to pre-populate the 'delete' if we know this is in
            # fact an exist event for a deleted instance. Otherwise, there
            # is a chance we may populate it for a previous period's exist.
            filter = {
                'instance': instance_id,
                'launched_at__range': launched_range
            }
            delete = STACKDB.get_instance_delete(**filter)
            deleted_at = utils.str_time_to_unix(deleted_at)
            values['deleted_at'] = deleted_at
            if delete:
                values['delete'] = delete
        exists = STACKDB.create_instance_exists(**values)
        STACKDB.save(exists)
    else:
        stacklog.warn("Ignoring exists without launched_at. RawData(%s)"
                      % raw.id)
def save_exists(self, raw):
    """Write one ImageExists row per image in this notification.

    Audit-period bounds and per-image timestamps become unix decimals;
    usage (and delete, when the image is deleted) records are matched by
    uuid. Invalid payloads and images lacking created_at are logged and
    skipped.
    """
    if isinstance(self.payload, dict):
        period_start = self.payload.get(
            'audit_period_beginning', None)
        if period_start:
            period_start = utils.str_time_to_unix(period_start)
        period_end = self.payload.get(
            'audit_period_ending', None)
        if period_end:
            period_end = utils.str_time_to_unix(period_end)
        message_id = self.message_id
        images = self.payload.get('images', [])
    else:
        stacklog.warn("Received exists with invalid payload "
                      "GlanceRawData(%s)" % raw.id)
        period_start = None
        period_end = None
        images = []
    for image in images:
        created_at = image['created_at']
        if created_at:
            created_at = utils.str_time_to_unix(created_at)
        image_uuid = image['id']
        deleted_at = image['deleted_at']
        if deleted_at:
            deleted_at = utils.str_time_to_unix(deleted_at)
        if not created_at:
            # Can't usefully record an exists with no creation time.
            stacklog.warn("Ignoring exists without created_at. "
                          "GlanceRawData(%s)" % raw.id)
            continue
        row = {
            'uuid': image_uuid,
            'audit_period_beginning': period_start,
            'audit_period_ending': period_end,
            'owner': self.owner,
            'size': image['size'],
            'raw': raw,
            'message_id': message_id,
        }
        row['usage'] = db.get_image_usage(uuid=image_uuid)
        row['created_at'] = created_at
        if deleted_at:
            row['delete'] = db.get_image_delete(uuid=image_uuid)
            row['deleted_at'] = deleted_at
        db.create_image_exists(**row)
def _process_usage_for_updates(raw, body):
    """Update the InstanceUsage record on end-of-operation events.

    Sets launched_at for create/rebuild/resize end events, updates the
    flavor on resize events, and refreshes tenant and image metadata.
    """
    payload = body['payload']
    if raw.event == INSTANCE_EVENT['create_end']:
        # A failed create ('message' present and not 'Success') must not
        # touch the usage record.
        if 'message' in payload and payload['message'] != 'Success':
            return
    instance_id = payload['instance_id']
    request_id = body['_context_request_id']
    (usage, new) = STACKDB.get_or_create_instance_usage(instance=instance_id,
                                                        request_id=request_id)
    if raw.event in [INSTANCE_EVENT['create_end'],
                     INSTANCE_EVENT['rebuild_end'],
                     INSTANCE_EVENT['resize_finish_end'],
                     INSTANCE_EVENT['resize_revert_end']]:
        usage.launched_at = utils.str_time_to_unix(payload['launched_at'])
    if raw.event == INSTANCE_EVENT['resize_revert_end']:
        # Revert restores the original flavor reported in the payload.
        usage.instance_type_id = payload['instance_type_id']
    elif raw.event == INSTANCE_EVENT['resize_prep_end']:
        usage.instance_type_id = payload['new_instance_type_id']
    usage.tenant = payload['tenant_id']
    # Namespaced image_meta keys map to the os_*/rax_options columns.
    image_meta = payload.get('image_meta', {})
    usage.rax_options = image_meta.get('com.rackspace__1__options', '')
    usage.os_architecture = image_meta.get('org.openstack__1__architecture',
                                           '')
    usage.os_version = image_meta.get('org.openstack__1__os_version', '')
    usage.os_distro = image_meta.get('org.openstack__1__os_distro', '')
    STACKDB.save(usage)
def _process_usage_for_new_launch(raw, body):
    """Create/seed an InstanceUsage when an operation's start event arrives."""
    payload = body['payload']
    values = {}
    values['instance'] = payload['instance_id']
    values['request_id'] = body['_context_request_id']
    (usage, new) = STACKDB.get_or_create_instance_usage(**values)
    if raw.event in [INSTANCE_EVENT['create_start'],
                     INSTANCE_EVENT['rebuild_start']]:
        usage.instance_type_id = payload['instance_type_id']
    if raw.event in [INSTANCE_EVENT['rebuild_start'],
                     INSTANCE_EVENT['resize_prep_start'],
                     INSTANCE_EVENT['resize_revert_start']] and\
            usage.launched_at is None:
        # Grab the launched_at so if this action spans the audit period,
        # we will have a launch record corresponding to the exists.
        # We don't want to override a launched_at if it is already set
        # though, because we may have already received the end event
        usage.launched_at = utils.str_time_to_unix(payload['launched_at'])
    usage.tenant = payload['tenant_id']
    # Namespaced image_meta keys map to the os_*/rax_options columns.
    image_meta = payload.get('image_meta', {})
    usage.rax_options = image_meta.get('com.rackspace__1__options', '')
    usage.os_architecture = image_meta.get('org.openstack__1__architecture',
                                           '')
    usage.os_version = image_meta.get('org.openstack__1__os_version', '')
    usage.os_distro = image_meta.get('org.openstack__1__os_distro', '')
    STACKDB.save(usage)
def deleted_at(self):
    """Deletion time as a unix decimal, or a falsy value when absent.

    The top-level body value wins; the payload's value is only consulted
    when the payload is a dict and the body has none.
    """
    when = self.body.get('deleted_at', None)
    if isinstance(self.payload, dict) and not when:
        when = self.payload.get('deleted_at', None)
    return when and utils.str_time_to_unix(when)
def _process_usage_for_updates(raw, body):
    """Refresh an InstanceUsage record from an end-of-operation payload.

    launched_at is set on create/rebuild/resize end events; the flavor
    is updated on resize events; the tenant is always refreshed.
    """
    payload = body['payload']
    if raw.event == INSTANCE_EVENT['create_end']:
        # A failed create must not touch the usage record.
        if payload.get('message', 'Success') != 'Success':
            return
    usage, _created = STACKDB.get_or_create_instance_usage(
        instance=payload['instance_id'],
        request_id=body['_context_request_id'])
    launch_events = (INSTANCE_EVENT['create_end'],
                     INSTANCE_EVENT['rebuild_end'],
                     INSTANCE_EVENT['resize_finish_end'],
                     INSTANCE_EVENT['resize_revert_end'])
    if raw.event in launch_events:
        usage.launched_at = utils.str_time_to_unix(payload['launched_at'])
    if raw.event == INSTANCE_EVENT['resize_revert_end']:
        usage.instance_type_id = payload['instance_type_id']
    elif raw.event == INSTANCE_EVENT['resize_prep_end']:
        usage.instance_type_id = payload['new_instance_type_id']
    usage.tenant = payload['tenant_id']
    STACKDB.save(usage)
def test_save_should_persist_glance_rawdata_erro_payload_to_database(self):
    """An error-routed Glance event whose payload is a plain string is
    still stored as raw data, with the image fields all None."""
    body = {
        "event_type": "image.upload",
        "timestamp": "2013-06-20 17:31:57.939614",
        "publisher_id": "glance-api01-r2961.global.preprod-ord.ohthree.com",
        "payload": "error_message"
    }
    deployment = "1"
    routing_key = "glance_monitor.error"
    json_body = json.dumps([routing_key, body])
    raw = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(db, 'create_glance_rawdata')
    # publisher_id is expected to split into service (first component)
    # and host (the remainder).
    db.create_glance_rawdata(
        deployment="1",
        owner=None,
        json=json_body,
        routing_key=routing_key,
        when=utils.str_time_to_unix("2013-06-20 17:31:57.939614"),
        publisher="glance-api01-r2961.global.preprod-ord.ohthree.com",
        event="image.upload",
        service="glance-api01-r2961",
        host="global.preprod-ord.ohthree.com",
        instance=None,
        request_id='',
        image_type=None,
        status=None,
        uuid=None).AndReturn(raw)
    self.mox.ReplayAll()
    notification = GlanceNotification(body, deployment, routing_key,
                                      json_body)
    self.assertEquals(notification.save(), raw)
    self.mox.VerifyAll()
def _process_usage_for_updates(raw, notification):
    """Update the InstanceUsage record on end-of-operation events.

    This variant also treats rescue_end as a launch event and updates
    the flavor on resize finish start/end and resize revert end.
    """
    if raw.event == INSTANCE_EVENT['create_end']:
        # A failed create must not touch the usage record.
        if notification.message and notification.message != 'Success':
            return
    instance_id = notification.instance
    request_id = notification.request_id
    (usage, new) = STACKDB.get_or_create_instance_usage(instance=instance_id,
                                                        request_id=request_id)
    if raw.event in [INSTANCE_EVENT['create_end'],
                     INSTANCE_EVENT['rebuild_end'],
                     INSTANCE_EVENT['resize_finish_end'],
                     INSTANCE_EVENT['resize_revert_end'],
                     INSTANCE_EVENT['rescue_end']]:
        usage.launched_at = utils.str_time_to_unix(notification.launched_at)
    if raw.event in [INSTANCE_EVENT['resize_revert_end'],
                     INSTANCE_EVENT['resize_finish_start'],
                     INSTANCE_EVENT['resize_finish_end']]:
        usage.instance_type_id = notification.instance_type_id
        usage.instance_flavor_id = notification.instance_flavor_id
    usage.tenant = notification.tenant
    usage.rax_options = notification.rax_options
    usage.os_architecture = notification.os_architecture
    usage.os_version = notification.os_version
    usage.os_distro = notification.os_distro
    STACKDB.save(usage)
def _process_usage_for_updates(raw, body):
    """Update the InstanceUsage record on end-of-operation events."""
    payload = body['payload']
    if raw.event == INSTANCE_EVENT['create_end']:
        # A failed create must not touch the usage record.
        if 'message' in payload and payload['message'] != 'Success':
            return
    instance_id = payload['instance_id']
    request_id = body['_context_request_id']
    (usage, new) = STACKDB.get_or_create_instance_usage(instance=instance_id,
                                                        request_id=request_id)
    if raw.event in [
        INSTANCE_EVENT['create_end'],
        INSTANCE_EVENT['rebuild_end'],
        INSTANCE_EVENT['resize_finish_end'],
        INSTANCE_EVENT['resize_revert_end']
    ]:
        usage.launched_at = utils.str_time_to_unix(payload['launched_at'])
    if raw.event == INSTANCE_EVENT['resize_revert_end']:
        # Revert restores the original flavor reported in the payload.
        usage.instance_type_id = payload['instance_type_id']
    elif raw.event == INSTANCE_EVENT['resize_prep_end']:
        usage.instance_type_id = payload['new_instance_type_id']
    usage.tenant = payload['tenant_id']
    STACKDB.save(usage)
def test_save_usage_should_persist_image_usage(self):
    """save_usage() should create an ImageUsage row from the payload."""
    raw = self.mox.CreateMockAnything()
    size = 123
    uuid = "2df2ccf6-bc1b-4853-aab0-25fda346b3bb"
    body = {
        "event_type": "image.upload",
        "timestamp": "2013-06-20 18:31:57.939614",
        "publisher_id": "glance-api01-r2961.global.preprod-ord.ohthree.com",
        "payload": {
            "created_at": str(DUMMY_TIME),
            "size": size,
            "owner": TENANT_ID_1,
            "id": "2df2ccf6-bc1b-4853-aab0-25fda346b3bb",
        }
    }
    deployment = "1"
    routing_key = "glance_monitor.info"
    # Serialize the fixture properly; the previous "'{[...]}' % body"
    # interpolation produced an invalid JSON string and shadowed the
    # json module name.
    json_body = json.dumps([routing_key, body])
    self.mox.StubOutWithMock(db, 'create_image_usage')
    db.create_image_usage(
        created_at=utils.str_time_to_unix(str(DUMMY_TIME)),
        owner=TENANT_ID_1,
        last_raw=raw,
        size=size,
        uuid=uuid).AndReturn(raw)
    self.mox.ReplayAll()
    notification = GlanceNotification(body, deployment, routing_key,
                                      json_body)
    notification.save_usage(raw)
    self.mox.VerifyAll()
def _process_usage_for_updates(raw, notification):
    """Update the InstanceUsage record on end-of-operation events
    (includes rescue_end; flavor updates on resize finish/revert end)."""
    if raw.event == INSTANCE_EVENT['create_end']:
        # A failed create must not touch the usage record.
        if notification.message and notification.message != 'Success':
            return
    instance_id = notification.instance
    request_id = notification.request_id
    (usage, new) = STACKDB.get_or_create_instance_usage(instance=instance_id,
                                                        request_id=request_id)
    if raw.event in [
        INSTANCE_EVENT['create_end'],
        INSTANCE_EVENT['rebuild_end'],
        INSTANCE_EVENT['resize_finish_end'],
        INSTANCE_EVENT['resize_revert_end'],
        INSTANCE_EVENT['rescue_end']
    ]:
        usage.launched_at = utils.str_time_to_unix(notification.launched_at)
    if raw.event in [
        INSTANCE_EVENT['resize_revert_end'],
        INSTANCE_EVENT['resize_finish_end']
    ]:
        usage.instance_type_id = notification.instance_type_id
        usage.instance_flavor_id = notification.instance_flavor_id
    usage.tenant = notification.tenant
    usage.rax_options = notification.rax_options
    usage.os_architecture = notification.os_architecture
    usage.os_version = notification.os_version
    usage.os_distro = notification.os_distro
    STACKDB.save(usage)
def test_save_delete_should_persist_image_delete(self):
    """save_delete() should create an ImageDelete row with the uuid and
    deleted_at converted to a unix decimal."""
    raw = self.mox.CreateMockAnything()
    uuid = "2df2ccf6-bc1b-4853-aab0-25fda346b3bb"
    deleted_at = "2013-06-20 14:31:57.939614"
    body = {
        "event_type": "image.delete",
        "publisher_id": "glance-api01-r2961.global.preprod-ord.ohthree.com",
        "payload": {
            "id": "2df2ccf6-bc1b-4853-aab0-25fda346b3bb",
            "deleted_at": deleted_at
        }
    }
    deployment = "1"
    routing_key = "glance_monitor.info"
    json_body = json.dumps([routing_key, body])
    self.mox.StubOutWithMock(db, 'create_image_delete')
    db.create_image_delete(
        raw=raw,
        uuid=uuid,
        deleted_at=utils.str_time_to_unix(deleted_at)).AndReturn(raw)
    self.mox.ReplayAll()
    notification = GlanceNotification(body, deployment, routing_key,
                                      json_body)
    notification.save_delete(raw)
    self.mox.VerifyAll()
def test_save_usage_should_persist_image_usage(self):
    """save_usage() should create an ImageUsage row from the payload."""
    raw = self.mox.CreateMockAnything()
    size = 123
    uuid = "2df2ccf6-bc1b-4853-aab0-25fda346b3bb"
    body = {
        "event_type": "image.upload",
        "timestamp": "2013-06-20 18:31:57.939614",
        "publisher_id": "glance-api01-r2961.global.preprod-ord.ohthree.com",
        "payload": {
            "created_at": str(DUMMY_TIME),
            "size": size,
            "owner": TENANT_ID_1,
            "id": "2df2ccf6-bc1b-4853-aab0-25fda346b3bb",
        }
    }
    deployment = "1"
    routing_key = "glance_monitor.info"
    json_body = json.dumps([routing_key, body])
    self.mox.StubOutWithMock(db, 'create_image_usage')
    # created_at must arrive converted to a unix decimal.
    db.create_image_usage(created_at=utils.str_time_to_unix(
        str(DUMMY_TIME)),
        owner=TENANT_ID_1,
        last_raw=raw,
        size=size,
        uuid=uuid).AndReturn(raw)
    self.mox.ReplayAll()
    notification = GlanceNotification(body, deployment, routing_key,
                                      json_body)
    notification.save_usage(raw)
    self.mox.VerifyAll()
def process_raw_data(deployment, args, json_args):
    """This is called directly by the worker to add the event to the db."""
    db.reset_queries()
    routing_key, body = args
    record = None
    handler = HANDLERS.get(routing_key, None)
    if handler:
        values = handler(routing_key, body)
        # A falsy result means the handler chose to skip this event.
        if not values:
            return record
        values['deployment'] = deployment
        try:
            when = body['timestamp']
        except KeyError:
            when = body['_context_timestamp']  # Old way of doing it
        values['when'] = utils.str_time_to_unix(when)
        values['routing_key'] = routing_key
        values['json'] = json_args
        record = STACKDB.create_rawdata(**values)
        STACKDB.save(record)
        # Fan the saved raw out to the lifecycle and usage aggregators.
        aggregate_lifecycle(record)
        aggregate_usage(record, body)
    return record
def _process_usage_for_new_launch(raw, notification):
    """Create/seed an InstanceUsage when an operation's start event
    arrives (notification-object variant)."""
    values = {}
    values['instance'] = notification.instance
    values['request_id'] = notification.request_id
    (usage, new) = STACKDB.get_or_create_instance_usage(**values)
    if raw.event in [INSTANCE_EVENT['create_start'],
                     INSTANCE_EVENT['rebuild_start']]:
        usage.instance_type_id = notification.instance_type_id
    if raw.event in [INSTANCE_EVENT['rebuild_start'],
                     INSTANCE_EVENT['resize_prep_start'],
                     INSTANCE_EVENT['resize_revert_start']] and\
            usage.launched_at is None:
        # Grab the launched_at so if this action spans the audit period,
        # we will have a launch record corresponding to the exists.
        # We don't want to override a launched_at if it is already set
        # though, because we may have already received the end event
        usage.launched_at = utils.str_time_to_unix(notification.launched_at)
    usage.tenant = notification.tenant
    usage.rax_options = notification.rax_options
    usage.os_architecture = notification.os_architecture
    usage.os_version = notification.os_version
    usage.os_distro = notification.os_distro
    STACKDB.save(usage)
def _process_usage_for_new_launch(raw, body):
    """Create/seed an InstanceUsage when an operation's start event arrives."""
    payload = body['payload']
    values = {}
    values['instance'] = payload['instance_id']
    values['request_id'] = body['_context_request_id']
    (usage, new) = STACKDB.get_or_create_instance_usage(**values)
    if raw.event in [
        INSTANCE_EVENT['create_start'],
        INSTANCE_EVENT['rebuild_start']
    ]:
        usage.instance_type_id = payload['instance_type_id']
    if raw.event in [INSTANCE_EVENT['rebuild_start'],
                     INSTANCE_EVENT['resize_prep_start'],
                     INSTANCE_EVENT['resize_revert_start']] and\
            usage.launched_at is None:
        # Grab the launched_at so if this action spans the audit period,
        # we will have a launch record corresponding to the exists.
        # We don't want to override a launched_at if it is already set
        # though, because we may have already received the end event
        usage.launched_at = utils.str_time_to_unix(payload['launched_at'])
    usage.tenant = payload['tenant_id']
    STACKDB.save(usage)
def test_save_delete_should_persist_image_delete(self):
    """save_delete() should create an ImageDelete row with the uuid and
    deleted_at converted to a unix decimal."""
    raw = self.mox.CreateMockAnything()
    uuid = "2df2ccf6-bc1b-4853-aab0-25fda346b3bb"
    deleted_at = "2013-06-20 14:31:57.939614"
    body = {
        "event_type": "image.delete",
        "publisher_id": "glance-api01-r2961.global.preprod-ord.ohthree.com",
        "payload": {
            "id": "2df2ccf6-bc1b-4853-aab0-25fda346b3bb",
            "deleted_at": deleted_at
        }
    }
    deployment = "1"
    routing_key = "glance_monitor.info"
    # Serialize the fixture properly; the previous "'{[...]}' % body"
    # interpolation produced an invalid JSON string and shadowed the
    # json module name.
    json_body = json.dumps([routing_key, body])
    self.mox.StubOutWithMock(db, 'create_image_delete')
    db.create_image_delete(
        raw=raw,
        uuid=uuid,
        deleted_at=utils.str_time_to_unix(deleted_at)).AndReturn(raw)
    self.mox.ReplayAll()
    notification = GlanceNotification(body, deployment, routing_key,
                                      json_body)
    notification.save_delete(raw)
    self.mox.VerifyAll()
def _process_exists(raw, body):
    """Create an InstanceExists record from an exists event payload.

    Requires a non-empty launched_at; extracts tenant and namespaced
    image metadata, and only links a delete record when the payload
    reports a deleted_at. Exists without launched_at are warned about
    and skipped.
    """
    payload = body['payload']
    instance_id = payload['instance_id']
    launched_at_str = payload.get('launched_at')
    if launched_at_str is not None and launched_at_str != '':
        launched_at = utils.str_time_to_unix(payload['launched_at'])
        # 1-second window absorbs sub-second truncation of launched_at.
        launched_range = (launched_at, launched_at+1)
        usage = STACKDB.get_instance_usage(instance=instance_id,
                                           launched_at__range=launched_range)
        values = {}
        values['message_id'] = body['message_id']
        values['instance'] = instance_id
        values['launched_at'] = launched_at
        beginning = utils.str_time_to_unix(payload['audit_period_beginning'])
        values['audit_period_beginning'] = beginning
        ending = utils.str_time_to_unix(payload['audit_period_ending'])
        values['audit_period_ending'] = ending
        values['instance_type_id'] = payload['instance_type_id']
        if usage:
            values['usage'] = usage
        values['raw'] = raw
        values['tenant'] = payload['tenant_id']
        # Namespaced image_meta keys map to the os_*/rax_options columns.
        image_meta = payload.get('image_meta', {})
        values['rax_options'] = image_meta.get('com.rackspace__1__options',
                                               '')
        os_arch = image_meta.get('org.openstack__1__architecture', '')
        values['os_architecture'] = os_arch
        os_version = image_meta.get('org.openstack__1__os_version', '')
        values['os_version'] = os_version
        values['os_distro'] = image_meta.get('org.openstack__1__os_distro',
                                             '')
        deleted_at = payload.get('deleted_at')
        if deleted_at and deleted_at != '':
            # We only want to pre-populate the 'delete' if we know this is in
            # fact an exist event for a deleted instance. Otherwise, there
            # is a chance we may populate it for a previous period's exist.
            filter = {'instance': instance_id,
                      'launched_at__range': launched_range}
            delete = STACKDB.get_instance_delete(**filter)
            deleted_at = utils.str_time_to_unix(deleted_at)
            values['deleted_at'] = deleted_at
            if delete:
                values['delete'] = delete
        exists = STACKDB.create_instance_exists(**values)
        STACKDB.save(exists)
    else:
        stacklog.warn("Ignoring exists without launched_at. RawData(%s)"
                      % raw.id)
def test_save_should_persist_nova_rawdata_to_database(self):
    """NovaNotification.save() should store every parsed exists field
    as a nova raw-data row."""
    body = {
        "event_type": "compute.instance.exists",
        '_context_request_id': REQUEST_ID_1,
        '_context_project_id': TENANT_ID_1,
        "timestamp": TIMESTAMP_1,
        "publisher_id": "compute.global.preprod-ord.ohthree.com",
        "payload": {
            'instance_id': INSTANCE_ID_1,
            "status": "saving",
            "container_format": "ovf",
            "properties": {
                "image_type": "snapshot",
            },
            "tenant": "5877054",
            "old_state": 'old_state',
            "old_task_state": 'old_task',
            "image_meta": {
                "org.openstack__1__architecture": 'os_arch',
                "org.openstack__1__os_distro": 'os_distro',
                "org.openstack__1__os_version": 'os_version',
                "com.rackspace__1__options": 'rax_opt',
            },
            "state": 'state',
            "new_task_state": 'task'
        }
    }
    deployment = "1"
    routing_key = "monitor.info"
    json_body = json.dumps([routing_key, body])
    raw = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(db, 'create_nova_rawdata')
    # publisher_id is expected to split into service ('compute') and
    # host; namespaced image_meta keys map to the os_*/rax columns.
    db.create_nova_rawdata(
        deployment="1",
        tenant=TENANT_ID_1,
        json=json_body,
        routing_key=routing_key,
        when=utils.str_time_to_unix(TIMESTAMP_1),
        publisher="compute.global.preprod-ord.ohthree.com",
        event="compute.instance.exists",
        service="compute",
        host="global.preprod-ord.ohthree.com",
        instance=INSTANCE_ID_1,
        request_id=REQUEST_ID_1,
        image_type=image_type.get_numeric_code(body['payload']),
        old_state='old_state',
        old_task='old_task',
        os_architecture='os_arch',
        os_distro='os_distro',
        os_version='os_version',
        rax_options='rax_opt',
        state='state',
        task='task').AndReturn(raw)
    self.mox.ReplayAll()
    notification = NovaNotification(body, deployment, routing_key,
                                    json_body)
    self.assertEquals(notification.save(), raw)
    self.mox.VerifyAll()
def _process_delete(raw, body):
    """Create and save an InstanceDelete record for a delete event."""
    payload = body['payload']
    values = {
        'instance': payload['instance_id'],
        'deleted_at': utils.str_time_to_unix(payload['deleted_at']),
        'raw': raw,
    }
    # launched_at may be absent or empty on some delete events; only
    # convert and store it when present.
    launched_at = payload.get('launched_at')
    if launched_at:
        values['launched_at'] = utils.str_time_to_unix(launched_at)
    delete = STACKDB.create_instance_delete(**values)
    STACKDB.save(delete)
def _process_delete(raw, body):
    """Get-or-create the InstanceDelete row and refresh it from the event.

    Keying on (instance, deleted_at) makes reprocessing the same delete
    event idempotent.
    """
    payload = body['payload']
    deleted_at = utils.str_time_to_unix(payload['deleted_at'])
    delete, _created = STACKDB.get_or_create_instance_delete(
        instance=payload['instance_id'], deleted_at=deleted_at)
    delete.raw = raw
    # launched_at may be absent or empty; only set it when supplied.
    launched_at = payload.get('launched_at')
    if launched_at:
        delete.launched_at = utils.str_time_to_unix(launched_at)
    STACKDB.save(delete)
def _get_exists_filter_args(request):
    """Build custom filter dicts for received_min/received_max params.

    Returns a mapping of param name -> Django lookup dict against the
    related raw record's 'when' field. Raises BadRequestException when a
    supplied value is not a parseable date string.
    """
    custom_filters = {}
    try:
        # Each query param maps to one lookup on the related raw row.
        for param, lookup in (('received_min', 'raw__when__gte'),
                              ('received_max', 'raw__when__lte')):
            if param in request.GET:
                when = utils.str_time_to_unix(request.GET[param])
                custom_filters[param] = {lookup: when}
    except AttributeError:
        raise BadRequestException(message="Range filters must be dates.")
    return custom_filters
def _process_exists(raw, notification):
    """Create and save an InstanceExists record from a notification object.

    Exists notifications without a launched_at cannot be matched to a
    launch record, so they are logged and skipped.
    """
    instance_id = notification.instance
    launched_at_str = notification.launched_at
    if launched_at_str is None or launched_at_str == '':
        stacklog.warn("Ignoring exists without launched_at. RawData(%s)"
                      % raw.id)
        return

    launched_at = utils.str_time_to_unix(launched_at_str)
    # One-second window absorbs sub-second precision loss in timestamps.
    launched_range = (launched_at, launched_at + 1)
    usage = STACKDB.get_instance_usage(instance=instance_id,
                                       launched_at__range=launched_range)

    values = {
        'message_id': notification.message_id,
        'instance': instance_id,
        'launched_at': launched_at,
        'audit_period_beginning': utils.str_time_to_unix(
            notification.audit_period_beginning),
        'audit_period_ending': utils.str_time_to_unix(
            notification.audit_period_ending),
        'instance_type_id': notification.instance_type_id,
        'instance_flavor_id': notification.instance_flavor_id,
        'raw': raw,
        'tenant': notification.tenant,
        'rax_options': notification.rax_options,
        'os_architecture': notification.os_architecture,
        'os_version': notification.os_version,
        'os_distro': notification.os_distro,
        'bandwidth_public_out': notification.bandwidth_public_out,
    }
    if usage:
        values['usage'] = usage

    deleted_at = notification.deleted_at
    if deleted_at and deleted_at != '':
        # We only want to pre-populate the 'delete' if we know this is in
        # fact an exist event for a deleted instance. Otherwise, there
        # is a chance we may populate it for a previous period's exist.
        delete = STACKDB.get_instance_delete(
            instance=instance_id, launched_at__range=launched_range)
        values['deleted_at'] = utils.str_time_to_unix(deleted_at)
        if delete:
            values['delete'] = delete

    exists = STACKDB.create_instance_exists(**values)
    STACKDB.save(exists)
def _to_reconciler_instance(self, instance):
    """Convert a nova DB row dict into a reconciler instance dict."""
    r_instance = empty_reconciler_instance()
    r_instance['id'] = instance['uuid']
    r_instance['instance_type_id'] = instance['instance_type_id']
    launched = instance['launched_at']
    if launched is not None:
        r_instance['launched_at'] = stackutils.str_time_to_unix(launched)
    terminated = instance['terminated_at']
    if terminated is not None:
        r_instance['deleted_at'] = stackutils.str_time_to_unix(terminated)
    # nova's 'deleted' column is 0 for live rows; any non-zero value
    # marks the instance deleted.
    if instance['deleted'] != 0:
        r_instance['deleted'] = True
    return r_instance
def _process_exists(raw, body):
    """Create and save an InstanceExists record from an exists event body.

    Exists events without a launched_at cannot be matched to a launch
    record, so they are logged and skipped.
    """
    payload = body['payload']
    instance_id = payload['instance_id']
    launched_at_str = payload.get('launched_at')
    if launched_at_str is None or launched_at_str == '':
        stacklog.warn("Ignoring exists without launched_at. RawData(%s)"
                      % raw.id)
        return

    launched_at = utils.str_time_to_unix(launched_at_str)
    # One-second window absorbs sub-second precision loss in timestamps.
    launched_range = (launched_at, launched_at + 1)
    usage = STACKDB.get_instance_usage(instance=instance_id,
                                       launched_at__range=launched_range)

    values = {
        'message_id': body['message_id'],
        'instance': instance_id,
        'launched_at': launched_at,
        'audit_period_beginning': utils.str_time_to_unix(
            payload['audit_period_beginning']),
        'audit_period_ending': utils.str_time_to_unix(
            payload['audit_period_ending']),
        'instance_type_id': payload['instance_type_id'],
        'raw': raw,
        'tenant': payload['tenant_id'],
    }
    if usage:
        values['usage'] = usage

    deleted_at = payload.get('deleted_at')
    if deleted_at and deleted_at != '':
        # We only want to pre-populate the 'delete' if we know this is in
        # fact an exist event for a deleted instance. Otherwise, there
        # is a chance we may populate it for a previous period's exist.
        delete = STACKDB.get_instance_delete(
            instance=instance_id, launched_at__range=launched_range)
        values['deleted_at'] = utils.str_time_to_unix(deleted_at)
        if delete:
            values['delete'] = delete

    exists = STACKDB.create_instance_exists(**values)
    STACKDB.save(exists)
def list_usage_exists(request):
    """REST handler: list InstanceExists rows.

    Supports 'received_min'/'received_max' query params, translated into
    range lookups on the related raw record's 'when' field. Raises
    BadRequestException for unparseable dates.
    """
    custom_filters = {}
    try:
        for param, lookup in (('received_min', 'raw__when__gte'),
                              ('received_max', 'raw__when__lte')):
            if param in request.GET:
                when = utils.str_time_to_unix(request.GET[param])
                custom_filters[param] = {lookup: when}
    except AttributeError:
        raise BadRequestException(message="Range filters must be dates.")
    objects = get_db_objects(models.InstanceExists, request, 'id',
                             custom_filters=custom_filters)
    dicts = _convert_model_list(objects, _exists_extra_values)
    return {'exists': dicts}
def get_event_stats(request):
    """Count raw events grouped by event type.

    'when_min' and 'when_max' must be supplied together and may span at
    most HARD_WHEN_RANGE_LIMIT seconds. An optional 'event' param narrows
    the result to that single event type, reported with a zero count when
    no rows matched. Raises BadRequestException on bad parameters.
    """
    try:
        filters = {}
        has_min = 'when_min' in request.GET
        has_max = 'when_max' in request.GET
        if has_min or has_max:
            if not (has_min and has_max):
                raise BadRequestException(
                    message="When providing date range filters, "
                            "a min and max are required.")
            when_min = utils.str_time_to_unix(request.GET['when_min'])
            when_max = utils.str_time_to_unix(request.GET['when_max'])
            # Guard against unbounded table scans.
            if when_max - when_min > HARD_WHEN_RANGE_LIMIT:
                msg = "Date ranges may be no larger than %s seconds"
                raise BadRequestException(
                    message=msg % HARD_WHEN_RANGE_LIMIT)
            filters['when__lte'] = when_max
            filters['when__gte'] = when_min
        rawdata = _rawdata_factory(request.GET.get("service", "nova"))
        if filters:
            rawdata = rawdata.filter(**filters)
        events = list(rawdata.values('event')
                             .annotate(event_count=Count('event')))
        if 'event' in request.GET:
            wanted = request.GET['event']
            matched = [x for x in events if x['event'] == wanted]
            # Report zero instead of an empty list for a missing event.
            events = matched or [{'event': wanted, 'event_count': 0}]
        return {'stats': events}
    except (KeyError, TypeError):
        raise BadRequestException(message="Invalid/absent query parameter")
    except (ValueError, AttributeError):
        raise BadRequestException(message="Invalid format for date (Correct "
                                          "format should be %Y-%m-%d "
                                          "%H:%M:%S)")
def scrub(args, send_notif=lambda x: None): print "Starting scrub." start = utils.str_time_to_unix(args.start) end = utils.str_time_to_unix(args.end) if hasattr(scrubbers, args.scrubber): Scrubber = getattr(scrubbers, args.scrubber) scrubber = Scrubber(start, end) count = 0 for raw in scrubber.raws(): matches, body = scrubber.filter(raw) if matches and not body: body = json.loads(raw['json'])[1] if matches and body: scrubbed = scrubber.scrub(body) count += 1 send_notif(scrubbed) return count else: print "No scrubber class %s." % args.scrubber return 0
def get_event_stats(request):
    """Return the total count of raw events matching the query params.

    Optional params: 'when_min'/'when_max' (date strings), 'event', and
    'service' (defaults to "nova"). Raises BadRequestException on bad
    parameters.
    """
    try:
        params = request.GET
        filters = {}
        if 'when_min' in params:
            filters['when__gte'] = utils.str_time_to_unix(params['when_min'])
        if 'when_max' in params:
            filters['when__lte'] = utils.str_time_to_unix(params['when_max'])
        if 'event' in params:
            filters['event'] = params['event']
        rawdata = _rawdata_factory(params.get("service", "nova"))
        return {'stats': {'count': rawdata.filter(**filters).count()}}
    except (KeyError, TypeError):
        raise BadRequestException(message="Invalid/absent query parameter")
    except (ValueError, AttributeError):
        raise BadRequestException(message="Invalid format for date (Correct "
                                          "format should be %YYYY-%mm-%dd)")
def list_usage_exists_with_service(request, service):
    """REST handler: list exists rows for the given service's model.

    Supports 'received_min'/'received_max' query params, translated into
    range lookups on the related raw record's 'when' field. Raises
    BadRequestException for unparseable dates.
    """
    model = _exists_model_factory(service)
    custom_filters = {}
    try:
        for param, lookup in (('received_min', 'raw__when__gte'),
                              ('received_max', 'raw__when__lte')):
            if param in request.GET:
                when = utils.str_time_to_unix(request.GET[param])
                custom_filters[param] = {lookup: when}
    except AttributeError:
        raise BadRequestException(message="Range filters must be dates.")
    objects = get_db_objects(model['klass'], request, 'id',
                             custom_filters=custom_filters)
    dicts = _convert_model_list(objects, _exists_extra_values)
    return {'exists': dicts}
def _to_reconciler_instance(self, instance, metadata=None):
    """Convert a nova DB row dict (plus optional metadata) into a
    reconciler instance dict."""
    r_instance = empty_reconciler_instance()
    r_instance['id'] = instance['uuid']
    r_instance['tenant'] = instance['project_id']
    r_instance['instance_type_id'] = str(instance['instance_type_id'])
    r_instance['instance_flavor_id'] = str(instance['flavorid'])
    launched = instance['launched_at']
    if launched is not None:
        r_instance['launched_at'] = stackutils.str_time_to_unix(launched)
    terminated = instance['terminated_at']
    if terminated is not None:
        r_instance['deleted_at'] = stackutils.str_time_to_unix(terminated)
    # nova's 'deleted' column is 0 for live rows; any non-zero value
    # marks the instance deleted.
    if instance['deleted'] != 0:
        r_instance['deleted'] = True
    if metadata is not None:
        r_instance.update(metadata)
    return r_instance
def __init__(self, body, deployment, routing_key, json):
    """Parse glance-specific fields out of the notification payload.

    All fields default to None/{} when the payload is not a dict.
    """
    super(GlanceNotification, self).__init__(body, deployment,
                                             routing_key, json)
    if isinstance(self.payload, dict):
        payload = self.payload
        self.properties = payload.get('properties', {})
        self.image_type = image_type.get_numeric_code(payload)
        self.status = payload.get('status', None)
        self.uuid = payload.get('id', None)
        self.size = payload.get('size', None)
        # created_at may be absent; only convert it when present.
        created_at = payload.get('created_at', None)
        self.created_at = created_at and utils.str_time_to_unix(created_at)
    else:
        self.properties = {}
        self.image_type = None
        self.status = None
        self.uuid = None
        self.size = None
        self.created_at = None
def test_list_usage_exists_with_received_max(self):
    """list_usage_exists should translate the 'received_max' query param
    into a raw__when__lte custom filter before querying."""
    fake_request = self.mox.CreateMockAnything()
    date = str(datetime.datetime.utcnow())
    fake_request.GET = {'received_max': date}
    self.mox.StubOutWithMock(dbapi, 'get_db_objects')
    unix_date = stacktach_utils.str_time_to_unix(date)
    # The handler must pass exactly this custom filter through.
    custom_filters = {'received_max': {'raw__when__lte': unix_date}}
    objects = self.mox.CreateMockAnything()
    dbapi.get_db_objects(models.InstanceExists, fake_request, 'id',
                         custom_filters=custom_filters).AndReturn(objects)
    self.mox.StubOutWithMock(dbapi, '_convert_model_list')
    dbapi._convert_model_list(objects, dbapi._exists_extra_values)
    self.mox.ReplayAll()
    resp = dbapi.list_usage_exists(fake_request)
    self.assertEqual(resp.status_code, 200)
    self.mox.VerifyAll()
def _process_usage_for_new_launch(raw, notification):
    """Create/refresh the InstanceUsage row when a launch-type *.start
    event arrives.

    The two event-set conditionals below intentionally overlap
    (rebuild/rescue appear in both): the first always refreshes flavor
    info for fresh launches; the second only back-fills launched_at /
    flavor when they are still unset, so an in-flight action spanning an
    audit period still yields a matching launch record.
    """
    values = {}
    values['instance'] = notification.instance
    values['request_id'] = notification.request_id
    # Idempotent: reprocessing the same event reuses the existing row.
    (usage, new) = STACKDB.get_or_create_instance_usage(**values)
    if raw.event in [INSTANCE_EVENT['create_start'],
                     INSTANCE_EVENT['rebuild_start'],
                     INSTANCE_EVENT['rescue_start']]:
        usage.instance_type_id = notification.instance_type_id
        usage.instance_flavor_id = notification.instance_flavor_id
    if raw.event in [INSTANCE_EVENT['rebuild_start'],
                     INSTANCE_EVENT['resize_prep_start'],
                     INSTANCE_EVENT['resize_revert_start'],
                     INSTANCE_EVENT['rescue_start']] and\
            usage.launched_at is None:
        # Grab the launched_at so if this action spans the audit period,
        # we will have a launch record corresponding to the exists.
        # We don't want to override a launched_at if it is already set
        # though, because we may have already received the end event
        usage.launched_at = utils.str_time_to_unix(notification.launched_at)
    if raw.event in [INSTANCE_EVENT['resize_prep_start'],
                     INSTANCE_EVENT['resize_revert_start']] and\
            usage.instance_type_id is None and\
            usage.instance_flavor_id is None:
        # Grab the flavor details and populate them if they aren't
        # already. This should happen just in case we get an exists
        # mid resize/revert. That can happen if the action spans
        # multiple audit periods, or if the compute node is restarted
        # mid action and another resize is kicked off.
        usage.instance_type_id = notification.instance_type_id
        usage.instance_flavor_id = notification.instance_flavor_id
    usage.tenant = notification.tenant
    usage.rax_options = notification.rax_options
    usage.os_architecture = notification.os_architecture
    usage.os_version = notification.os_version
    usage.os_distro = notification.os_distro
    STACKDB.save(usage)
def test_save_should_persist_glance_rawdata_to_database(self):
    """save() must forward parsed glance fields to db.create_glance_rawdata
    and return the record it produces."""
    body = {
        "event_type": "image.upload",
        "timestamp": "2013-06-20 17:31:57.939614",
        "publisher_id": "glance-api01-r2961.global.preprod-ord.ohthree.com",
        "payload": {
            "status": "saving",
            "properties": {
                "image_type": "snapshot",
                "instance_uuid": INSTANCE_ID_1,
            },
            "owner": TENANT_ID_1,
            "id": "2df2ccf6-bc1b-4853-aab0-25fda346b3bb",
        }
    }
    deployment = "1"
    routing_key = "glance_monitor.info"
    json_body = json.dumps([routing_key, body])
    raw = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(db, 'create_glance_rawdata')
    # Record the exact kwargs GlanceNotification.save() must forward;
    # service/host come from splitting the publisher_id on the first '.'.
    db.create_glance_rawdata(
        deployment="1",
        owner=TENANT_ID_1,
        json=json_body,
        routing_key=routing_key,
        when=utils.str_time_to_unix("2013-06-20 17:31:57.939614"),
        publisher="glance-api01-r2961.global.preprod-ord.ohthree.com",
        event="image.upload",
        service="glance-api01-r2961",
        host="global.preprod-ord.ohthree.com",
        instance=INSTANCE_ID_1,
        request_id='',
        image_type=0,
        status="saving",
        uuid="2df2ccf6-bc1b-4853-aab0-25fda346b3bb").AndReturn(raw)
    self.mox.ReplayAll()
    notification = GlanceNotification(body, deployment, routing_key,
                                      json_body)
    self.assertEquals(notification.save(), raw)
    self.mox.VerifyAll()
def test_save_should_persist_generic_rawdata_to_database(self):
    """save() must forward parsed generic fields to db.create_generic_rawdata
    and return the record it produces."""
    body = {
        "event_type": "image.upload",
        '_context_request_id': REQUEST_ID_1,
        '_context_project_id': TENANT_ID_1,
        "timestamp": TIMESTAMP_1,
        "publisher_id": "glance-api01-r2961.global.preprod-ord.ohthree.com",
        "message_id": MESSAGE_ID_1,
        "payload": {
            'instance_id': INSTANCE_ID_1,
            "status": "saving",
            "container_format": "ovf",
            "tenant": "5877054"
        }
    }
    deployment = "1"
    routing_key = "generic_monitor.info"
    json_body = json.dumps([routing_key, body])
    raw = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(db, 'create_generic_rawdata')
    # Record the exact kwargs Notification.save() must forward;
    # service/host come from splitting the publisher_id on the first '.'.
    db.create_generic_rawdata(
        deployment="1",
        tenant=TENANT_ID_1,
        json=json_body,
        routing_key=routing_key,
        when=utils.str_time_to_unix(TIMESTAMP_1),
        publisher="glance-api01-r2961.global.preprod-ord.ohthree.com",
        event="image.upload",
        service="glance-api01-r2961",
        host="global.preprod-ord.ohthree.com",
        instance=INSTANCE_ID_1,
        request_id=REQUEST_ID_1,
        message_id=MESSAGE_ID_1).AndReturn(raw)
    self.mox.ReplayAll()
    notification = Notification(body, deployment, routing_key, json_body)
    self.assertEquals(notification.save(), raw)
    self.mox.VerifyAll()
def process_raw_data(deployment, args, json_args):
    """This is called directly by the worker to add the event to the db.

    Dispatches on routing_key via HANDLERS; returns the saved rawdata
    record, or None when no handler matches or the handler produced no
    values.
    """
    db.reset_queries()
    routing_key, body = args
    handler = HANDLERS.get(routing_key, None)
    if not handler:
        return None
    values = handler(routing_key, body)
    if not values:
        return None
    values['deployment'] = deployment
    try:
        when = body['timestamp']
    except KeyError:
        # Old way of doing it
        when = body['_context_timestamp']
    values['when'] = utils.str_time_to_unix(when)
    values['routing_key'] = routing_key
    values['json'] = json_args
    record = STACKDB.create_rawdata(**values)
    STACKDB.save(record)
    return record
def test_save_image_exists_with_created_at_and_deleted_at(self):
    """save_exists() must create one ImageExists row per image in the
    payload, converting created_at/deleted_at/audit period strings to
    unix timestamps and attaching any pre-existing delete record."""
    raw = self.mox.CreateMockAnything()
    delete = self.mox.CreateMockAnything()
    audit_period_beginning = "2013-05-20 17:31:57.939614"
    audit_period_ending = "2013-06-20 17:31:57.939614"
    created_at = "2013-05-20 19:31:57.939614"
    deleted_at = "2013-05-20 21:31:57.939614"
    size = 123
    uuid = "2df2ccf6-bc1b-4853-aab0-25fda346b3bb"
    body = {
        "event_type": "image.exists",
        "timestamp": "2013-06-20 18:31:57.939614",
        "publisher_id": "glance-api01-r2961.global.preprod-ord.ohthree.com",
        "message_id": "d14cfa51-6a0e-4cf8-9130-804738be96d2",
        "payload": {
            "audit_period_beginning": audit_period_beginning,
            "audit_period_ending": audit_period_ending,
            "owner": TENANT_ID_1,
            "images": [{
                "created_at": created_at,
                "id": uuid,
                "size": size,
                "status": "saving",
                "properties": {
                    "instance_uuid": INSTANCE_ID_1
                },
                "deleted_at": deleted_at,
            }, {
                "created_at": created_at,
                "id": uuid,
                "size": size,
                "status": "saving",
                "properties": {
                    "instance_uuid": INSTANCE_ID_1
                },
                "deleted_at": deleted_at,
            }]
        }
    }
    deployment = "1"
    routing_key = "glance_monitor.info"
    json_body = json.dumps([routing_key, body])
    self.mox.StubOutWithMock(db, 'create_image_exists')
    self.mox.StubOutWithMock(db, 'get_image_usage')
    self.mox.StubOutWithMock(db, 'get_image_delete')
    # One set of expectations per image in the payload (two images).
    for i in range(0, 2):
        db.get_image_usage(uuid=uuid).AndReturn(None)
        db.get_image_delete(uuid=uuid).AndReturn(delete)
        db.create_image_exists(
            created_at=utils.str_time_to_unix(created_at),
            owner=TENANT_ID_1,
            raw=raw,
            audit_period_beginning=utils.str_time_to_unix(
                audit_period_beginning),
            audit_period_ending=utils.str_time_to_unix(
                audit_period_ending),
            size=size,
            uuid=uuid,
            usage=None,
            delete=delete,
            deleted_at=utils.str_time_to_unix(deleted_at),
            message_id="d14cfa51-6a0e-4cf8-9130-804738be96d2").AndReturn(
                raw)
    self.mox.ReplayAll()
    notification = GlanceNotification(body, deployment, routing_key,
                                      json_body)
    notification.save_exists(raw)
    self.mox.VerifyAll()
def test_save_should_persist_nova_rawdata_to_database(self):
    """save() must forward every parsed notification field to
    db.create_nova_rawdata and return the record it produces.

    Variant of the nova rawdata test whose payload also carries a
    'bandwidth' section; bandwidth is not part of the rawdata kwargs.
    """
    body = {
        "event_type": "compute.instance.exists",
        '_context_request_id': REQUEST_ID_1,
        '_context_project_id': TENANT_ID_1,
        "timestamp": TIMESTAMP_1,
        "publisher_id": "compute.global.preprod-ord.ohthree.com",
        "payload": {
            'instance_id': INSTANCE_ID_1,
            "status": "saving",
            "container_format": "ovf",
            "properties": {
                "image_type": "snapshot",
            },
            "tenant": "5877054",
            "old_state": 'old_state',
            "old_task_state": 'old_task',
            "image_meta": {
                "org.openstack__1__architecture": 'os_arch',
                "org.openstack__1__os_distro": 'os_distro',
                "org.openstack__1__os_version": 'os_version',
                "com.rackspace__1__options": 'rax_opt',
            },
            "state": 'state',
            "new_task_state": 'task',
            "bandwidth": {
                "private": {
                    "bw_in": 0,
                    "bw_out": 264902
                },
                "public": {
                    "bw_in": 0,
                    "bw_out": 1697240969
                }
            }
        }
    }
    deployment = "1"
    routing_key = "monitor.info"
    json_body = json.dumps([routing_key, body])
    raw = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(db, 'create_nova_rawdata')
    # Record the exact kwargs NovaNotification.save() must pass through.
    db.create_nova_rawdata(
        deployment="1",
        tenant=TENANT_ID_1,
        json=json_body,
        routing_key=routing_key,
        when=utils.str_time_to_unix(TIMESTAMP_1),
        publisher="compute.global.preprod-ord.ohthree.com",
        event="compute.instance.exists",
        service="compute",
        host="global.preprod-ord.ohthree.com",
        instance=INSTANCE_ID_1,
        request_id=REQUEST_ID_1,
        image_type=image_type.get_numeric_code(body['payload']),
        old_state='old_state',
        old_task='old_task',
        os_architecture='os_arch',
        os_distro='os_distro',
        os_version='os_version',
        rax_options='rax_opt',
        state='state',
        task='task').AndReturn(raw)
    self.mox.ReplayAll()
    notification = NovaNotification(body, deployment, routing_key,
                                    json_body)
    self.assertEquals(notification.save(), raw)
    self.mox.VerifyAll()
    # NOTE(review): fragment — the opening of this filter dict (and the
    # enclosing definition, if any) lies outside this chunk.
    'audit_period_ending__exact': None
}
# Back-fill audit period fields on existing InstanceExists rows from the
# raw JSON, printing progress roughly every 30 seconds.
exists = models.InstanceExists.objects.filter(**filters)
count = exists.count()
start = datetime.datetime.utcnow()
print "%s records to populate" % count
update_interval = datetime.timedelta(seconds=30)
next_update = start + update_interval
completed = 0
errored = 0
for exist in exists:
    try:
        # Raw JSON is [routing_key, body]; the payload lives in the body.
        notif = json.loads(exist.raw.json)
        payload = notif[1]['payload']
        beginning = utils.str_time_to_unix(payload['audit_period_beginning'])
        exist.audit_period_beginning = beginning
        ending = utils.str_time_to_unix(payload['audit_period_ending'])
        exist.audit_period_ending = ending
        exist.save()
        completed += 1
    # NOTE(review): bare except deliberately left — best-effort backfill
    # that must survive any malformed raw; consider narrowing.
    except:
        print "Error with raw %s" % exist.id
        errored += 1
    if datetime.datetime.utcnow() > next_update:
        print_update(count, completed, errored)
        next_update = datetime.datetime.utcnow() + update_interval