def pformat(self, indent=0, linesep=os.linesep):
    """Render this logbook (and its flow details) as an indented string.

    >>> from taskflow.persistence import models
    >>> tmp = models.LogBook("example")
    >>> print(tmp.pformat())
    LogBook: 'example'
     - uuid = ...
     - created_at = ...
    """
    pad = " " * indent
    child_pad = " " * (indent + 1)
    buf = ["%s%s: '%s'" % (pad, self.__class__.__name__, self.name)]
    buf.extend(_format_shared(self, indent=indent + 1))
    buf.extend(_format_meta(self.meta, indent=indent + 1))
    if self.created_at is not None:
        buf.append("%s- created_at = %s"
                   % (child_pad, timeutils.isotime(self.created_at)))
    if self.updated_at is not None:
        buf.append("%s- updated_at = %s"
                   % (child_pad, timeutils.isotime(self.updated_at)))
    # A logbook iterates over its contained flow details.
    for flow_detail in self:
        buf.append(flow_detail.pformat(indent=indent + 1, linesep=linesep))
    return linesep.join(buf)
def show(self, request, instance):
    """Detailed view of a single instance."""
    access_v4 = instance.get('access_ip_v4')
    access_v6 = instance.get('access_ip_v6')
    detail = {
        "id": instance["uuid"],
        "name": instance["display_name"],
        "status": self._get_vm_status(instance),
        "tenant_id": instance.get("project_id") or "",
        "user_id": instance.get("user_id") or "",
        "metadata": self._get_metadata(instance),
        "hostId": self._get_host_id(instance) or "",
        "image": self._get_image(request, instance),
        "flavor": self._get_flavor(request, instance),
        "created": timeutils.isotime(instance["created_at"]),
        "updated": timeutils.isotime(instance["updated_at"]),
        "addresses": self._get_addresses(request, instance),
        "accessIPv4": str(access_v4) if access_v4 is not None else '',
        "accessIPv6": str(access_v6) if access_v6 is not None else '',
        "links": self._get_links(request,
                                 instance["uuid"],
                                 self._collection_name),
    }
    status = detail["status"]
    # Fault details are only attached for error-ish states, and only when
    # a fault record actually exists.
    if status in self._fault_statuses:
        fault = self._get_fault(request, instance)
        if fault:
            detail['fault'] = fault
    if status in self._progress_statuses:
        detail["progress"] = instance.get("progress", 0)
    return {"server": detail}
def _check_clock_sync_on_agent_start(self, agent_state, agent_time):
    """Warn when a registering agent's clock is out of sync with ours.

    Only the initial registration (start_flag set) is checked;
    subsequent re-connects are ignored.
    """
    if not agent_state.get("start_flag"):
        return
    server_now = timeutils.utcnow()
    drift = abs(timeutils.delta_seconds(server_now, agent_time))
    if drift <= cfg.CONF.agent_down_time:
        return
    log_dict = {
        "host": agent_state["host"],
        "agent_name": agent_state["agent_type"],
        "agent_time": timeutils.isotime(agent_time),
        "threshold": cfg.CONF.agent_down_time,
        "serv_time": timeutils.isotime(server_now),
        "diff": drift,
    }
    LOG.error(
        _LE(
            "Message received from the host: %(host)s "
            "during the registration of %(agent_name)s has "
            "a timestamp: %(agent_time)s. This differs from "
            "the current server timestamp: %(serv_time)s by "
            "%(diff)s seconds, which is more than the "
            "threshold agent down"
            "time: %(threshold)s."
        ),
        log_dict,
    )
def get_active_by_window_joined(cls, context, begin, end=None,
                                project_id=None, host=None,
                                expected_attrs=None, use_slave=False):
    """Get instances and joins active during a certain time window.

    :param:context: nova request context
    :param:begin: datetime for the start of the time window
    :param:end: datetime for the end of the time window
    :param:project_id: used to filter instances by project
    :param:host: used to filter instances on a given compute host
    :param:expected_attrs: list of related fields that can be joined
        in the database layer when querying for instances
    :param use_slave if True, ship this query off to a DB slave
    :returns: InstanceList
    """
    # NOTE(mriedem): The remote call needs string primitives, not
    # datetime objects.
    begin = timeutils.isotime(begin)
    if end:
        end = timeutils.isotime(end)
    else:
        end = None
    return cls._get_active_by_window_joined(context, begin, end,
                                            project_id, host,
                                            expected_attrs,
                                            use_slave=use_slave)
def _images_by_windowed_meta(
    self, context, period_start, period_stop, project_id=None, metadata=None
):
    """Simulates first level in database layer.

    :param context:
    :param period_start: Datetime
    :param period_stop: Datetime
    :param project_id: String|None
    :param metadata: Dict|None
    """
    # The lower fake layer expects ISO-8601 strings, not datetimes.
    start_str = timeutils.isotime(period_start)
    stop_str = timeutils.isotime(period_stop)
    return self.__images_by_windowed_meta(
        context, start_str, stop_str, project_id, metadata
    )
def test_get_environment(self):
    """Test GET request of an environment in ready status"""
    self._set_policy_rules({"show_environment": "@"})
    self.expect_policy_check("show_environment", {"environment_id": "123"})

    # Pin the clock so created/updated timestamps are predictable.
    frozen_now = timeutils.utcnow()
    timeutils.utcnow.override_time = frozen_now

    env_id = "123"
    self._create_fake_environment(env_id=env_id)

    request = self._get("/environments/{0}".format(env_id))
    response = request.get_response(self.api)
    self.assertEqual(200, response.status_code)

    # isotime() appends 'Z'; the API emits timestamps without it.
    stamp = timeutils.isotime(frozen_now)[:-1]
    expected = {
        "tenant_id": self.tenant,
        "id": env_id,
        "name": "my-env",
        "version": 0,
        "created": stamp,
        "updated": stamp,
        "acquired_by": None,
        "services": [],
        "status": "ready",
    }
    self.assertEqual(expected, json.loads(response.body))
def serialize_swift_object(sobject):
    """Flatten a Swift object description into an indexable document.

    :param sobject: mapping of Swift object fields/headers
    :returns: dict with the well-known fields, content metadata,
        ISO-8601 timestamps, and any ``x-object-meta-*`` headers
    """
    # Iterate keys directly: the values produced by six.iteritems() were
    # never used (the loop variable `v` was dead).
    metadocument = {k: sobject.get(k, None)
                    for k in sobject
                    if k.lower().startswith("x-object-meta")}
    object_fields = ('id', 'name', 'account', 'account_id',
                     'container', 'container_id', 'etag')
    document = {f: sobject.get(f, None) for f in object_fields}
    document['content_type'] = sobject.get('content-type', None)
    document['content_length'] = sobject.get('content-length', None)
    # x-timestamp is a POSIX epoch value (as a string); convert to ISO-8601.
    if sobject.get('x-timestamp'):
        timestamp = float(sobject.get('x-timestamp'))
        document['created_at'] = \
            timeutils.isotime(datetime.datetime.fromtimestamp(timestamp))
    # last-modified is a formatted date string; parse, then re-format.
    if sobject.get('last-modified'):
        updated_dt = datetime.datetime.strptime(
            sobject['last-modified'], dateformat)
        document['updated_at'] = timeutils.isotime(updated_dt)
    document.update(metadocument)
    return document
def get(key):
    """Return a fake cached (token-data, expiry) JSON blob for known tokens.

    Unknown keys fall through and implicitly return None.
    """
    def _blob(token_id, user):
        # Tokens expire five minutes from "now".
        dt = timeutils.utcnow() + datetime.timedelta(minutes=5)
        expires = timeutils.isotime(dt)
        return json.dumps(({'access': {
            'token': {'id': token_id, 'expires': expires},
            'user': user,
        }}, expires))

    if key == "tokens/%s" % VALID_TOKEN:
        return _blob(VALID_TOKEN, {
            'id': 'user_id1',
            'name': 'user_name1',
            'tenantId': '123i2910',
            'tenantName': 'mytenant',
            'roles': [{'name': 'admin'}],
        })
    if key == "tokens/%s" % VALID_TOKEN2:
        return _blob(VALID_TOKEN2, {
            'id': 'user_id2',
            'name': 'user-good',
            'tenantId': 'project-good',
            'tenantName': 'goodies',
            'roles': [{'name': 'Member'}],
        })
def _usage_from_volume(context, volume_ref, **kw):
    """Build a usage-notification payload from a volume reference.

    Extra keyword arguments override/extend the base payload.
    """
    usage_info = {
        'tenant_id': volume_ref['project_id'],
        'host': volume_ref['host'],
        'user_id': volume_ref['user_id'],
        'instance_uuid': volume_ref['instance_uuid'],
        'availability_zone': volume_ref['availability_zone'],
        'volume_id': volume_ref['id'],
        'volume_type': volume_ref['volume_type_id'],
        'display_name': volume_ref['display_name'],
        'launched_at': timeutils.isotime(at=volume_ref['launched_at']),
        'created_at': timeutils.isotime(at=volume_ref['created_at']),
        'status': volume_ref['status'],
        'snapshot_id': volume_ref['snapshot_id'],
        'size': volume_ref['size'],
        'replication_status': volume_ref['replication_status'],
        'replication_extended_status':
            volume_ref['replication_extended_status'],
        'replication_driver_data': volume_ref['replication_driver_data'],
    }
    usage_info.update(kw)
    return usage_info
def show(self, request, instance, extend_address=True):
    """Detailed view of a single instance."""
    detail = {
        "id": instance["uuid"],
        "name": instance["display_name"],
        "status": self._get_vm_status(instance),
        "tenant_id": instance.get("project_id") or "",
        "user_id": instance.get("user_id") or "",
        "metadata": self._get_metadata(instance),
        "host": instance.get("host"),
        "hostId": self._get_host_id(instance) or "",
        # TODO(alex_xu): '_get_image' return {} when there image_ref
        # isn't existed in V3 API, we revert it back to return "" in
        # V2.1.
        "image": self._get_image(request, instance),
        "flavor": self._get_flavor(request, instance),
        "created": timeutils.isotime(instance["created_at"]),
        "updated": timeutils.isotime(instance["updated_at"]),
        "addresses": self._get_addresses(request, instance,
                                         extend_address),
        "links": self._get_links(request,
                                 instance["uuid"],
                                 self._collection_name),
    }
    status = detail["status"]
    # Only attach fault info for error-ish states with a fault record.
    if status in self._fault_statuses:
        fault = self._get_fault(request, instance)
        if fault:
            detail['fault'] = fault
    if status in self._progress_statuses:
        detail["progress"] = instance.get("progress", 0)
    return {"server": detail}
def format_image_notification(image):
    """
    Given a daisy.domain.Image object, return a dictionary of relevant
    notification information. We purposely do not include 'location'
    as it may contain credentials.
    """
    payload = {
        'id': image.image_id,
        'name': image.name,
        'status': image.status,
        'created_at': timeutils.isotime(image.created_at),
        'updated_at': timeutils.isotime(image.updated_at),
    }
    # Straight attribute copies.
    for attr in ('min_disk', 'min_ram', 'protected', 'checksum', 'owner',
                 'disk_format', 'container_format', 'size'):
        payload[attr] = getattr(image, attr)
    payload['is_public'] = image.visibility == 'public'
    payload['properties'] = dict(image.extra_properties)
    payload['tags'] = list(image.tags)
    payload['deleted'] = False
    payload['deleted_at'] = None
    return payload
def _check_clock_sync_on_agent_start(self, agent_state, agent_time):
    """Checks if the server and the agent times are in sync.

    Method checks if the agent time is in sync with the server
    time on start up. Ignores it, on subsequent re-connects.

    NOTE: a leftover ``if True: LOG.debug(...); return`` debug guard
    previously short-circuited this method, making the entire clock-sync
    check below dead code; it has been removed so the check runs again.
    """
    if agent_state.get('start_flag'):
        time_server_now = timeutils.utcnow()
        diff = abs(timeutils.delta_seconds(time_server_now, agent_time))
        if diff > cfg.CONF.agent_down_time:
            agent_name = agent_state['agent_type']
            time_agent = timeutils.isotime(agent_time)
            host = agent_state['host']
            log_dict = {'host': host,
                        'agent_name': agent_name,
                        'agent_time': time_agent,
                        'threshold': cfg.CONF.agent_down_time,
                        'serv_time': timeutils.isotime(time_server_now),
                        'diff': diff}
            LOG.error(_LE("Message received from the host: %(host)s "
                          "during the registration of %(agent_name)s has "
                          "a timestamp: %(agent_time)s. This differs from "
                          "the current server timestamp: %(serv_time)s by "
                          "%(diff)s seconds, which is more than the "
                          "threshold agent down"
                          "time: %(threshold)s."), log_dict)
def test_get_environment(self):
    """Test GET request of an environment in ready status"""
    self._set_policy_rules({'show_environment': '@'})
    self.expect_policy_check('show_environment',
                             {'environment_id': '123'})

    # Freeze the clock so created/updated are deterministic.
    frozen_now = timeutils.utcnow()
    timeutils.utcnow.override_time = frozen_now

    env_id = '123'
    self._create_fake_environment(env_id=env_id)

    response = self._get('/environments/{0}'.format(env_id)).get_response(
        self.api)
    self.assertEqual(200, response.status_code)

    # isotime() appends 'Z'; the API emits timestamps without it.
    stamp = timeutils.isotime(frozen_now)[:-1]
    expected = {'tenant_id': self.tenant,
                'id': env_id,
                'name': 'my-env',
                'version': 0,
                'created': stamp,
                'updated': stamp,
                'acquired_by': None,
                'services': [],
                'status': 'ready'}
    self.assertEqual(expected, json.loads(response.body))
def serialize(self, obj):
    """Flatten an image record into an indexable document dict."""
    # Only accepted, non-deleted memberships are exposed.
    members = [m.member for m in obj.members
               if m.status == "accepted" and m.deleted == 0]
    document = {
        "id": obj.id,
        "name": obj.name,
        "tags": obj.tags,
        "disk_format": obj.disk_format,
        "container_format": obj.container_format,
        "size": obj.size,
        "virtual_size": obj.virtual_size,
        "status": obj.status,
        "visibility": "public" if obj.is_public else "private",
        "checksum": obj.checksum,
        "min_disk": obj.min_disk,
        "min_ram": obj.min_ram,
        "owner": obj.owner,
        "protected": obj.protected,
        "members": members,
        "created_at": timeutils.isotime(obj.created_at),
        "updated_at": timeutils.isotime(obj.updated_at),
    }
    # Custom image properties are promoted to top-level document fields.
    for image_property in obj.properties:
        document[image_property.name] = image_property.value
    return document
def format_image_notification(image):
    """
    Given a glance.domain.Image object, return a dictionary of relevant
    notification information. We purposely do not include 'location'
    as it may contain credentials.
    """
    pairs = [
        ("id", image.image_id),
        ("name", image.name),
        ("status", image.status),
        ("created_at", timeutils.isotime(image.created_at)),
        ("updated_at", timeutils.isotime(image.updated_at)),
        ("min_disk", image.min_disk),
        ("min_ram", image.min_ram),
        ("protected", image.protected),
        ("checksum", image.checksum),
        ("owner", image.owner),
        ("disk_format", image.disk_format),
        ("container_format", image.container_format),
        ("size", image.size),
        ("is_public", image.visibility == "public"),
        ("properties", dict(image.extra_properties)),
        ("tags", list(image.tags)),
        ("deleted", False),
        ("deleted_at", None),
    ]
    return dict(pairs)
def serialize(self, obj):
    """Flatten an image record into an indexable document dict."""
    document = {'id': obj.id,
                'name': obj.name,
                'tags': obj.tags,
                'disk_format': obj.disk_format,
                'container_format': obj.container_format,
                'size': obj.size,
                'virtual_size': obj.virtual_size,
                'status': obj.status}
    document['visibility'] = 'public' if obj.is_public else 'private'
    document.update({'checksum': obj.checksum,
                     'min_disk': obj.min_disk,
                     'min_ram': obj.min_ram,
                     'owner': obj.owner,
                     'protected': obj.protected})
    # Only accepted, non-deleted memberships are exposed.
    document['members'] = [m.member for m in obj.members
                           if m.status == 'accepted' and m.deleted == 0]
    document['created_at'] = timeutils.isotime(obj.created_at)
    document['updated_at'] = timeutils.isotime(obj.updated_at)
    # Custom image properties become top-level fields.
    for image_property in obj.properties:
        document[image_property.name] = image_property.value
    return document
def format_watch(watch):
    """Map a watch-rule DB object onto the RPC API watch dict."""
    rule = watch.rule
    result = {
        rpc_api.WATCH_ACTIONS_ENABLED: rule.get(
            rpc_api.RULE_ACTIONS_ENABLED),
        rpc_api.WATCH_ALARM_ACTIONS: rule.get(rpc_api.RULE_ALARM_ACTIONS),
        rpc_api.WATCH_TOPIC: rule.get(rpc_api.RULE_TOPIC),
        rpc_api.WATCH_UPDATED_TIME: timeutils.isotime(watch.updated_at),
        rpc_api.WATCH_DESCRIPTION: rule.get(rpc_api.RULE_DESCRIPTION),
        rpc_api.WATCH_NAME: watch.name,
        rpc_api.WATCH_COMPARISON: rule.get(rpc_api.RULE_COMPARISON),
        # Dimensions default to an empty list when unset.
        rpc_api.WATCH_DIMENSIONS: rule.get(rpc_api.RULE_DIMENSIONS) or [],
        rpc_api.WATCH_PERIODS: rule.get(rpc_api.RULE_PERIODS),
        rpc_api.WATCH_INSUFFICIENT_ACTIONS: rule.get(
            rpc_api.RULE_INSUFFICIENT_ACTIONS),
        rpc_api.WATCH_METRIC_NAME: rule.get(rpc_api.RULE_METRIC_NAME),
        rpc_api.WATCH_NAMESPACE: rule.get(rpc_api.RULE_NAMESPACE),
        rpc_api.WATCH_OK_ACTIONS: rule.get(rpc_api.RULE_OK_ACTIONS),
        rpc_api.WATCH_PERIOD: rule.get(rpc_api.RULE_PERIOD),
        rpc_api.WATCH_STATE_REASON: rule.get(rpc_api.RULE_STATE_REASON),
        rpc_api.WATCH_STATE_REASON_DATA: rule.get(
            rpc_api.RULE_STATE_REASON_DATA),
        rpc_api.WATCH_STATE_UPDATED_TIME: timeutils.isotime(
            rule.get(rpc_api.RULE_STATE_UPDATED_TIME)),
        rpc_api.WATCH_STATE_VALUE: watch.state,
        rpc_api.WATCH_STATISTIC: rule.get(rpc_api.RULE_STATISTIC),
        rpc_api.WATCH_THRESHOLD: rule.get(rpc_api.RULE_THRESHOLD),
        rpc_api.WATCH_UNIT: rule.get(rpc_api.RULE_UNIT),
        rpc_api.WATCH_STACK_ID: watch.stack_id,
    }
    return result
def setUp(self):
    """Prepare a fixed tz-aware datetime and its coercion fixtures."""
    super(TestDateTime, self).setUp()
    self.dt = datetime.datetime(1955, 11, 5, tzinfo=iso8601.iso8601.Utc())
    self.field = fields.DateTimeField()
    # The ISO-8601 rendering of self.dt, reused by every fixture below.
    iso = timeutils.isotime(self.dt)
    self.coerce_good_values = [(self.dt, self.dt), (iso, self.dt)]
    self.coerce_bad_values = [1, "foo"]
    self.to_primitive_values = [(self.dt, iso)]
    self.from_primitive_values = [(iso, self.dt)]
def _populate_token_dates(self, token_data, expires=None, trust=None,
                          issued_at=None):
    """Fill in expires_at/issued_at on a token payload.

    Missing values default to the provider's expiry and "now"; datetime
    values are rendered as subsecond ISO-8601 strings.
    """
    expires = expires or provider.default_expire_time()
    if not isinstance(expires, six.string_types):
        expires = timeutils.isotime(expires, subsecond=True)
    token_data['expires_at'] = expires
    if not issued_at:
        issued_at = timeutils.isotime(subsecond=True)
    token_data['issued_at'] = issued_at
def test_update_environment(self):
    """Check that environment rename works."""
    # Allow both operations unconditionally so only the rename logic is
    # exercised, and record the policy checks the API is expected to make.
    self._set_policy_rules(
        {'show_environment': '@',
         'update_environment': '@'}
    )
    self.expect_policy_check('update_environment',
                             {'environment_id': '12345'})

    # Freeze "now" so the seeded created/updated timestamps are predictable.
    fake_now = timeutils.utcnow()
    timeutils.utcnow.override_time = fake_now

    expected = dict(
        id='12345',
        name='my-env',
        version=0,
        description_text='',
        created=fake_now,
        updated=fake_now,
        tenant_id=self.tenant,
        description={
            'Objects': {
                '?': {'id': '12345'}
            },
            'Attributes': []
        }
    )
    e = models.Environment(**expected)
    test_utils.save_models(e)

    # Re-freeze at a later instant so the rename produces a new
    # 'updated' timestamp distinct from 'created'.
    fake_now = timeutils.utcnow()
    timeutils.utcnow.override_time = fake_now

    # The API response does not echo the raw description back.
    del expected['description']
    expected['services'] = []
    expected['status'] = 'ready'
    expected['name'] = 'renamed_env'
    expected['updated'] = fake_now

    body = {
        'name': 'renamed_env'
    }
    req = self._put('/environments/12345', jsonutils.dump_as_bytes(body))
    result = req.get_response(self.api)
    self.assertEqual(200, result.status_code)

    self.expect_policy_check('show_environment',
                             {'environment_id': '12345'})
    req = self._get('/environments/12345')
    result = req.get_response(self.api)
    self.assertEqual(200, result.status_code)

    # isotime() appends 'Z'; the API emits timestamps without it ([:-1]).
    expected['created'] = timeutils.isotime(expected['created'])[:-1]
    expected['updated'] = timeutils.isotime(expected['updated'])[:-1]
    expected['acquired_by'] = None
    self.assertEqual(expected, jsonutils.loads(result.body))
def test_list_with_changes_since(self):
    """A --changes-since cutoff before creation lists the server; one
    taken afterwards does not."""
    before_create = timeutils.isotime()
    name = str(uuid.uuid4())
    self._create_servers(name, 1)
    output = self.nova("list", params="--changes-since %s" % before_create)
    self.assertIn(name, output, output)

    after_create = timeutils.isotime()
    output = self.nova("list", params="--changes-since %s" % after_create)
    self.assertNotIn(name, output, output)
def _format_image(self, image):
    """Build the external (v2 API) view of an image.

    Starts from the image's extra properties, layers on the standard
    attributes, optional location data, direct URL, tags and hypermedia
    links, then filters the result through the response schema.

    :raises webob.exc.HTTPForbidden: when the domain layer denies access
    """
    image_view = dict()
    try:
        image_view = dict(image.extra_properties)
        attributes = [
            "name", "disk_format", "container_format", "visibility",
            "size", "virtual_size", "status", "checksum", "protected",
            "min_ram", "min_disk", "owner",
        ]
        for key in attributes:
            image_view[key] = getattr(image, key)
        image_view["id"] = image.image_id
        image_view["created_at"] = timeutils.isotime(image.created_at)
        image_view["updated_at"] = timeutils.isotime(image.updated_at)
        if CONF.show_multiple_locations:
            locations = list(image.locations)
            if locations:
                image_view["locations"] = []
                for loc in locations:
                    # Internal bookkeeping fields are not exposed.
                    tmp = dict(loc)
                    tmp.pop("id", None)
                    tmp.pop("status", None)
                    image_view["locations"].append(tmp)
            else:
                # NOTE (flwang): We will still show "locations": [] if
                # image.locations is None to indicate it's allowed to show
                # locations but it's just non-existent.
                image_view["locations"] = []
                LOG.debug("There is not available location "
                          "for image %s", image.image_id)
        if CONF.show_image_direct_url:
            if image.locations:
                # Choose best location configured strategy.
                # (Renamed from the ambiguous single-letter `l` — E741.)
                best_location = location_strategy.choose_best_location(
                    image.locations)
                image_view["direct_url"] = best_location["url"]
            else:
                LOG.debug("There is not available location "
                          "for image %s", image.image_id)
        image_view["tags"] = list(image.tags)
        image_view["self"] = self._get_image_href(image)
        image_view["file"] = self._get_image_href(image, "file")
        image_view["schema"] = "/v2/schemas/image"
        image_view = self.schema.filter(image_view)  # domain
    except exception.Forbidden as e:
        raise webob.exc.HTTPForbidden(explanation=e.msg)
    return image_view
def test_create_environment(self):
    """Create an environment, test environment.show()."""
    # Register the minimal config options that the API code under test reads.
    opts = [
        cfg.StrOpt('config_dir'),
        cfg.StrOpt('config_file', default='murano.conf'),
        cfg.StrOpt('project', default='murano'),
    ]
    config.CONF.register_opts(opts)

    # Allow all three operations; policy checks are recorded in call order.
    self._set_policy_rules(
        {'list_environments': '@',
         'create_environment': '@',
         'show_environment': '@'}
    )
    self.expect_policy_check('create_environment')

    # Freeze the clock so created/updated timestamps are predictable.
    fake_now = timeutils.utcnow()
    timeutils.utcnow.override_time = fake_now

    # Pre-seed the UUIDs the handler will generate, in generation order.
    uuids = ('env_object_id', 'network_id', 'environment_id')
    mock_uuid = self._stub_uuid(uuids)

    expected = {'tenant_id': self.tenant,
                'id': 'environment_id',
                'name': 'my_env',
                'networking': {},
                'version': 0,
                # TODO(sjmc7) - bug 1347298
                # isotime() appends 'Z'; the API emits timestamps
                # without it, hence the [:-1].
                'created': timeutils.isotime(fake_now)[:-1],
                'updated': timeutils.isotime(fake_now)[:-1]}

    body = {'name': 'my_env'}
    req = self._post('/environments', json.dumps(body))
    result = req.get_response(self.api)
    self.assertEqual(expected, json.loads(result.body))

    expected['status'] = 'ready'

    # Reset the policy expectation
    self.expect_policy_check('list_environments')
    req = self._get('/environments')
    result = req.get_response(self.api)
    self.assertEqual(200, result.status_code)
    self.assertEqual({'environments': [expected]}, json.loads(result.body))

    expected['services'] = []

    # Reset the policy expectation
    self.expect_policy_check('show_environment',
                             {'environment_id': uuids[-1]})
    req = self._get('/environments/%s' % uuids[-1])
    result = req.get_response(self.api)
    self.assertEqual(expected, json.loads(result.body))
    # One uuid4() call per pre-seeded id above.
    self.assertEqual(3, mock_uuid.call_count)
def _format_image_member(self, member):
    """Build the schema-filtered external view of an image member."""
    member_view = {attr: getattr(member, attr)
                   for attr in ("member_id", "image_id", "status")}
    member_view["created_at"] = timeutils.isotime(member.created_at)
    member_view["updated_at"] = timeutils.isotime(member.updated_at)
    member_view["schema"] = "/v2/schemas/member"
    return self.schema.filter(member_view)
def format_metadef_tag_notification(metadef_tag):
    """Return a notification payload dict for a metadef tag."""
    payload = {
        'namespace': metadef_tag.namespace,
        'name': metadef_tag.name,
        # name_old mirrors name (no rename info available here).
        'name_old': metadef_tag.name,
        'created_at': timeutils.isotime(metadef_tag.created_at),
        'updated_at': timeutils.isotime(metadef_tag.updated_at),
    }
    payload['deleted'] = False
    payload['deleted_at'] = None
    return payload
def _format_image_member(self, member):
    """Build the schema-filtered external view of an image member."""
    member_view = {}
    for key in ('member_id', 'image_id', 'status'):
        member_view[key] = getattr(member, key)
    for ts_field in ('created_at', 'updated_at'):
        member_view[ts_field] = timeutils.isotime(getattr(member, ts_field))
    member_view['schema'] = '/v2/schemas/member'
    return self.schema.filter(member_view)
def test_update_env_template(self):
    """Check that environment rename works."""
    # Allow both operations; policy checks are recorded in call order.
    self._set_policy_rules(
        {'show_env_template': '@',
         'update_env_template': '@'}
    )
    self.expect_policy_check('update_env_template',
                            {'env_template_id': '12345'})

    # Freeze "now" so the seeded created/updated timestamps are predictable.
    fake_now = timeutils.utcnow()
    timeutils.utcnow.override_time = fake_now

    expected = dict(
        id='12345',
        is_public=False,
        name='my-temp',
        version=0,
        created=fake_now,
        updated=fake_now,
        tenant_id=self.tenant,
        description_text='',
        description={
            'name': 'my-temp',
            '?': {'id': '12345'}
        }
    )
    e = models.EnvironmentTemplate(**expected)
    test_utils.save_models(e)

    # Re-freeze at a later instant so the rename yields a new 'updated'.
    fake_now = timeutils.utcnow()
    timeutils.utcnow.override_time = fake_now

    # The API response does not echo the raw description back.
    del expected['description']
    expected['services'] = []
    expected['name'] = 'renamed_temp'
    expected['updated'] = fake_now

    body = {
        'name': 'renamed_temp'
    }
    req = self._put('/templates/12345', jsonutils.dump_as_bytes(body))
    result = req.get_response(self.api)
    self.assertEqual(200, result.status_code)

    self.expect_policy_check('show_env_template',
                            {'env_template_id': '12345'})
    req = self._get('/templates/12345')
    result = req.get_response(self.api)
    self.assertEqual(200, result.status_code)

    # isotime() appends 'Z'; the API emits timestamps without it ([:-1]).
    expected['created'] = timeutils.isotime(expected['created'])[:-1]
    expected['updated'] = timeutils.isotime(expected['updated'])[:-1]
    self.assertEqual(expected, jsonutils.loads(result.body))
def show(self, request, instance, extend_address=True):
    """Detailed view of a single instance."""
    # Trust/asset-tag info derived from system metadata; '-' is the
    # placeholder emitted when no trust metadata is present at all.
    asset_tags = '-'
    image_tags = None
    if instance["system_metadata"] is not None:
        if "image_tags" in instance["system_metadata"]:
            image_tags = instance["system_metadata"]["image_tags"]
        # NOTE(review): presence of a trust-policy location is treated as
        # trust == 'true'; otherwise the explicit image_trust flag is used
        # — confirm against the producer of this metadata.
        if "image_mtwilson_trustpolicy_location" in instance[
                "system_metadata"]:
            asset_tags = {'trust': 'true', 'tags': image_tags}
        elif "image_trust" in instance["system_metadata"]:
            asset_tags = {'trust': instance["system_metadata"]["image_trust"],
                          'tags': image_tags}
    server = {
        "server": {
            "id": instance["uuid"],
            "name": instance["display_name"],
            "status": self._get_vm_status(instance),
            "tenant_id": instance.get("project_id") or "",
            "user_id": instance.get("user_id") or "",
            "metadata": self._get_metadata(instance),
            "hostId": self._get_host_id(instance) or "",
            # TODO(alex_xu): '_get_image' return {} when there image_ref
            # isn't existed in V3 API, we revert it back to return "" in
            # V2.1.
            # Asset tags are serialized to JSON for transport.
            "tag_properties": json.dumps(asset_tags),
            "image": self._get_image(request, instance),
            "flavor": self._get_flavor(request, instance),
            "created": timeutils.isotime(instance["created_at"]),
            "updated": timeutils.isotime(instance["updated_at"]),
            "addresses": self._get_addresses(request, instance,
                                             extend_address),
            "links": self._get_links(request,
                                     instance["uuid"],
                                     self._collection_name),
        },
    }
    # Fault details are attached only for error-ish states with a record.
    if server["server"]["status"] in self._fault_statuses:
        _inst_fault = self._get_fault(request, instance)
        if _inst_fault:
            server['server']['fault'] = _inst_fault
    if server["server"]["status"] in self._progress_statuses:
        server["server"]["progress"] = instance.get("progress", 0)
    # The 'locked' field only exists from API microversion 2.9 onwards.
    if (request.api_version_request >=
            api_version_request.APIVersionRequest("2.9")):
        server["server"]["locked"] = (True if instance["locked_by"]
                                      else False)
    return server
def format_metadef_resource_type_notification(metadef_resource_type):
    """Return a notification payload dict for a metadef resource type."""
    rt = metadef_resource_type
    return {
        'namespace': rt.namespace,
        'name': rt.name,
        # name_old mirrors name (no rename info available here).
        'name_old': rt.name,
        'prefix': rt.prefix,
        'properties_target': rt.properties_target,
        'created_at': timeutils.isotime(rt.created_at),
        'updated_at': timeutils.isotime(rt.updated_at),
        'deleted': False,
        'deleted_at': None,
    }
def test_create_environment(self):
    """Create an environment, test environment.show()."""
    # Allow all three operations; policy checks are recorded in call order.
    self._set_policy_rules(
        {'list_environments': '@',
         'create_environment': '@',
         'show_environment': '@'}
    )
    self.expect_policy_check('create_environment')

    # Freeze the clock so created/updated timestamps are predictable.
    fake_now = timeutils.utcnow()
    timeutils.utcnow.override_time = fake_now

    # Pre-seed the UUIDs the handler will generate, in generation order.
    uuids = ('env_object_id', 'network_id', 'environment_id')
    mock_uuid = self._stub_uuid(uuids)

    expected = {'tenant_id': self.tenant,
                'id': 'environment_id',
                'name': 'my_env',
                'description_text': 'description',
                'version': 0,
                # isotime() appends 'Z'; the API emits timestamps
                # without it, hence the [:-1].
                'created': timeutils.isotime(fake_now)[:-1],
                'updated': timeutils.isotime(fake_now)[:-1],
                }

    body = {'name': 'my_env', 'description_text': 'description'}
    req = self._post('/environments', jsonutils.dump_as_bytes(body))
    result = req.get_response(self.api)
    self.assertEqual(expected, jsonutils.loads(result.body))

    expected['status'] = 'ready'

    # Reset the policy expectation
    self.expect_policy_check('list_environments')
    req = self._get('/environments')
    result = req.get_response(self.api)
    self.assertEqual(200, result.status_code)
    self.assertEqual({'environments': [expected]},
                     jsonutils.loads(result.body))

    expected['services'] = []
    expected['acquired_by'] = None

    # Reset the policy expectation
    self.expect_policy_check('show_environment',
                             {'environment_id': uuids[-1]})
    req = self._get('/environments/%s' % uuids[-1])
    result = req.get_response(self.api)
    self.assertEqual(expected, jsonutils.loads(result.body))
    # One uuid4() call per pre-seeded id above.
    self.assertEqual(3, mock_uuid.call_count)
def format_software_config(sc):
    """Map a software-config object onto the RPC API dict, or None."""
    if sc is None:
        return None
    conf = sc.config
    result = {
        rpc_api.SOFTWARE_CONFIG_ID: sc.id,
        rpc_api.SOFTWARE_CONFIG_NAME: sc.name,
        rpc_api.SOFTWARE_CONFIG_GROUP: sc.group,
        rpc_api.SOFTWARE_CONFIG_CONFIG: conf['config'],
        rpc_api.SOFTWARE_CONFIG_INPUTS: conf['inputs'],
        rpc_api.SOFTWARE_CONFIG_OUTPUTS: conf['outputs'],
        rpc_api.SOFTWARE_CONFIG_OPTIONS: conf['options'],
        rpc_api.SOFTWARE_CONFIG_CREATION_TIME:
            timeutils.isotime(sc.created_at),
    }
    return result
def to_dict(self):
    """Serialize this revocation event, omitting unset (None) fields.

    :returns: dict of the event's non-None attributes, with trust/OAuth
        fields namespaced and datetimes rendered as ISO-8601 strings
    """
    keys = ['user_id',
            'role_id',
            'domain_id',
            'domain_scope_id',
            'project_id',
            'audit_id',
            'audit_chain_id',
            ]
    event = dict((key, self.__dict__[key]) for key in keys
                 if self.__dict__[key] is not None)
    if self.trust_id is not None:
        event['OS-TRUST:trust_id'] = self.trust_id
    if self.consumer_id is not None:
        event['OS-OAUTH1:consumer_id'] = self.consumer_id
    # BUG FIX: this guard previously re-tested consumer_id (copy-paste),
    # so access_token_id could be emitted as None (when only consumer_id
    # was set) or silently dropped (when only access_token_id was set).
    if self.access_token_id is not None:
        event['OS-OAUTH1:access_token_id'] = self.access_token_id
    if self.expires_at is not None:
        event['expires_at'] = timeutils.isotime(self.expires_at)
    if self.issued_before is not None:
        event['issued_before'] = timeutils.isotime(self.issued_before,
                                                   subsecond=True)
    return event
def get_samples(self, manager, cache, resources):
    """Yield one object-size gauge sample per radosgw bucket."""
    accounts = self._iter_accounts(manager.keystone, cache, resources)
    for tenant, bucket_info in accounts:
        for bucket in bucket_info['buckets']:
            yield sample.Sample(
                name='radosgw.containers.objects.size',
                type=sample.TYPE_GAUGE,
                # Bucket size is reported in KiB; convert to bytes.
                volume=int(bucket.size * 1024),
                unit='B',
                user_id=None,
                project_id=tenant,
                resource_id=tenant + '/' + bucket.name,
                timestamp=timeutils.isotime(),
                resource_metadata=None,
            )
def revocation_list(self, context, auth=None):
    """Return the CMS-signed list of revoked tokens.

    :raises exception.Gone: when revoke-by-id support is disabled
    """
    if not CONF.token.revoke_by_id:
        raise exception.Gone()
    tokens = self.token_provider_api.list_revoked_tokens()
    # Datetimes are not JSON-serializable; render them as ISO-8601.
    for token in tokens:
        expires = token['expires']
        if expires and isinstance(expires, datetime.datetime):
            token['expires'] = timeutils.isotime(expires)
    json_data = jsonutils.dumps({'revoked': tokens})
    signed_text = cms.cms_sign_text(json_data,
                                    CONF.signing.certfile,
                                    CONF.signing.keyfile)
    return {'signed': signed_text}
def _usage_from_volume(context, volume_ref, **kw):
    """Build a usage-notification payload from a volume reference.

    Extra keyword arguments override/extend the base payload.
    """
    usage_info = {
        'tenant_id': volume_ref['project_id'],
        'host': volume_ref['host'],
        'user_id': volume_ref['user_id'],
        'availability_zone': volume_ref['availability_zone'],
        'volume_id': volume_ref['id'],
        'volume_type': volume_ref['volume_type_id'],
        'display_name': volume_ref['display_name'],
        'launched_at': timeutils.isotime(at=volume_ref['launched_at']),
        'created_at': timeutils.isotime(at=volume_ref['created_at']),
        'status': volume_ref['status'],
        'snapshot_id': volume_ref['snapshot_id'],
        'size': volume_ref['size'],
        'replication_status': volume_ref['replication_status'],
        'replication_extended_status':
            volume_ref['replication_extended_status'],
        'replication_driver_data': volume_ref['replication_driver_data'],
    }
    usage_info.update(kw)
    return usage_info
def get_samples(self, manager, cache, resources):
    """Yield one container-count gauge sample per tenant account."""
    for tenant, account in self._iter_accounts(manager.keystone, cache,
                                               resources):
        container_count = int(account['x-account-container-count'])
        yield sample.Sample(
            name='storage.objects.containers',
            type=sample.TYPE_GAUGE,
            volume=container_count,
            unit='container',
            user_id=None,
            project_id=tenant,
            resource_id=tenant,
            timestamp=timeutils.isotime(),
            resource_metadata=None,
        )
def _attach_network_interface_item(context, network_interface, instance_id,
                                   device_index, attach_time=None,
                                   delete_on_termination=False):
    """Record a network interface as attached and persist the item."""
    if not attach_time:
        # Default to "now" with subsecond precision.
        attach_time = timeutils.isotime(subsecond=True)
    network_interface.update({
        'instance_id': instance_id,
        'device_index': device_index,
        'attach_time': attach_time,
        'delete_on_termination': delete_on_termination,
    })
    db_api.update_item(context, network_interface)
def construct_operation(self, context, op_type, target_type, target_name,
                        scope):
    """Build a GCE-style operation resource dict with a fresh id."""
    op_id = str(uuid.uuid4())
    operation = {
        "id": op_id,
        "name": "operation-" + op_id,
        # Subsecond insert timestamp from the request context.
        "insert_time": timeutils.isotime(context.timestamp, True),
        "user": context.user_name,
        "type": op_type,
        "target_type": target_type,
        "target_name": target_name,
    }
    operation["scope_type"] = scope.get_type()
    operation["scope_name"] = scope.get_name()
    return operation
def test_add_category(self):
    """Check that category added successfully"""
    self._set_policy_rules({'add_category': '@'})
    self.expect_policy_check('add_category')

    # Freeze the clock so created/updated timestamps are predictable.
    frozen_now = timeutils.utcnow()
    timeutils.utcnow.override_time = frozen_now
    # isotime() appends 'Z'; the API emits timestamps without it.
    stamp = timeutils.isotime(frozen_now)[:-1]

    expected = {
        'name': 'new_category',
        'created': stamp,
        'updated': stamp,
        'package_count': 0,
    }

    request = self._post('/catalog/categories',
                         json.dumps({'name': 'new_category'}))
    result = request.get_response(self.api)
    processed_result = json.loads(result.body)

    # The id is server-generated; fold it into the expectation.
    self.assertIn('id', processed_result.keys())
    expected['id'] = processed_result['id']
    self.assertDictEqual(expected, processed_result)
def create_v2_token():
    """Return a minimal v2 token body expiring FUTURE_DELTA from now."""
    expiry = timeutils.isotime(timeutils.utcnow() + FUTURE_DELTA)
    return {
        "access": {
            "token": {
                "expires": expiry,
                "issued_at": "2013-05-21T00:02:43.941473Z",
                "tenant": {
                    "enabled": True,
                    "id": "01257",
                    "name": "service"
                }
            }
        }
    }
def make_sample_from_instance(instance, name, type, unit, volume,
                              resource_id=None, additional_metadata=None):
    """Create a Sample bound to a nova instance, merging extra metadata."""
    metadata = _get_metadata_from_object(instance)
    metadata.update(additional_metadata or {})
    return sample.Sample(
        name=name,
        type=type,
        unit=unit,
        volume=volume,
        user_id=instance.user_id,
        project_id=instance.tenant_id,
        # Fall back to the instance id when no explicit resource id given.
        resource_id=resource_id or instance.id,
        timestamp=timeutils.isotime(),
        resource_metadata=metadata,
    )
def update_statistics(self):
    """Collect the last 24 hours of per-metric averages from Monasca.

    Bug fix: the window start was computed from datetime.datetime.now()
    (local wall-clock time) but serialized with timeutils.isotime(), which
    renders the value as UTC; on any host not running in UTC the query
    window was shifted by the local offset.  Use timeutils.utcnow() so the
    ISO-8601 start time really is UTC.
    """
    lookback = datetime.timedelta(hours=24)
    start_from = timeutils.isotime(timeutils.utcnow() - lookback)
    for metric in self.monasca.metrics.list_names():
        LOG.debug("Monasca statistics for metric %s", metric['name'])
        _query_args = dict(
            start_time=start_from,
            name=metric['name'],
            statistics='avg',
            # Aggregation period (seconds) matches the poll interval.
            period=int(self.creds['poll_time']),
            merge_metrics='true')
        statistics = self.monasca.metrics.list_statistics(**_query_args)
        self._translate_statistics(statistics)
def test_unscoped_payload(self):
    """Round-trip an unscoped token payload through assemble/disassemble."""
    exp_user_id = uuid.uuid4().hex
    exp_methods = ['password']
    exp_expires_at = timeutils.isotime(timeutils.utcnow())
    exp_audit_ids = [provider.random_urlsafe_str()]

    payload = token_formatters.UnscopedPayload.assemble(
        exp_user_id, exp_methods, exp_expires_at, exp_audit_ids)
    actual = token_formatters.UnscopedPayload.disassemble(payload)

    # Every field must survive the round trip unchanged.
    self.assertEqual(exp_user_id, actual[0])
    self.assertEqual(exp_methods, actual[1])
    self.assertEqual(exp_expires_at, actual[2])
    self.assertEqual(exp_audit_ids, actual[3])
def test_time_string_to_int_conversions(self):
    """ISO time strings convert to epoch seconds and back losslessly."""
    payload_cls = token_formatters.BasePayload

    time_str = timeutils.isotime()
    parsed = timeutils.parse_isotime(time_str)
    epoch = datetime.datetime.utcfromtimestamp(0)
    expected_int = (timeutils.normalize_time(parsed) - epoch).total_seconds()

    as_int = payload_cls._convert_time_string_to_int(time_str)
    self.assertEqual(expected_int, as_int)

    # And converting back must reproduce the original string.
    round_tripped = payload_cls._convert_int_to_time_string(as_int)
    self.assertEqual(time_str, round_tripped)
def create_token(self, token_id, data):
    """Create a token by id and data.

    It is assumed the caller has performed data validation on the "data"
    parameter.

    Stores the token body under its prefixed key and indexes it in the
    owning user's token list; for trust-scoped tokens the trustee's list
    is updated as well.
    """
    data_copy = copy.deepcopy(data)
    ptk = self._prefix_token_id(token_id)
    # Fill in defaults the caller may have omitted.
    if not data_copy.get('expires'):
        data_copy['expires'] = provider.default_expire_time()
    if not data_copy.get('user_id'):
        data_copy['user_id'] = data_copy['user']['id']

    # NOTE(morganfainberg): for ease of manipulating the data without
    # concern about the backend, always store the value(s) in the
    # index as the isotime (string) version so this is where the string is
    # built.
    expires_str = timeutils.isotime(data_copy['expires'], subsecond=True)

    self._set_key(ptk, data_copy)
    user_id = data['user']['id']
    user_key = self._prefix_user_id(user_id)
    self._update_user_token_list(user_key, token_id, expires_str)
    if CONF.trust.enabled and data.get('trust_id'):
        # NOTE(morganfainberg): If trusts are enabled and this is a trust
        # scoped token, we add the token to the trustee list as well. This
        # allows password changes of the trustee to also expire the token.
        # There is no harm in placing the token in multiple lists, as
        # _list_tokens is smart enough to handle almost any case of
        # valid/invalid/expired for a given token.
        token_data = data_copy['token_data']
        # The trustee id lives in a different place depending on the
        # token format version.
        if data_copy['token_version'] == token.provider.V2:
            trustee_user_id = token_data['access']['trust'][
                'trustee_user_id']
        elif data_copy['token_version'] == token.provider.V3:
            trustee_user_id = token_data['OS-TRUST:trust'][
                'trustee_user_id']
        else:
            raise exception.UnsupportedTokenVersionException(
                _('Unknown token version %s') %
                data_copy.get('token_version'))
        trustee_key = self._prefix_user_id(trustee_user_id)
        self._update_user_token_list(trustee_key, token_id, expires_str)

    return data_copy
def get_samples(self, manager, cache, resources):
    """Yield per-container size gauges for every tenant's containers."""
    for tenant, account in self._iter_accounts(manager.keystone, cache,
                                               resources):
        # account is indexable; index 1 holds the per-container listing.
        for container in account[1]:
            yield sample.Sample(
                name='storage.containers.objects.size',
                type=sample.TYPE_GAUGE,
                volume=int(container['bytes']),
                unit='B',
                user_id=None,
                project_id=tenant,
                resource_id=tenant + '/' + container['name'],
                timestamp=timeutils.isotime(),
                resource_metadata=None,
            )
def test_get_keyring(self):
    """authenticate() must reuse a still-valid token from the keyring."""
    client = httpclient.HTTPClient(username=USERNAME, password=PASSWORD,
                                   tenant_id=TENANT_ID, auth_url=AUTH_URL,
                                   use_keyring=True)

    # Seed the keyring with a token that stays valid for 30 minutes.
    auth_ref = access.AccessInfo.factory(body=PROJECT_SCOPED_TOKEN)
    expiry = timeutils.utcnow() + datetime.timedelta(minutes=30)
    auth_ref['token']['expires'] = timeutils.isotime(expiry)
    self.memory_keyring.password = pickle.dumps(auth_ref)

    # get_raw_token is deliberately not stubbed, so a real authenticate
    # call would fail; success proves the keyring entry was used.
    self.assertTrue(client.authenticate())
    self.assertTrue(self.memory_keyring.fetched)
def save_operation(self, context, operation, start_time,
                   get_progress_method, item_id, operation_result):
    """Finalize an operation record and persist it to the database."""
    if isinstance(operation_result, Exception):
        operation.update(self._error_from_exception(operation_result))
    operation["start_time"] = start_time
    method_key = self._method_keys.get(get_progress_method)
    finished = method_key is None or "error_code" in operation
    if finished:
        # No progress tracker (or the operation already failed): mark it
        # complete immediately.
        operation["progress"] = 100
        operation["status"] = "DONE"
        operation["end_time"] = timeutils.isotime(None, True)
    else:
        operation["progress"] = 0
        operation["status"] = "RUNNING"
        operation["method_key"] = method_key
        if item_id is not None:
            operation["item_id"] = item_id
    return self._add_db_item(context, operation)
def format_event(event):
    """Convert an event object into its RPC-API dict representation."""
    stack_id = event.stack.identifier()
    result = {}
    result[rpc_api.EVENT_ID] = dict(event.identifier())
    result[rpc_api.EVENT_STACK_ID] = dict(stack_id)
    result[rpc_api.EVENT_STACK_NAME] = stack_id.stack_name
    result[rpc_api.EVENT_TIMESTAMP] = timeutils.isotime(event.timestamp)
    result[rpc_api.EVENT_RES_NAME] = event.resource_name
    result[rpc_api.EVENT_RES_PHYSICAL_ID] = event.physical_resource_id
    result[rpc_api.EVENT_RES_ACTION] = event.action
    result[rpc_api.EVENT_RES_STATUS] = event.status
    result[rpc_api.EVENT_RES_STATUS_DATA] = event.reason
    result[rpc_api.EVENT_RES_TYPE] = event.resource_type
    result[rpc_api.EVENT_RES_PROPERTIES] = event.resource_properties
    return result
def create(self, trustee_user, trustor_user, role_names=None, project=None,
           impersonation=False, expires_at=None, remaining_uses=None,
           **kwargs):
    """Create a Trust.

    :param string trustee_user: user who is capable of consuming the trust
    :param string trustor_user: user who's authorization is being delegated
    :param string role_names: subset of trustor's roles to be granted
    :param string project: project which the trustor is delegating
    :param boolean impersonation: enable explicit impersonation
    :param datetime.datetime expires_at: expiry time
    :param integer remaining_uses: how many times this trust can be used
        to generate a token. None means unlimited tokens.

    """
    # The API expects roles as a list of {'name': ...} dicts.
    roles = [{'name': name} for name in role_names] if role_names else None

    # The API expects the expiry as a subsecond ISO-8601 string.
    expires_str = None
    if expires_at:
        expires_str = timeutils.isotime(at=expires_at, subsecond=True)

    return super(TrustManager, self).create(
        expires_at=expires_str,
        impersonation=impersonation,
        project_id=base.getid(project),
        remaining_uses=remaining_uses,
        roles=roles,
        trustee_user_id=base.getid(trustee_user),
        trustor_user_id=base.getid(trustor_user),
        **kwargs)
def test_trusted_filter_untrusted_and_untrusted_passes(self, req_mock):
    """An 'untrusted' host passes when the flavor asks for 'untrusted'."""
    oat_data = {
        "hosts": [{
            "host_name": "node1",
            "trust_lvl": "untrusted",
            "vtime": timeutils.isotime(),
        }],
    }
    req_mock.return_value = requests.codes.OK, oat_data

    filter_properties = {
        'context': mock.sentinel.ctx,
        'instance_type': {
            'memory_mb': 1024,
            'extra_specs': {'trust:trusted_host': 'untrusted'},
        },
    }
    host = fakes.FakeHostState('host1', 'node1', {})
    self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def format_notification_body(stack):
    """Build the notification payload dict for a stack state change."""
    # some other possibilities here are:
    # - template name
    # - template size
    # - resource count
    if stack.status is None or stack.action is None:
        state = 'Unknown'
    else:
        state = '_'.join(stack.state)
    return {
        rpc_api.NOTIFY_TENANT_ID: stack.context.tenant_id,
        rpc_api.NOTIFY_USER_ID: stack.context.user,
        rpc_api.NOTIFY_STACK_ID: stack.identifier().arn(),
        rpc_api.NOTIFY_STACK_NAME: stack.name,
        rpc_api.NOTIFY_STATE: state,
        rpc_api.NOTIFY_STATE_REASON: stack.status_reason,
        rpc_api.NOTIFY_CREATE_AT: timeutils.isotime(stack.created_time),
    }
def test_domain_scoped_payload_with_default_domain(self):
    """Round-trip a domain-scoped payload using the default domain id."""
    exp_user_id = uuid.uuid4().hex
    exp_methods = ['password']
    exp_domain_id = CONF.identity.default_domain_id
    exp_expires_at = timeutils.isotime(timeutils.utcnow())
    exp_audit_ids = [provider.random_urlsafe_str()]

    payload = token_formatters.DomainScopedPayload.assemble(
        exp_user_id, exp_methods, exp_domain_id, exp_expires_at,
        exp_audit_ids)
    actual = token_formatters.DomainScopedPayload.disassemble(payload)

    # Every field must survive the round trip unchanged.
    self.assertEqual(exp_user_id, actual[0])
    self.assertEqual(exp_methods, actual[1])
    self.assertEqual(exp_domain_id, actual[2])
    self.assertEqual(exp_expires_at, actual[3])
    self.assertEqual(exp_audit_ids, actual[4])
def make_sample_from_pool(pool, name, type, unit, volume,
                          resource_metadata=None):
    """Create a Sample describing one load-balancer pool measurement."""
    return sample.Sample(
        name=name,
        type=type,
        unit=unit,
        volume=volume,
        user_id=None,
        project_id=pool['tenant_id'],
        resource_id=pool['id'],
        timestamp=timeutils.isotime(),
        resource_metadata=resource_metadata or {},
    )
def test_project_scoped_payload_with_non_uuid_project_id(self):
    """Round-trip a project-scoped payload with a non-UUID project id."""
    exp_user_id = uuid.uuid4().hex
    exp_methods = ['password']
    exp_project_id = 'someNonUuidProjectId'
    exp_expires_at = timeutils.isotime(timeutils.utcnow())
    exp_audit_ids = [provider.random_urlsafe_str()]

    payload = token_formatters.ProjectScopedPayload.assemble(
        exp_user_id, exp_methods, exp_project_id, exp_expires_at,
        exp_audit_ids)
    actual = token_formatters.ProjectScopedPayload.disassemble(payload)

    # Every field must survive the round trip unchanged.
    self.assertEqual(exp_user_id, actual[0])
    self.assertEqual(exp_methods, actual[1])
    self.assertEqual(exp_project_id, actual[2])
    self.assertEqual(exp_expires_at, actual[3])
    self.assertEqual(exp_audit_ids, actual[4])
def serialize_swift_account(account):
    """Flatten a swift account response dict into an indexable document.

    Bug fix: swift's ``x-timestamp`` header is a Unix epoch value (UTC by
    definition), but it was decoded with ``datetime.fromtimestamp()``,
    which converts to the server's *local* timezone before isotime()
    renders the result as UTC — skewing ``created_at`` on non-UTC hosts.
    Decode with ``utcfromtimestamp()`` instead.  Also tidied the metadata
    comprehension, which iterated ``items()`` but discarded the value and
    re-looked it up with ``get()``.
    """
    # Carry through all custom account metadata headers verbatim.
    metadocument = {
        k: v for k, v in account.items()
        if k.lower().startswith("x-account-meta")
    }
    account_fields = ('id', 'name')
    document = {f: account.get(f, None) for f in account_fields}
    document['domain_id'] = account.get('x-account-project-domain-id', None)
    if account.get('x-timestamp'):
        timestamp = float(account.get('x-timestamp'))
        document['created_at'] = timeutils.isotime(
            datetime.datetime.utcfromtimestamp(timestamp))
    # lakshmiS: swift get_account() doesn't include update datetime field(?)
    if account.get('updated_at'):
        document['updated_at'] = account.get('updated_at')
    document.update(metadocument)
    return document
def test_get_samples_start_timestamp_filter(self):
    # Verifies that get_samples() issues exactly one measurements_list
    # call when the sample filter carries a start_timestamp bound.
    with mock.patch("ceilometer.monasca_client.Client") as mock_client:
        conn = impl_monasca.Connection("127.0.0.1:8080")
        metrics_list_mock = mock_client().metrics_list
        metrics_list_mock.return_value = (
            TestGetSamples.dummy_metrics_mocked_return_value)
        ml_mock = mock_client().measurements_list
        ml_mock.return_value = (
            TestGetSamples.dummy_get_samples_mocked_return_value)

        start_time = datetime.datetime(2015, 3, 20)
        # start_timestamp must be an ISO-8601 string, as produced by
        # timeutils.isotime().
        sample_filter = storage.SampleFilter(
            meter='specific meter',
            start_timestamp=timeutils.isotime(start_time),
            start_timestamp_op='ge')
        # get_samples() is a generator; list() drains it so the mock calls
        # actually happen.
        list(conn.get_samples(sample_filter))
        self.assertEqual(True, ml_mock.called)
        self.assertEqual(1, ml_mock.call_count)
def generate_sample(self, pollster_sample, pollster_definitons=None):
    """Build a Sample from one polled value using the pollster definitions.

    Bug fix: ``metadata`` was initialized to a *list* (``[]``) and only
    replaced with a dict when ``'metadata_fields'`` was configured, so
    without that key a list leaked into ``generate_new_metadata_fields``
    and ``resource_metadata``, which expect a mapping.  It is now always
    a dict.

    Note: the misspelled ``pollster_definitons`` parameter name is kept
    for backward compatibility with existing keyword callers.
    """
    pollster_definitions = (pollster_definitons or
                            self.definitions.configurations)
    metadata = {}
    if 'metadata_fields' in pollster_definitions:
        # Copy only the configured fields out of the polled payload.
        metadata = {k: pollster_sample.get(k)
                    for k in pollster_definitions['metadata_fields']}
    self.generate_new_metadata_fields(
        metadata=metadata, pollster_definitions=pollster_definitions)
    return ceilometer_sample.Sample(
        timestamp=timeutils.isotime(),
        name=pollster_definitions['name'],
        type=pollster_definitions['sample_type'],
        unit=pollster_definitions['unit'],
        volume=pollster_sample['value'],
        user_id=pollster_sample.get("user_id"),
        project_id=pollster_sample.get("project_id"),
        resource_id=pollster_sample.get("id"),
        resource_metadata=metadata)
def format_stack_resource(resource, detail=True, with_props=False,
                          with_attr=None):
    '''
    Return a representation of the given resource that matches the API output
    expectations.
    '''
    # Prefer the last update time; fall back to creation time.
    res = {
        rpc_api.RES_UPDATED_TIME: timeutils.isotime(
            resource.updated_time or resource.created_time),
        rpc_api.RES_NAME: resource.name,
        rpc_api.RES_PHYSICAL_ID: resource.resource_id or '',
        rpc_api.RES_ACTION: resource.action,
        rpc_api.RES_STATUS: resource.status,
        rpc_api.RES_STATUS_DATA: resource.status_reason,
        rpc_api.RES_TYPE: resource.type(),
        rpc_api.RES_ID: dict(resource.identifier()),
        rpc_api.RES_STACK_ID: dict(resource.stack.identifier()),
        rpc_api.RES_STACK_NAME: resource.stack.name,
        rpc_api.RES_REQUIRED_BY: resource.required_by(),
    }

    nested = getattr(resource, 'nested', None)
    if callable(nested) and nested() is not None:
        res[rpc_api.RES_NESTED_STACK_ID] = dict(nested().identifier())

    parent_name = resource.stack.parent_resource_name
    if parent_name:
        res[rpc_api.RES_PARENT_RESOURCE] = parent_name

    if detail:
        res[rpc_api.RES_DESCRIPTION] = resource.t.description
        res[rpc_api.RES_METADATA] = resource.metadata_get()
        res[rpc_api.RES_SCHEMA_ATTRIBUTES] = format_resource_attributes(
            resource, with_attr)

    if with_props:
        res[rpc_api.RES_SCHEMA_PROPERTIES] = format_resource_properties(
            resource)

    return res
def format_watch_data(wd):
    """Demangle DB watch data into the API representation.

    wd.data is expected to hold exactly two keys: 'Namespace' plus one
    metric-name key.
    """
    namespace = wd.data['Namespace']
    metrics = [(k, v) for k, v in wd.data.items() if k != 'Namespace']
    if len(metrics) != 1:
        LOG.error(_LE("Unexpected number of keys in watch_data.data!"))
        return
    metric_name, metric_data = metrics[0]
    return {
        rpc_api.WATCH_DATA_ALARM: wd.watch_rule.name,
        rpc_api.WATCH_DATA_METRIC: metric_name,
        rpc_api.WATCH_DATA_TIME: timeutils.isotime(wd.created_at),
        rpc_api.WATCH_DATA_NAMESPACE: namespace,
        rpc_api.WATCH_DATA: metric_data,
    }