def _validate_datetime_format(instance):
    try:
        timeutils.parse_isotime(instance)
    except ValueError:
        return False
    else:
        return True
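# A hedged usage sketch for the validator above; it assumes the same
# timeutils module (oslo's) is importable, as the definition itself requires.
# parse_isotime() raises ValueError for anything that is not ISO 8601.
assert _validate_datetime_format('2013-08-01T11:30:25Z') is True
assert _validate_datetime_format('not-a-timestamp') is False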
def _get_active_by_window_joined(cls, context, begin, end=None,
                                 project_id=None, host=None,
                                 expected_attrs=None):
    # NOTE(mriedem): We need to convert the begin/end timestamp strings
    # to timezone-aware datetime objects for the DB API call.
    begin = timeutils.parse_isotime(begin)
    end = timeutils.parse_isotime(end) if end else None
    db_inst_list = db.instance_get_active_by_window_joined(context, begin,
                                                           end, project_id,
                                                           host)
    return _make_instance_list(context, cls(), db_inst_list, expected_attrs)
def sync_instances(self, req, body):
    """Tell all cells to sync instance info."""
    context = req.environ['nova.context']
    authorize(context)
    project_id = body.pop('project_id', None)
    deleted = body.pop('deleted', False)
    updated_since = body.pop('updated_since', None)
    if body:
        msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
                "understood.")
        raise exc.HTTPBadRequest(explanation=msg)
    if isinstance(deleted, six.string_types):
        try:
            deleted = strutils.bool_from_string(deleted, strict=True)
        except ValueError as err:
            raise exc.HTTPBadRequest(explanation=str(err))
    if updated_since:
        try:
            timeutils.parse_isotime(updated_since)
        except ValueError:
            msg = _('Invalid changes-since value')
            raise exc.HTTPBadRequest(explanation=msg)
    self.cells_rpcapi.sync_instances(context, project_id=project_id,
                                     updated_since=updated_since,
                                     deleted=deleted)
def sync_instances(self, req, body):
    """Tell all cells to sync instance info."""
    context = req.environ['nova.context']
    authorize(context, action="sync_instances")
    project_id = body.pop('project_id', None)
    deleted = body.pop('deleted', False)
    updated_since = body.pop('updated_since', None)
    if body:
        msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
                "understood.")
        raise exc.HTTPBadRequest(explanation=msg)
    if isinstance(deleted, six.string_types):
        try:
            deleted = strutils.bool_from_string(deleted, strict=True)
        except ValueError as err:
            raise exc.HTTPBadRequest(explanation=str(err))
    if updated_since:
        try:
            timeutils.parse_isotime(updated_since)
        except ValueError:
            msg = _('Invalid changes-since value')
            raise exc.HTTPBadRequest(explanation=msg)
    self.cells_rpcapi.sync_instances(context, project_id=project_id,
                                     updated_since=updated_since,
                                     deleted=deleted)
def _hours_for(self, instance, period_start, period_stop):
    launched_at = instance.launched_at
    terminated_at = instance.terminated_at
    if terminated_at is not None:
        if not isinstance(terminated_at, datetime.datetime):
            # NOTE(mriedem): Instance object DateTime fields are
            # timezone-aware so convert using isotime.
            terminated_at = timeutils.parse_isotime(terminated_at)

    if launched_at is not None:
        if not isinstance(launched_at, datetime.datetime):
            launched_at = timeutils.parse_isotime(launched_at)

    if terminated_at and terminated_at < period_start:
        return 0
    # nothing if it started after the usage report ended
    if launched_at and launched_at > period_stop:
        return 0
    if launched_at:
        # if the instance launched after the period started, don't
        # charge for the time before the period
        start = max(launched_at, period_start)
        if terminated_at:
            # if the instance stopped before the period ended, don't
            # charge after the stop time
            stop = min(period_stop, terminated_at)
        else:
            # instance is still running, so charge them up to current time
            stop = period_stop
        dt = stop - start
        # NOTE: a microsecond is 1/1000000 of a second, so divide by 1e6
        seconds = (dt.days * 3600 * 24 + dt.seconds +
                   dt.microseconds / 1000000.0)
        return seconds / 3600.0
    else:
        # instance hasn't launched, so no charge
        return 0
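# Illustrative sketch (not part of the original module): a worked example of
# the billing arithmetic in _hours_for() using plain datetimes. The window
# values are hypothetical; only the seconds-to-hours conversion mirrors the
# code above.
import datetime

start = datetime.datetime(2013, 8, 1, 0, 0, 0)
stop = datetime.datetime(2013, 8, 1, 6, 30, 0)
dt = stop - start
seconds = dt.days * 3600 * 24 + dt.seconds + dt.microseconds / 1000000.0
assert seconds / 3600.0 == 6.5  # a 6h30m window yields 6.5 billable hours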
def assertServerUsage(self, server, launched_at, terminated_at):
    resp_launched_at = timeutils.parse_isotime(
        server.get('%slaunched_at' % self.prefix))
    self.assertEqual(timeutils.normalize_time(resp_launched_at),
                     launched_at)
    resp_terminated_at = timeutils.parse_isotime(
        server.get('%sterminated_at' % self.prefix))
    self.assertEqual(timeutils.normalize_time(resp_terminated_at),
                     terminated_at)
def __init__(self):
    timestamp = timeutils.parse_isotime('2013-08-01T11:30:25')
    self._images = [
        {'id': '60ff30c2-64b6-4a97-9c17-322eebc8bd60',
         'name': 'fake-image-1',
         'created_at': timestamp,
         'updated_at': timestamp,
         'deleted_at': None,
         'deleted': False,
         'status': 'active',
         'is_public': False,
         'container_format': 'raw',
         'disk_format': 'raw',
         'size': '1'},

        {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
         'name': 'fake-image-2',
         'created_at': timestamp,
         'updated_at': timestamp,
         'deleted_at': None,
         'deleted': False,
         'status': 'active',
         'is_public': True,
         'container_format': 'ami',
         'disk_format': 'ami',
         'size': '2'},

        {'id': '0aa076e2-def4-43d1-ae81-c77a9f9279e6',
         'name': 'image-to-delete',
         'created_at': timestamp,
         'updated_at': timestamp,
         'deleted_at': None,
         'deleted': False,
         'status': 'active',
         'is_public': True,
         'container_format': 'ami',
         'disk_format': 'ami',
         'size': '2'},
    ]

    self._new_image_attributes = {
        "new-image": {
            "id": "6a8fd89a-e636-48a4-8095-5510eab696c4",
            "created_at": timeutils.parse_isotime("2013-08-02T11:30:25"),
        },
    }
def _format_date(self, date_string):
    """Returns standard format for given date."""
    if date_string is None:
        return None
    if isinstance(date_string, basestring):
        date_string = timeutils.parse_isotime(date_string)
    return date_string.strftime('%Y-%m-%dT%H:%M:%SZ')
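# Hedged sketch of the contract above: a standalone round trip showing the
# '%Y-%m-%dT%H:%M:%SZ' output format. The input value is hypothetical.
import datetime

dt = datetime.datetime(2013, 8, 1, 11, 30, 25)
assert dt.strftime('%Y-%m-%dT%H:%M:%SZ') == '2013-08-01T11:30:25Z'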
def _init_cache_entry(self, host):
    self.compute_nodes[host] = {
        'trust_lvl': 'unknown',
        'vtime': timeutils.normalize_time(
            timeutils.parse_isotime("1970-01-01T00:00:00Z"))}
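# Sketch of what normalize_time(parse_isotime(...)) produces: a tz-naive UTC
# datetime that compares cleanly against timeutils.utcnow(). This standalone
# equivalent uses only the stdlib and iso8601 (an assumption: the same
# libraries the surrounding code relies on).
import datetime
import iso8601

aware = iso8601.parse_date("2012-09-09T05:10:40-04:00")
naive_utc = aware.astimezone(iso8601.iso8601.UTC).replace(tzinfo=None)
assert naive_utc == datetime.datetime(2012, 9, 9, 9, 10, 40)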
def test_trusted_filter_update_cache_timezone(self, req_mock):
    oat_data = {"hosts": [{"host_name": "node1",
                           "trust_lvl": "untrusted",
                           "vtime": "2012-09-09T05:10:40-04:00"}]}
    req_mock.return_value = requests.codes.OK, oat_data
    extra_specs = {'trust:trusted_host': 'untrusted'}
    filter_properties = {'context': mock.sentinel.ctx,
                         'instance_type': {'memory_mb': 1024,
                                           'extra_specs': extra_specs}}
    host = fakes.FakeHostState('host1', 'node1', {})

    timeutils.set_time_override(
        timeutils.normalize_time(
            timeutils.parse_isotime("2012-09-09T09:10:40Z")))
    self.filt_cls.host_passes(host, filter_properties)  # Fill the caches

    req_mock.reset_mock()
    self.filt_cls.host_passes(host, filter_properties)
    self.assertFalse(req_mock.called)

    req_mock.reset_mock()
    timeutils.advance_time_seconds(
        CONF.trusted_computing.attestation_auth_timeout - 10)
    self.filt_cls.host_passes(host, filter_properties)
    self.assertFalse(req_mock.called)

    timeutils.clear_time_override()
def test_datetime_or_str_or_none(self):
    dts = timeutils.isotime()
    dt = timeutils.parse_isotime(dts)
    self.assertEqual(utils.datetime_or_str_or_none(dt), dt)
    self.assertEqual(utils.datetime_or_str_or_none(None), None)
    self.assertEqual(utils.datetime_or_str_or_none(dts), dt)
    self.assertRaises(ValueError, utils.datetime_or_str_or_none, 'foo')
def test_datetime_or_none(self):
    naive_dt = datetime.datetime.now()
    dt = timeutils.parse_isotime(timeutils.isotime(naive_dt))
    self.assertEqual(utils.datetime_or_none(dt), dt)
    self.assertEqual(utils.datetime_or_none(dt),
                     naive_dt.replace(tzinfo=iso8601.iso8601.Utc(),
                                      microsecond=0))
    self.assertEqual(utils.datetime_or_none(None), None)
    self.assertRaises(ValueError, utils.datetime_or_none, "foo")
def show(self, req, id):
    """Retrieve tenant_usage for a specified tenant."""
    tenant_id = id
    context = req.environ['nova.context']

    authorize_show(context, {'project_id': tenant_id})

    try:
        (period_start, period_stop, ignore) = self._get_datetime_range(
            req)
    except exception.InvalidStrTime as e:
        raise exc.HTTPBadRequest(explanation=e.format_message())
    now = timeutils.parse_isotime(timeutils.strtime())
    if period_stop > now:
        period_stop = now
    usage = self._tenant_usages_for_period(context,
                                           period_start,
                                           period_stop,
                                           tenant_id=tenant_id,
                                           detailed=True)
    if len(usage):
        usage = usage[0]
    else:
        usage = {}
    return {'tenant_usage': usage}
def _get_servers(self, req, is_detail):
    """Returns a list of servers, based on any search options specified."""

    search_opts = {}
    search_opts.update(req.GET)

    context = req.environ['nova.context']
    remove_invalid_options(context, search_opts,
                           self._get_server_search_options())

    # Verify search by 'status' contains a valid status.
    # Convert it to filter by vm_state for compute_api.
    status = search_opts.pop('status', None)
    if status is not None:
        state = common.vm_state_from_status(status)
        if state is None:
            msg = _('Invalid server status: %(status)s') % locals()
            raise exc.HTTPBadRequest(explanation=msg)
        search_opts['vm_state'] = state

    if 'changes-since' in search_opts:
        try:
            parsed = timeutils.parse_isotime(search_opts['changes-since'])
        except ValueError:
            msg = _('Invalid changes-since value')
            raise exc.HTTPBadRequest(explanation=msg)
        search_opts['changes-since'] = parsed

    # By default, compute's get_all() will return deleted instances.
    # If an admin hasn't specified a 'deleted' search option, we need
    # to filter out deleted instances by setting the filter ourselves.
    # ... Unless 'changes-since' is specified, because 'changes-since'
    # should return recently deleted images according to the API spec.
    if 'deleted' not in search_opts:
        if 'changes-since' not in search_opts:
            # No 'changes-since', so we only want non-deleted servers
            search_opts['deleted'] = False

    # NOTE(dprince): This prevents compute's get_all() from returning
    # instances from multiple tenants when an admin account is used.
    # By default non-admin accounts are always limited to project/user
    # both here and in the compute API.
    if not context.is_admin or (context.is_admin and
                                'all_tenants' not in search_opts):
        if context.project_id:
            search_opts['project_id'] = context.project_id
        else:
            search_opts['user_id'] = context.user_id

    instance_list = self.compute_api.get_all(context,
                                             search_opts=search_opts)
    limited_list = self._limit_items(instance_list, req)
    if is_detail:
        self._add_instance_faults(context, limited_list)
        return self._view_builder.detail(req, limited_list)
    else:
        return self._view_builder.index(req, limited_list)
def coerce(obj, attr, value):
    if isinstance(value, six.string_types):
        value = timeutils.parse_isotime(value)
    elif not isinstance(value, datetime.datetime):
        raise ValueError(_('A datetime.datetime is required here'))
    if value.utcoffset() is None:
        value = value.replace(tzinfo=iso8601.iso8601.Utc())
    return value
def sync_instances(self, req, body):
    """Tell all cells to sync instance info."""
    context = req.environ['nova.context']
    authorize(context)
    project_id = body.pop('project_id', None)
    deleted = body.pop('deleted', False)
    updated_since = body.pop('updated_since', None)
    if body:
        msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
                "understood.")
        raise exc.HTTPBadRequest(explanation=msg)
    if updated_since:
        try:
            timeutils.parse_isotime(updated_since)
        except ValueError:
            msg = _('Invalid changes-since value')
            raise exc.HTTPBadRequest(explanation=msg)
    self.cells_rpcapi.sync_instances(context, project_id=project_id,
                                     updated_since=updated_since,
                                     deleted=deleted)
def test_datetime_or_none(self):
    naive_dt = timeutils.utcnow()
    dt = timeutils.parse_isotime(timeutils.isotime(naive_dt))
    self.assertEqual(utils.datetime_or_none(dt), dt)
    self.assertEqual(utils.datetime_or_none(dt),
                     naive_dt.replace(tzinfo=iso8601.iso8601.Utc(),
                                      microsecond=0))
    self.assertEqual(utils.datetime_or_none(None), None)
    self.assertRaises(ValueError, utils.datetime_or_none, 'foo')
def approval(self, req):
    """Second part of the OAuth protocol authorization endpoint handler.

    Returns a webpage with the verification code, or redirects to the
    redirect_uri specified in the auth request.
    """
    code = req.POST.get("code")
    if code is None:
        json_body = {"error": "invalid_request"}
        raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body))
    client = self._clients.get(code)
    if client is None:
        json_body = {"error": "invalid_client"}
        raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body))
    if time.time() - client.auth_start_time > self.AUTH_TIMEOUT:
        raise webob.exc.HTTPRequestTimeout()
    redirect_uri = req.POST.get("redirect_uri")
    self._check_redirect_uri(redirect_uri)

    username = req.POST.get("username")
    password = req.POST.get("password")
    try:
        keystone = keystone_client.Client(
            username=username,
            password=password,
            auth_url=FLAGS.keystone_gce_url)
        token = keystone.auth_ref["token"]
        client.auth_token = token["id"]
        s = timeutils.parse_isotime(token["issued_at"])
        e = timeutils.parse_isotime(token["expires"])
        client.expires_in = (e - s).seconds
    except Exception as ex:
        return webob.exc.HTTPUnauthorized(ex)

    if redirect_uri == self.INTERNAL_REDIRECT_URI:
        return ("<html><body>Verification code is: " +
                client.code + "</body></html>")
    uri = redirect_uri + "?code=" + code
    raise webob.exc.HTTPFound(location=uri)
def test_sync_instances(self):
    # Reset this, as this is a broadcast down.
    self._setup_attrs(up=False)
    project_id = 'fake_project_id'
    updated_since_raw = 'fake_updated_since_raw'
    updated_since_parsed = 'fake_updated_since_parsed'
    deleted = 'fake_deleted'

    instance1 = dict(uuid='fake_uuid1', deleted=False)
    instance2 = dict(uuid='fake_uuid2', deleted=True)
    fake_instances = [instance1, instance2]

    self.mox.StubOutWithMock(self.tgt_msg_runner,
                             'instance_update_at_top')
    self.mox.StubOutWithMock(self.tgt_msg_runner,
                             'instance_destroy_at_top')

    self.mox.StubOutWithMock(timeutils, 'parse_isotime')
    self.mox.StubOutWithMock(cells_utils, 'get_instances_to_sync')

    # Middle cell.
    timeutils.parse_isotime(updated_since_raw).AndReturn(
        updated_since_parsed)
    cells_utils.get_instances_to_sync(self.ctxt,
                                      updated_since=updated_since_parsed,
                                      project_id=project_id,
                                      deleted=deleted).AndReturn([])

    # Bottom/Target cell
    timeutils.parse_isotime(updated_since_raw).AndReturn(
        updated_since_parsed)
    cells_utils.get_instances_to_sync(self.ctxt,
                                      updated_since=updated_since_parsed,
                                      project_id=project_id,
                                      deleted=deleted).AndReturn(
                                          fake_instances)
    self.tgt_msg_runner.instance_update_at_top(self.ctxt, instance1)
    self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance2)

    self.mox.ReplayAll()

    self.src_msg_runner.sync_instances(self.ctxt,
                                       project_id, updated_since_raw,
                                       deleted)
def test_dt_serializer(self):
    class Obj(object):
        foo = utils.dt_serializer("bar")

    obj = Obj()
    obj.bar = timeutils.parse_isotime("1955-11-05T00:00:00Z")
    self.assertEqual(obj.foo(), "1955-11-05T00:00:00Z")
    obj.bar = None
    self.assertEqual(obj.foo(), None)
    obj.bar = "foo"
    self.assertRaises(AttributeError, obj.foo)
def sync_instances(self, message, project_id, updated_since, deleted,
                   **kwargs):
    projid_str = project_id is None and "<all>" or project_id
    since_str = updated_since is None and "<all>" or updated_since
    LOG.info(_("Forcing a sync of instances, project_id="
               "%(projid_str)s, updated_since=%(since_str)s"), locals())
    if updated_since is not None:
        updated_since = timeutils.parse_isotime(updated_since)
    instances = cells_utils.get_instances_to_sync(
        message.ctxt, updated_since=updated_since, project_id=project_id,
        deleted=deleted)
    for instance in instances:
        self._sync_instance(message.ctxt, instance)
def test_dt_serializer(self):
    class Obj(object):
        foo = utils.dt_serializer('bar')

    obj = Obj()
    obj.bar = timeutils.parse_isotime('1955-11-05T00:00:00Z')
    self.assertEqual(obj.foo(), '1955-11-05T00:00:00Z')
    obj.bar = None
    self.assertEqual(obj.foo(), None)
    obj.bar = 'foo'
    self.assertRaises(AttributeError, obj.foo)
def _convert_timestamps_to_datetimes(image_meta):
    """Returns image with timestamp fields converted to datetime objects."""
    for attr in ['created_at', 'updated_at', 'deleted_at']:
        if image_meta.get(attr):
            try:
                image_meta[attr] = timeutils.parse_local_strtime(
                    image_meta[attr])
            except Exception:
                msg = (_("Error parsing local time format, "
                         "time string is %s") % image_meta[attr])
                LOG.debug(msg)
                image_meta[attr] = timeutils.parse_isotime(image_meta[attr])
    return image_meta
def test_disassociate_all_by_timeout(self, disassociate):
    now = timeutils.utcnow()
    now_tz = timeutils.parse_isotime(
        timeutils.isotime(now)).replace(tzinfo=iso8601.iso8601.Utc())
    disassociate.return_value = 123
    result = fixed_ip.FixedIP.disassociate_all_by_timeout(
        self.context, 'host', now)
    self.assertEqual(123, result)
    # NOTE(danms): be pedantic about timezone stuff
    args, kwargs = disassociate.call_args_list[0]
    self.assertEqual(now_tz, args[2])
    self.assertEqual((self.context, 'host'), args[:2])
    self.assertEqual({}, kwargs)
def coerce(obj, attr, value):
    if isinstance(value, six.string_types):
        # NOTE(danms): Being tolerant of isotime strings here will help us
        # during our objects transition
        value = timeutils.parse_isotime(value)
    elif not isinstance(value, datetime.datetime):
        raise ValueError(_('A datetime.datetime is required here'))
    if value.utcoffset() is None:
        # NOTE(danms): Legacy objects from sqlalchemy are stored in UTC,
        # but are returned without a timezone attached.
        # As a transitional aid, assume a tz-naive object is in UTC.
        value = value.replace(tzinfo=iso8601.iso8601.Utc())
    return value
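# A minimal sketch of the tz-naive path above, assuming iso8601 is available
# as in the surrounding code: a naive datetime gets UTC attached, which is
# the transitional assumption the NOTE describes.
import datetime
import iso8601

naive = datetime.datetime(2013, 8, 1, 11, 30, 25)
assert naive.utcoffset() is None
aware = naive.replace(tzinfo=iso8601.iso8601.Utc())
assert aware.utcoffset() == datetime.timedelta(0)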
def test_disassociate_all_by_timeout(self, disassociate):
    now = timeutils.utcnow()
    now_tz = timeutils.parse_isotime(
        timeutils.isotime(now)).replace(
            tzinfo=iso8601.iso8601.Utc())
    disassociate.return_value = 123
    result = fixed_ip.FixedIP.disassociate_all_by_timeout(self.context,
                                                          'host', now)
    self.assertEqual(123, result)
    # NOTE(danms): be pedantic about timezone stuff
    args, kwargs = disassociate.call_args_list[0]
    self.assertEqual(now_tz, args[2])
    self.assertEqual((self.context, 'host'), args[:2])
    self.assertEqual({}, kwargs)
def index(self, req):
    """Retrieve tenant_usage for all tenants."""
    context = req.environ['nova.context']

    authorize_list(context)

    (period_start, period_stop, detailed) = self._get_datetime_range(req)
    now = timeutils.parse_isotime(timeutils.strtime())
    if period_stop > now:
        period_stop = now
    usages = self._tenant_usages_for_period(context,
                                            period_start,
                                            period_stop,
                                            detailed=detailed)
    return {'tenant_usages': usages}
def test_datetime_deserialization(self):
    red_letter_date = timeutils.parse_isotime(
        timeutils.isotime(datetime.datetime(1955, 11, 5)))
    inst = instance.Instance(uuid="fake-uuid",
                             launched_at=red_letter_date)
    primitive = inst.obj_to_primitive()
    expected = {"nova_object.name": "Instance",
                "nova_object.namespace": "nova",
                "nova_object.version": "1.10",
                "nova_object.data":
                    {"uuid": "fake-uuid",
                     "launched_at": "1955-11-05T00:00:00Z"},
                "nova_object.changes": ["launched_at", "uuid"]}
    self.assertEqual(primitive, expected)
    inst2 = instance.Instance.obj_from_primitive(primitive)
    self.assertIsInstance(inst2.launched_at, datetime.datetime)
    self.assertEqual(inst2.launched_at, red_letter_date)
def sync_instances(self, message, project_id, updated_since, deleted,
                   **kwargs):
    projid_str = project_id is None and "<all>" or project_id
    since_str = updated_since is None and "<all>" or updated_since
    LOG.info(
        _("Forcing a sync of instances, project_id="
          "%(projid_str)s, updated_since=%(since_str)s"), locals())
    if updated_since is not None:
        updated_since = timeutils.parse_isotime(updated_since)
    instances = cells_utils.get_instances_to_sync(
        message.ctxt, updated_since=updated_since,
        project_id=project_id, deleted=deleted)
    for instance in instances:
        self._sync_instance(message.ctxt, instance)
def test_datetime_deserialization(self):
    red_letter_date = timeutils.parse_isotime(
        timeutils.isotime(datetime.datetime(1955, 11, 5)))
    inst = instance.Instance(uuid='fake-uuid',
                             launched_at=red_letter_date)
    primitive = inst.obj_to_primitive()
    expected = {'nova_object.name': 'Instance',
                'nova_object.namespace': 'nova',
                'nova_object.version': '1.9',
                'nova_object.data':
                    {'uuid': 'fake-uuid',
                     'launched_at': '1955-11-05T00:00:00Z'},
                'nova_object.changes': ['launched_at', 'uuid']}
    self.assertEqual(primitive, expected)
    inst2 = instance.Instance.obj_from_primitive(primitive)
    self.assertIsInstance(inst2.launched_at, datetime.datetime)
    self.assertEqual(inst2.launched_at, red_letter_date)
def test_datetime_deserialization(self):
    red_letter_date = timeutils.parse_isotime(
        timeutils.isotime(datetime.datetime(1955, 11, 5)))
    inst = instance.Instance(uuid='fake-uuid',
                             launched_at=red_letter_date)
    primitive = inst.obj_to_primitive()
    expected = {'nova_object.name': 'Instance',
                'nova_object.namespace': 'nova',
                'nova_object.version': '1.11',
                'nova_object.data':
                    {'uuid': 'fake-uuid',
                     'launched_at': '1955-11-05T00:00:00Z'},
                'nova_object.changes': ['launched_at', 'uuid']}
    self.assertEqual(primitive, expected)
    inst2 = instance.Instance.obj_from_primitive(primitive)
    self.assertIsInstance(inst2.launched_at, datetime.datetime)
    self.assertEqual(inst2.launched_at, red_letter_date)
def _update_cache_entry(self, state):
    entry = {}
    host = state['host_name']
    entry['trust_lvl'] = state['trust_lvl']
    try:
        # Normalize as naive object to interoperate with utcnow().
        entry['vtime'] = timeutils.normalize_time(
            timeutils.parse_isotime(state['vtime']))
    except ValueError:
        # Mark the system as untrusted if we get an invalid vtime.
        entry['trust_lvl'] = 'unknown'
        entry['vtime'] = timeutils.utcnow()
    self.compute_nodes[host] = entry
def index(self, req):
    """Retrieve tenant_usage for all tenants."""
    context = req.environ['nova.context']

    authorize_list(context)

    try:
        (period_start, period_stop, detailed) = self._get_datetime_range(
            req)
    except exception.InvalidStrTime as e:
        raise exc.HTTPBadRequest(explanation=e.format_message())
    now = timeutils.parse_isotime(timeutils.strtime())
    if period_stop > now:
        period_stop = now
    usages = self._tenant_usages_for_period(context,
                                            period_start,
                                            period_stop,
                                            detailed=detailed)
    return {'tenant_usages': usages}
def _request_admin_token(self):
    """Retrieve new token as admin user from keystone.

    :return token id upon success
    :raises ServerError when unable to communicate with keystone

    Irrespective of the auth version we are going to use for the user
    token, for simplicity we always use a v2 admin token to validate
    the user token.
    """
    params = {
        'auth': {
            'passwordCredentials': {
                'username': self.admin_user,
                'password': self.admin_password,
            },
            'tenantName': self.admin_tenant_name,
        }
    }

    response, data = self._json_request(self.keystone_auth_host,
                                        self.keystone_auth_port,
                                        'POST',
                                        '/v2.0/tokens',
                                        body=params)

    try:
        token = data['access']['token']['id']
        expiry = data['access']['token']['expires']
        assert token
        assert expiry
        datetime_expiry = timeutils.parse_isotime(expiry)
        return (token, timeutils.normalize_time(datetime_expiry))
    except (AssertionError, KeyError):
        self.LOG.warn(
            "Unexpected response from keystone service: %s", data)
        raise ServiceError('invalid json response')
    except ValueError:
        self.LOG.warn(
            "Unable to parse expiration time from token: %s", data)
        raise ServiceError('invalid json response')
def show(self, req, id):
    """Retrieve tenant_usage for a specified tenant."""
    tenant_id = id
    context = req.environ['nova.context']

    authorize_show(context, {'project_id': tenant_id})

    (period_start, period_stop, ignore) = self._get_datetime_range(req)
    now = timeutils.parse_isotime(timeutils.strtime())
    if period_stop > now:
        period_stop = now
    usage = self._tenant_usages_for_period(context,
                                           period_start,
                                           period_stop,
                                           tenant_id=tenant_id,
                                           detailed=True)
    if len(usage):
        usage = usage[0]
    else:
        usage = {}
    return {'tenant_usage': usage}
def _update_cache_entry(self, state):
    entry = {}
    host = state['host_name']
    entry['trust_lvl'] = state['trust_lvl']
    try:
        # Normalize as naive object to interoperate with utcnow().
        entry['vtime'] = timeutils.normalize_time(
            timeutils.parse_isotime(state['vtime']))
    except ValueError:
        try:
            # Mt. Wilson does not necessarily return an ISO8601 formatted
            # `vtime`, so we should try to parse it as a string formatted
            # datetime.
            vtime = timeutils.parse_strtime(state['vtime'], fmt="%c")
            entry['vtime'] = timeutils.normalize_time(vtime)
        except ValueError:
            # Mark the system as untrusted if we get an invalid vtime.
            entry['trust_lvl'] = 'unknown'
            entry['vtime'] = timeutils.utcnow()
    self.compute_nodes[host] = entry
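# Hedged sketch of the "%c" fallback above. "%c" is the locale's date/time
# representation; with a C/English locale, an asctime()-style string parses.
# datetime.strptime stands in for timeutils.parse_strtime, which wraps it;
# the vtime value here is hypothetical.
import datetime

vtime = "Sun Sep 09 05:10:40 2012"
parsed = datetime.datetime.strptime(vtime, "%c")
assert parsed == datetime.datetime(2012, 9, 9, 5, 10, 40)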
def test_trusted_filter_update_cache_timezone(self, req_mock):
    oat_data = {
        "hosts": [{
            "host_name": "node1",
            "trust_lvl": "untrusted",
            "vtime": "2012-09-09T05:10:40-04:00",
        }],
    }
    req_mock.return_value = requests.codes.OK, oat_data
    extra_specs = {'trust:trusted_host': 'untrusted'}
    filter_properties = {
        'context': mock.sentinel.ctx,
        'instance_type': {
            'memory_mb': 1024,
            'extra_specs': extra_specs,
        },
    }
    host = fakes.FakeHostState('host1', 'node1', {})

    timeutils.set_time_override(
        timeutils.normalize_time(
            timeutils.parse_isotime("2012-09-09T09:10:40Z")))
    self.filt_cls.host_passes(host, filter_properties)  # Fill the caches

    req_mock.reset_mock()
    self.filt_cls.host_passes(host, filter_properties)
    self.assertFalse(req_mock.called)

    req_mock.reset_mock()
    timeutils.advance_time_seconds(
        CONF.trusted_computing.attestation_auth_timeout - 10)
    self.filt_cls.host_passes(host, filter_properties)
    self.assertFalse(req_mock.called)

    timeutils.clear_time_override()
def _disassociate_all_by_timeout(cls, context, host, time_str):
    time = timeutils.parse_isotime(time_str)
    return db.fixed_ip_disassociate_all_by_timeout(context, host, time)
def datetime_or_str_or_none(val):
    if isinstance(val, basestring):
        return timeutils.parse_isotime(val)
    return datetime_or_none(val)
def dt_deserializer(instance, val):
    """A deserializer method for datetime attributes."""
    if val is None:
        return None
    else:
        return timeutils.parse_isotime(val)
def _convert_timestamps_to_datetimes(image_meta):
    """Returns image with timestamp fields converted to datetime objects."""
    for attr in ['created_at', 'updated_at', 'deleted_at']:
        if image_meta.get(attr):
            image_meta[attr] = timeutils.parse_isotime(image_meta[attr])
    return image_meta
def test_dt_deserializer(self):
    dt = timeutils.parse_isotime('1955-11-05T00:00:00Z')
    self.assertEqual(utils.dt_deserializer(None, timeutils.isotime(dt)),
                     dt)
    self.assertEqual(utils.dt_deserializer(None, None), None)
    self.assertRaises(ValueError, utils.dt_deserializer, None, 'foo')
def from_primitive(self, obj, attr, value):
    return self.coerce(obj, attr, timeutils.parse_isotime(value))
def _get_servers(self, req, is_detail):
    """Returns a list of servers, based on any search options specified."""

    search_opts = {}
    search_opts.update(req.GET)

    context = req.environ['nova.context']
    remove_invalid_options(context, search_opts,
                           self._get_server_search_options())

    # Verify search by 'status' contains a valid status.
    # Convert it to filter by vm_state or task_state for compute_api.
    status = search_opts.pop('status', None)
    if status is not None:
        vm_state, task_state = common.task_and_vm_state_from_status(status)
        if not vm_state and not task_state:
            return {'servers': []}
        search_opts['vm_state'] = vm_state
        # When we search by vm state, task state will return 'default'.
        # So we don't need task_state search_opt.
        if 'default' not in task_state:
            search_opts['task_state'] = task_state

    if 'changes_since' in search_opts:
        try:
            parsed = timeutils.parse_isotime(search_opts['changes_since'])
        except ValueError:
            msg = _('Invalid changes_since value')
            raise exc.HTTPBadRequest(explanation=msg)
        search_opts['changes_since'] = parsed

    # By default, compute's get_all() will return deleted instances.
    # If an admin hasn't specified a 'deleted' search option, we need
    # to filter out deleted instances by setting the filter ourselves.
    # ... Unless 'changes_since' is specified, because 'changes_since'
    # should return recently deleted images according to the API spec.
    if 'deleted' not in search_opts:
        if 'changes_since' not in search_opts:
            # No 'changes_since', so we only want non-deleted servers
            search_opts['deleted'] = False

    if 'changes_since' in search_opts:
        search_opts['changes-since'] = search_opts.pop('changes_since')

    if search_opts.get("vm_state") == ['deleted']:
        if context.is_admin:
            search_opts['deleted'] = True
        else:
            msg = _("Only administrators may list deleted instances")
            raise exc.HTTPBadRequest(explanation=msg)

    # If tenant_id is passed as a search parameter this should
    # imply that all_tenants is also enabled unless explicitly
    # disabled. Note that the tenant_id parameter is filtered out
    # by remove_invalid_options above unless the requestor is an
    # admin.
    if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
        # We do not need to add the all_tenants flag if the tenant
        # id associated with the token is the tenant id
        # specified. This is done so a request that does not need
        # the all_tenants flag does not fail because of lack of
        # policy permission for compute:get_all_tenants when it
        # doesn't actually need it.
        if context.project_id != search_opts.get('tenant_id'):
            search_opts['all_tenants'] = 1

    # If all tenants is passed with 0 or false as the value
    # then remove it from the search options. Nothing passed as
    # the value for all_tenants is considered to enable the feature
    all_tenants = search_opts.get('all_tenants')
    if all_tenants:
        try:
            if not strutils.bool_from_string(all_tenants, True):
                del search_opts['all_tenants']
        except ValueError as err:
            raise exception.InvalidInput(str(err))

    if 'all_tenants' in search_opts:
        policy.enforce(context, 'compute:get_all_tenants',
                       {'project_id': context.project_id,
                        'user_id': context.user_id})
        del search_opts['all_tenants']
    else:
        if context.project_id:
            search_opts['project_id'] = context.project_id
        else:
            search_opts['user_id'] = context.user_id

    limit, marker = common.get_limit_and_marker(req)
    try:
        instance_list = self.compute_api.get_all(
            context, search_opts=search_opts, limit=limit, marker=marker,
            want_objects=True, expected_attrs=['pci_devices'])
    except exception.MarkerNotFound:
        msg = _('marker [%s] not found') % marker
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.FlavorNotFound:
        log_msg = _("Flavor '%s' could not be found ")
        LOG.debug(log_msg, search_opts['flavor'])
        instance_list = []

    if is_detail:
        instance_list.fill_faults()
        response = self._view_builder.detail(req, instance_list)
    else:
        response = self._view_builder.index(req, instance_list)

    req.cache_db_instances(instance_list)
    return response