def assertEqualTokens(self, a, b, enforce_audit_ids=True):
    """Assert that two tokens are equal.

    Compare two tokens except for their ids. This also truncates
    the time in the comparison.
    """
    def normalize(token):
        token["access"]["token"]["id"] = "dummy"
        del token["access"]["token"]["expires"]
        del token["access"]["token"]["issued_at"]
        del token["access"]["token"]["audit_ids"]
        return token

    self.assertCloseEnoughForGovernmentWork(
        timeutils.parse_isotime(a["access"]["token"]["expires"]),
        timeutils.parse_isotime(b["access"]["token"]["expires"]),
    )
    self.assertCloseEnoughForGovernmentWork(
        timeutils.parse_isotime(a["access"]["token"]["issued_at"]),
        timeutils.parse_isotime(b["access"]["token"]["issued_at"]),
    )

    if enforce_audit_ids:
        self.assertIn(a["access"]["token"]["audit_ids"][0],
                      b["access"]["token"]["audit_ids"])
        self.assertThat(len(a["access"]["token"]["audit_ids"]),
                        matchers.LessThan(3))
        self.assertThat(len(b["access"]["token"]["audit_ids"]),
                        matchers.LessThan(3))

    return self.assertDictEqual(normalize(a), normalize(b))
def sync_instances(self, req, body):
    """Tell all cells to sync instance info."""
    context = req.environ['nova.context']
    authorize(context, action="sync_instances")
    project_id = body.pop('project_id', None)
    deleted = body.pop('deleted', False)
    updated_since = body.pop('updated_since', None)
    if body:
        msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
                "understood.")
        raise exc.HTTPBadRequest(explanation=msg)
    if isinstance(deleted, six.string_types):
        try:
            deleted = strutils.bool_from_string(deleted, strict=True)
        except ValueError as err:
            raise exc.HTTPBadRequest(explanation=six.text_type(err))
    if updated_since:
        try:
            timeutils.parse_isotime(updated_since)
        except ValueError:
            msg = _('Invalid changes-since value')
            raise exc.HTTPBadRequest(explanation=msg)
    self.cells_rpcapi.sync_instances(context, project_id=project_id,
                                     updated_since=updated_since,
                                     deleted=deleted)
def _validate_datetime_format(instance):
    try:
        timeutils.parse_isotime(instance)
    except ValueError:
        return False
    else:
        return True
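# NOTE: Minimal usage sketch for _validate_datetime_format() above. It
# assumes timeutils comes from oslo_utils; older code in this collection
# imports it from openstack.common or oslo.utils instead.
from oslo_utils import timeutils

assert _validate_datetime_format('2012-02-14T20:53:07Z')
assert _validate_datetime_format('2012-02-14T20:53:07+01:00')
assert not _validate_datetime_format('not-a-timestamp')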
def test_compare(self):
    zulu = timeutils.parse_isotime('2012-02-14T20:53:07')
    east = timeutils.parse_isotime('2012-02-14T20:53:07-01:00')
    west = timeutils.parse_isotime('2012-02-14T20:53:07+01:00')
    self.assertTrue(east > west)
    self.assertTrue(east > zulu)
    self.assertTrue(zulu > west)
def test_compare_micros(self):
    zulu = timeutils.parse_isotime('2012-02-14T20:53:07.6544')
    east = timeutils.parse_isotime('2012-02-14T19:53:07.654321-01:00')
    west = timeutils.parse_isotime('2012-02-14T21:53:07.655+01:00')
    self.assertTrue(east < west)
    self.assertTrue(east < zulu)
    self.assertTrue(zulu < west)
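# NOTE: A sketch of the property both comparison tests above rely on:
# parse_isotime() returns timezone-aware datetimes, so ordering is by
# absolute instant rather than by wall-clock digits. Assumes oslo_utils.
from oslo_utils import timeutils

east = timeutils.parse_isotime('2012-02-14T20:53:07-01:00')  # 21:53:07Z
west = timeutils.parse_isotime('2012-02-14T20:53:07+01:00')  # 19:53:07Z
assert timeutils.delta_seconds(west, east) == 7200.0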
def _add_to_revocation_list(self, data, lock):
    filtered_list = []
    revoked_token_data = {}

    current_time = self._get_current_time()
    expires = data['expires']

    if isinstance(expires, six.string_types):
        expires = timeutils.parse_isotime(expires)

    expires = timeutils.normalize_time(expires)

    if expires < current_time:
        LOG.warning(_('Token `%s` is expired, not adding to the '
                      'revocation list.'), data['id'])
        return

    revoked_token_data['expires'] = timeutils.isotime(expires,
                                                      subsecond=True)
    revoked_token_data['id'] = data['id']

    token_list = self._get_key_or_default(self.revocation_key, default=[])
    if not isinstance(token_list, list):
        # NOTE(morganfainberg): In the case that the revocation list is not
        # in a format we understand, reinitialize it. This is an attempt to
        # not allow the revocation list to be completely broken if
        # somehow the key is changed outside of keystone (e.g. memcache
        # that is shared by multiple applications). Logging occurs at error
        # level so that the cloud administrators have some awareness that
        # the revocation_list needed to be cleared out. In all, this should
        # be recoverable. Keystone cannot prevent external applications
        # from changing a key in some backends; however, it is possible to
        # gracefully handle and notify of this event.
        LOG.error(_('Reinitializing revocation list due to error '
                    'in loading revocation list from backend. '
                    'Expected `list` type got `%(type)s`. Old '
                    'revocation list data: %(list)r'),
                  {'type': type(token_list), 'list': token_list})
        token_list = []

    # NOTE(morganfainberg): on revocation, clean up the expired entries
    # and try to keep the list of revoked tokens at a minimum.
    for token_data in token_list:
        try:
            expires_at = timeutils.normalize_time(
                timeutils.parse_isotime(token_data['expires']))
        except ValueError:
            LOG.warning(_('Removing `%s` from revocation list due to '
                          'invalid expires data in revocation list.'),
                        token_data.get('id', 'INVALID_TOKEN_DATA'))
            continue
        if expires_at > current_time:
            filtered_list.append(token_data)

    filtered_list.append(revoked_token_data)
    self._set_key(self.revocation_key, filtered_list, lock)
def _hours_for(self, instance, period_start, period_stop):
    launched_at = instance.launched_at
    terminated_at = instance.terminated_at
    if terminated_at is not None:
        if not isinstance(terminated_at, datetime.datetime):
            # NOTE(mriedem): Instance object DateTime fields are
            # timezone-aware so convert using isotime.
            terminated_at = timeutils.parse_isotime(terminated_at)

    if launched_at is not None:
        if not isinstance(launched_at, datetime.datetime):
            launched_at = timeutils.parse_isotime(launched_at)

    # nothing if it stopped before the usage report started
    if terminated_at and terminated_at < period_start:
        return 0
    # nothing if it started after the usage report ended
    if launched_at and launched_at > period_stop:
        return 0
    if launched_at:
        # if instance launched after period_started, don't charge for first
        start = max(launched_at, period_start)
        if terminated_at:
            # if instance stopped before period_stop, don't charge after
            stop = min(period_stop, terminated_at)
        else:
            # instance is still running, so charge them up to current time
            stop = period_stop
        dt = stop - start
        # convert the timedelta to seconds; a microsecond is 1e-6 seconds
        seconds = (dt.days * 3600 * 24 + dt.seconds +
                   dt.microseconds / 1000000.0)
        return seconds / 3600.0
    else:
        # instance hasn't launched, so no charge
        return 0
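# NOTE: A worked example of the clamping arithmetic above with plain
# datetimes instead of a real instance object (all values hypothetical).
import datetime

period_start = datetime.datetime(2012, 8, 1, 0, 0)
period_stop = datetime.datetime(2012, 9, 1, 0, 0)
launched_at = datetime.datetime(2012, 7, 31, 12, 0)   # before the period
terminated_at = datetime.datetime(2012, 8, 1, 6, 0)   # inside the period

start = max(launched_at, period_start)  # clamped to period_start
stop = min(period_stop, terminated_at)  # instance stopped early
dt = stop - start
hours = (dt.days * 3600 * 24 + dt.seconds +
         dt.microseconds / 1000000.0) / 3600.0
assert hours == 6.0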
def build_token_values(token_data):
    token_expires_at = timeutils.parse_isotime(token_data['expires_at'])

    # Trim off the microseconds because the revocation event only has
    # expirations accurate to the second.
    token_expires_at = token_expires_at.replace(microsecond=0)

    token_values = {
        'expires_at': timeutils.normalize_time(token_expires_at),
        'issued_at': timeutils.normalize_time(
            timeutils.parse_isotime(token_data['issued_at']))}

    user = token_data.get('user')
    if user is not None:
        token_values['user_id'] = user['id']
        token_values['identity_domain_id'] = user['domain']['id']
    else:
        token_values['user_id'] = None
        token_values['identity_domain_id'] = None

    project = token_data.get('project', token_data.get('tenant'))
    if project is not None:
        token_values['project_id'] = project['id']
        token_values['assignment_domain_id'] = project['domain']['id']
    else:
        token_values['project_id'] = None

        domain = token_data.get('domain')
        if domain is not None:
            token_values['assignment_domain_id'] = domain['id']
        else:
            token_values['assignment_domain_id'] = None

    role_list = []
    roles = token_data.get('roles')
    if roles is not None:
        for role in roles:
            role_list.append(role['id'])
    token_values['roles'] = role_list

    trust = token_data.get('OS-TRUST:trust')
    if trust is None:
        token_values['trust_id'] = None
        token_values['trustor_id'] = None
        token_values['trustee_id'] = None
    else:
        token_values['trust_id'] = trust['id']
        token_values['trustor_id'] = trust['trustor_user']['id']
        token_values['trustee_id'] = trust['trustee_user']['id']

    oauth1 = token_data.get('OS-OAUTH1')
    if oauth1 is None:
        token_values['consumer_id'] = None
        token_values['access_token_id'] = None
    else:
        token_values['consumer_id'] = oauth1['consumer_id']
        token_values['access_token_id'] = oauth1['access_token_id']
    return token_values
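# NOTE: Sketch of the microsecond trim performed above; normalize_time()
# then strips the offset so the value matches second-granularity
# revocation events. Assumes oslo_utils.
from oslo_utils import timeutils

expires = timeutils.parse_isotime('2014-02-14T20:53:07.654321Z')
expires = expires.replace(microsecond=0)
assert timeutils.normalize_time(expires).isoformat() == '2014-02-14T20:53:07'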
def assertServerUsage(self, server, launched_at, terminated_at):
    resp_launched_at = timeutils.parse_isotime(
        server.get('%slaunched_at' % self.prefix))
    self.assertEqual(timeutils.normalize_time(resp_launched_at),
                     launched_at)
    resp_terminated_at = timeutils.parse_isotime(
        server.get('%sterminated_at' % self.prefix))
    self.assertEqual(timeutils.normalize_time(resp_terminated_at),
                     terminated_at)
def _get_active_by_window_joined(cls, context, begin, end=None,
                                 project_id=None, host=None,
                                 expected_attrs=None, use_slave=False):
    # NOTE(mriedem): We need to convert the begin/end timestamp strings
    # to timezone-aware datetime objects for the DB API call.
    begin = timeutils.parse_isotime(begin)
    end = timeutils.parse_isotime(end) if end else None
    db_inst_list = db.instance_get_active_by_window_joined(
        context, begin, end, project_id, host,
        columns_to_join=_expected_cols(expected_attrs))
    return _make_instance_list(context, cls(), db_inst_list,
                               expected_attrs)
def test_token_expiry_maintained(self, mock_utcnow):
    now = datetime.datetime.utcnow()
    mock_utcnow.return_value = now
    foo_client = self.get_client(self.user_foo)

    orig_token = foo_client.service_catalog.catalog['token']
    mock_utcnow.return_value = now + datetime.timedelta(seconds=1)
    reauthenticated_token = foo_client.tokens.authenticate(
        token=foo_client.auth_token)

    self.assertCloseEnoughForGovernmentWork(
        timeutils.parse_isotime(orig_token['expires']),
        timeutils.parse_isotime(reauthenticated_token.expires))
def build_token_values(token_data):
    token_values = {
        'expires_at': timeutils.normalize_time(
            timeutils.parse_isotime(token_data['expires_at'])),
        'issued_at': timeutils.normalize_time(
            timeutils.parse_isotime(token_data['issued_at']))}

    user = token_data.get('user')
    if user is not None:
        token_values['user_id'] = user['id']
        token_values['identity_domain_id'] = user['domain']['id']
    else:
        token_values['user_id'] = None
        token_values['identity_domain_id'] = None

    project = token_data.get('project', token_data.get('tenant'))
    if project is not None:
        token_values['project_id'] = project['id']
        token_values['assignment_domain_id'] = project['domain']['id']
    else:
        token_values['project_id'] = None
        token_values['assignment_domain_id'] = None

    role_list = []
    roles = token_data.get('roles')
    if roles is not None:
        for role in roles:
            role_list.append(role['id'])
    token_values['roles'] = role_list

    trust = token_data.get('OS-TRUST:trust')
    if trust is None:
        token_values['trust_id'] = None
        token_values['trustor_id'] = None
        token_values['trustee_id'] = None
    else:
        token_values['trust_id'] = trust['id']
        token_values['trustor_id'] = trust['trustor_user']['id']
        token_values['trustee_id'] = trust['trustee_user']['id']

    oauth1 = token_data.get('OS-OAUTH1')
    if oauth1 is None:
        token_values['consumer_id'] = None
        token_values['access_token_id'] = None
    else:
        token_values['consumer_id'] = oauth1['consumer_id']
        token_values['access_token_id'] = oauth1['access_token_id']
    return token_values
def _get_active_by_window_joined(cls, context, begin, end=None,
                                 project_id=None, host=None,
                                 expected_attrs=None, use_slave=False):
    # NOTE(mriedem): We need to convert the begin/end timestamp strings
    # to timezone-aware datetime objects for the DB API call.
    begin = timeutils.parse_isotime(begin)
    end = timeutils.parse_isotime(end) if end else None
    db_inst_list = db.instance_get_active_by_window_joined(context, begin,
                                                           end, project_id,
                                                           host)
    return _make_instance_list(context, cls(), db_inst_list,
                               expected_attrs)
def build_token_values_v2(access, default_domain_id):
    token_data = access['token']

    token_expires_at = timeutils.parse_isotime(token_data['expires'])

    # Trim off the microseconds because the revocation event only has
    # expirations accurate to the second.
    token_expires_at = token_expires_at.replace(microsecond=0)

    token_values = {
        'expires_at': timeutils.normalize_time(token_expires_at),
        'issued_at': timeutils.normalize_time(
            timeutils.parse_isotime(token_data['issued_at'])),
        'audit_id': token_data.get('audit_ids', [None])[0],
        'audit_chain_id': token_data.get('audit_ids', [None])[-1],
    }

    token_values['user_id'] = access.get('user', {}).get('id')

    project = token_data.get('tenant')
    if project is not None:
        token_values['project_id'] = project['id']
    else:
        token_values['project_id'] = None

    token_values['identity_domain_id'] = default_domain_id
    token_values['assignment_domain_id'] = default_domain_id

    trust = token_data.get('trust')
    if trust is None:
        token_values['trust_id'] = None
        token_values['trustor_id'] = None
        token_values['trustee_id'] = None
    else:
        token_values['trust_id'] = trust['id']
        token_values['trustor_id'] = trust['trustor_id']
        token_values['trustee_id'] = trust['trustee_id']

    token_values['consumer_id'] = None
    token_values['access_token_id'] = None

    role_list = []
    # Roles are by ID in metadata and by name in the user section
    roles = access.get('metadata', {}).get('roles', [])
    for role in roles:
        role_list.append(role)
    token_values['roles'] = role_list
    return token_values
def record_metering_data(self, data):
    # We may have received only one counter on the wire
    if not isinstance(data, list):
        data = [data]

    for meter in data:
        LOG.debug(_(
            'metering data %(counter_name)s '
            'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s')
            % ({'counter_name': meter['counter_name'],
                'resource_id': meter['resource_id'],
                'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
                'counter_volume': meter['counter_volume']}))
        if publisher_utils.verify_signature(
                meter, self.conf.publisher.metering_secret):
            try:
                # Convert the timestamp to a datetime instance.
                # Storage engines are responsible for converting
                # that value to something they can store.
                if meter.get('timestamp'):
                    ts = timeutils.parse_isotime(meter['timestamp'])
                    meter['timestamp'] = timeutils.normalize_time(ts)
                self.storage_conn.record_metering_data(meter)
            except Exception as err:
                LOG.exception(_('Failed to record metering data: %s'),
                              err)
        else:
            LOG.warning(_(
                'message signature invalid, discarding message: %r'),
                meter)
def record_metering_data(self, data):
    # We may have received only one counter on the wire
    if not isinstance(data, list):
        data = [data]

    for meter in data:
        LOG.debug(_(
            'metering data %(counter_name)s '
            'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s')
            % ({'counter_name': meter['counter_name'],
                'resource_id': meter['resource_id'],
                'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
                'counter_volume': meter['counter_volume']}))
        if publisher_utils.verify_signature(
                meter, self.conf.publisher.metering_secret):
            try:
                # Convert the timestamp to a datetime instance.
                # Storage engines are responsible for converting
                # that value to something they can store.
                if meter.get('timestamp'):
                    ts = timeutils.parse_isotime(meter['timestamp'])
                    meter['timestamp'] = timeutils.normalize_time(ts)
                self.meter_conn.record_metering_data(meter)
            except Exception as err:
                LOG.exception(_('Failed to record metering data: %s'),
                              err)
        else:
            LOG.warning(_(
                'message signature invalid, discarding message: %r'),
                meter)
def test_datetime_or_str_or_none(self):
    dts = timeutils.isotime()
    dt = timeutils.parse_isotime(dts)
    self.assertEqual(utils.datetime_or_str_or_none(dt), dt)
    self.assertIsNone(utils.datetime_or_str_or_none(None))
    self.assertEqual(utils.datetime_or_str_or_none(dts), dt)
    self.assertRaises(ValueError,
                      utils.datetime_or_str_or_none, 'foo')
def handle_sample(self, context, s):
    """Handle a sample, converting if necessary."""
    LOG.debug(_('handling sample %s'), (s,))
    key = s.name + s.resource_id
    prev = self.cache.get(key)
    timestamp = timeutils.parse_isotime(s.timestamp)
    self.cache[key] = (s.volume, timestamp)

    if prev:
        prev_volume = prev[0]
        prev_timestamp = prev[1]
        time_delta = timeutils.delta_seconds(prev_timestamp, timestamp)
        # we only allow negative deltas for noncumulative samples, whereas
        # for cumulative we assume that a reset has occurred in the interim
        # so that the current volume gives a lower bound on growth
        volume_delta = (s.volume - prev_volume
                        if (prev_volume <= s.volume or
                            s.type != sample.TYPE_CUMULATIVE)
                        else s.volume)
        rate_of_change = ((1.0 * volume_delta / time_delta)
                          if time_delta else 0.0)

        s = self._convert(s, rate_of_change)
        LOG.debug(_('converted to: %s'), (s,))
    else:
        LOG.warn(_('dropping sample with no predecessor: %s'), (s,))
        s = None
    return s
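# NOTE: A worked example of the rate-of-change math above with
# hypothetical sample values: a cumulative counter that grew by 300
# units over 60 seconds. Assumes oslo_utils.
from oslo_utils import timeutils

prev_volume = 100.0
prev_timestamp = timeutils.parse_isotime('2012-08-27T10:00:00Z')
volume = 400.0
timestamp = timeutils.parse_isotime('2012-08-27T10:01:00Z')

time_delta = timeutils.delta_seconds(prev_timestamp, timestamp)
rate_of_change = 1.0 * (volume - prev_volume) / time_delta
assert rate_of_change == 5.0  # units per second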
def _is_valid_token(self, token):
    """Verify the token is in a valid format and has not expired."""
    current_time = timeutils.normalize_time(timeutils.utcnow())

    try:
        # Get the data we need from the correct location (V2 and V3 tokens
        # differ in structure, so try V3 first and fall back to V2)
        token_data = token.get('token', token.get('access'))
        expires_at = token_data.get('expires_at',
                                    token_data.get('expires'))
        if not expires_at:
            expires_at = token_data['token']['expires']
        expiry = timeutils.normalize_time(
            timeutils.parse_isotime(expires_at))
    except Exception:
        LOG.exception(_('Unexpected error or malformed token determining '
                        'token expiry: %s'), token)
        raise exception.TokenNotFound(_('Failed to validate token'))

    if current_time < expiry:
        self.check_revocation(token)
        # Token has not expired and has not been revoked.
        return None
    else:
        raise exception.TokenNotFound(_('Failed to validate token'))
def sanitize_timestamp(timestamp):
    """Return a naive utc datetime object."""
    if not timestamp:
        return timestamp
    if not isinstance(timestamp, datetime.datetime):
        timestamp = timeutils.parse_isotime(timestamp)
    return timeutils.normalize_time(timestamp)
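# NOTE: Usage sketch for sanitize_timestamp() above: an offset-bearing
# ISO string comes back as a naive UTC datetime.
import datetime

naive = sanitize_timestamp('2012-09-09T05:10:40-04:00')
assert naive == datetime.datetime(2012, 9, 9, 9, 10, 40)
assert naive.tzinfo is None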
def show(self, req, id):
    """Retrieve tenant_usage for a specified tenant."""
    tenant_id = id
    context = req.environ['nova.context']

    authorize_show(context, {'project_id': tenant_id})

    try:
        (period_start, period_stop, ignore) = self._get_datetime_range(
            req)
    except exception.InvalidStrTime as e:
        raise exc.HTTPBadRequest(explanation=e.format_message())
    now = timeutils.parse_isotime(timeutils.strtime())
    if period_stop > now:
        period_stop = now
    usage = self._tenant_usages_for_period(context,
                                           period_start,
                                           period_stop,
                                           tenant_id=tenant_id,
                                           detailed=True)
    if len(usage):
        usage = usage[0]
    else:
        usage = {}
    return {'tenant_usage': usage}
def get_events(self, event_filter):
    iclient = es.client.IndicesClient(self.conn)
    indices = iclient.get_mapping('%s_*' % self.index_name).keys()
    if indices:
        filter_args = self._make_dsl_from_filter(indices, event_filter)
        results = self.conn.search(fields=['_id', 'timestamp',
                                           '_type', '_source'],
                                   sort='timestamp:asc',
                                   **filter_args)
        trait_mappings = {}
        for record in results['hits']['hits']:
            trait_list = []
            if record['_type'] not in trait_mappings:
                trait_mappings[record['_type']] = list(
                    self.get_trait_types(record['_type']))
            for key in record['_source']['traits'].keys():
                value = record['_source']['traits'][key]
                for t_map in trait_mappings[record['_type']]:
                    if t_map['name'] == key:
                        dtype = t_map['data_type']
                        break
                trait_list.append(models.Trait(
                    name=key, dtype=dtype,
                    value=models.Trait.convert_value(dtype, value)))
            gen_ts = timeutils.normalize_time(timeutils.parse_isotime(
                record['_source']['timestamp']))
            yield models.Event(message_id=record['_id'],
                               event_type=record['_type'],
                               generated=gen_ts,
                               traits=sorted(
                                   trait_list,
                                   key=operator.attrgetter('dtype')))
def test_trusted_filter_update_cache_timezone(self, req_mock):
    oat_data = {"hosts": [{"host_name": "node1",
                           "trust_lvl": "untrusted",
                           "vtime": "2012-09-09T05:10:40-04:00"}]}
    req_mock.return_value = requests.codes.OK, oat_data
    extra_specs = {'trust:trusted_host': 'untrusted'}
    filter_properties = {'context': mock.sentinel.ctx,
                         'instance_type': {'memory_mb': 1024,
                                           'extra_specs': extra_specs}}
    host = fakes.FakeHostState('host1', 'node1', {})

    timeutils.set_time_override(
        timeutils.normalize_time(
            timeutils.parse_isotime("2012-09-09T09:10:40Z")))
    self.filt_cls.host_passes(host, filter_properties)  # Fill the caches

    req_mock.reset_mock()
    self.filt_cls.host_passes(host, filter_properties)
    self.assertFalse(req_mock.called)

    req_mock.reset_mock()

    timeutils.advance_time_seconds(
        CONF.trusted_computing.attestation_auth_timeout - 10)
    self.filt_cls.host_passes(host, filter_properties)
    self.assertFalse(req_mock.called)

    timeutils.clear_time_override()
def test_multiple_samples(self):
    """Send multiple samples.

    The use case here is to reduce the chatter and send the counters
    at a slower cadence.
    """
    samples = []
    for x in range(6):
        dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None)
        s = {'counter_name': 'apples',
             'counter_type': 'gauge',
             'counter_unit': 'instance',
             'counter_volume': float(x * 3),
             'source': 'evil',
             'timestamp': dt.isoformat(),
             'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
             'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
             'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
             'resource_metadata': {'name1': str(x),
                                   'name2': str(x + 4)}}
        samples.append(s)

    data = self.post_json('/meters/apples/', samples)

    for x, s in enumerate(samples):
        # source is modified to include the project_id.
        s['source'] = '%s:%s' % (s['project_id'], s['source'])
        # Ignore message id that is randomly generated
        s['message_id'] = data.json[x]['message_id']
        # remove tzinfo to compare generated timestamp
        # with the provided one
        c = data.json[x]
        timestamp = timeutils.parse_isotime(c['timestamp'])
        c['timestamp'] = timestamp.replace(tzinfo=None).isoformat()

        # do the same on the pipeline
        msg = self.published[0][x]
        timestamp = timeutils.parse_isotime(msg['timestamp'])
        msg['timestamp'] = timestamp.replace(tzinfo=None).isoformat()

        self.assertEqual(s, c)
        self.assertEqual(s, self.published[0][x])
def authenticate(self, context, auth_payload, user_context):
    try:
        if 'id' not in auth_payload:
            raise exception.ValidationError(attribute='id',
                                            target=self.method)
        token_id = auth_payload['id']
        response = self.token_provider_api.validate_token(token_id)
        # For V3 tokens, the essential data is under the 'token' value.
        # For V2, the comparable data was nested under 'access'.
        token_ref = response.get('token', response.get('access'))

        # Do not allow tokens used for delegation to create another
        # token, or perform any changes of state in Keystone. To do so
        # is to invite elevation-of-privilege attacks.
        if 'OS-TRUST:trust' in token_ref:
            raise exception.Forbidden()
        if 'trust' in token_ref:
            raise exception.Forbidden()
        if 'trust_id' in token_ref.get('metadata', {}):
            raise exception.Forbidden()
        if 'OS-OAUTH1' in token_ref:
            raise exception.Forbidden()

        wsgi.validate_token_bind(context, token_ref)

        # New tokens maintain the audit_id of the original token in the
        # chain (if possible) as the second element in the audit data
        # structure. Look for the last element in the audit data structure
        # which will be either the audit_id of the token (in the case of
        # a token that has not been rescoped) or the audit_chain id (in
        # the case of a token that has been rescoped).
        try:
            token_audit_id = token_ref.get('audit_ids', [])[-1]
        except IndexError:
            # NOTE(morganfainberg): In the case this is a token that was
            # issued prior to audit id existing, the chain is not tracked.
            token_audit_id = None

        # New tokens are not allowed to extend the expiration time of an
        # old token; otherwise, they could be extended forever. The
        # expiration value was stored at different locations in v2 and
        # v3 tokens.
        expires_at = token_ref.get('expires_at')
        if not expires_at:
            expires_at = token_ref.get('expires')
        if not expires_at:
            expires_at = timeutils.normalize_time(
                timeutils.parse_isotime(token_ref['token']['expires']))

        user_context.setdefault('expires_at', expires_at)
        user_context['audit_id'] = token_audit_id
        user_context.setdefault('user_id', token_ref['user']['id'])
        user_context['extras'].update(token_ref.get('extras', {}))
        user_context['method_names'].extend(token_ref.get('methods', []))

    except AssertionError as e:
        LOG.error(e)
        raise exception.Unauthorized(e)
def convert_value(cls, trait_type, value):
    if trait_type is cls.INT_TYPE:
        return int(value)
    if trait_type is cls.FLOAT_TYPE:
        return float(value)
    if trait_type is cls.DATETIME_TYPE:
        return timeutils.normalize_time(timeutils.parse_isotime(value))
    return str(value)
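# NOTE: Minimal sketch of the DATETIME_TYPE branch above: the parsed
# aware datetime is shifted to naive UTC by normalize_time(). Assumes
# oslo_utils.
from oslo_utils import timeutils

dt = timeutils.normalize_time(
    timeutils.parse_isotime('2012-02-14T20:53:07+01:00'))
assert dt.isoformat() == '2012-02-14T19:53:07'
assert dt.tzinfo is None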
def _init_cache_entry(self, host):
    self.compute_nodes[host] = {
        'trust_lvl': 'unknown',
        'vtime': timeutils.normalize_time(
            timeutils.parse_isotime("1970-01-01T00:00:00Z"))}
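# NOTE: The sentinel above is just the parsed Unix epoch, so any real
# attestation vtime will compare as newer. A sketch, assuming oslo_utils.
from oslo_utils import timeutils

epoch = timeutils.normalize_time(
    timeutils.parse_isotime('1970-01-01T00:00:00Z'))
assert epoch < timeutils.utcnow()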
def test_datetime_or_none(self):
    naive_dt = datetime.datetime.now()
    dt = timeutils.parse_isotime(timeutils.isotime(naive_dt))
    self.assertEqual(utils.datetime_or_none(dt), dt)
    self.assertEqual(utils.datetime_or_none(dt),
                     naive_dt.replace(tzinfo=iso8601.iso8601.Utc(),
                                      microsecond=0))
    self.assertIsNone(utils.datetime_or_none(None))
    self.assertRaises(ValueError, utils.datetime_or_none, 'foo')
def _parse_expiration_date(self, expiration_date):
    if expiration_date is None:
        return None
    if not expiration_date.endswith('Z'):
        expiration_date += 'Z'
    try:
        return timeutils.parse_isotime(expiration_date)
    except ValueError:
        raise exception.ValidationTimeStampError()
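# NOTE: Sketch of the 'Z'-suffix guard above: a naive expiration string
# is pinned to UTC before parsing. Assumes oslo_utils.
from oslo_utils import timeutils

expiration_date = '2015-02-14T20:53:07'
if not expiration_date.endswith('Z'):
    expiration_date += 'Z'
parsed = timeutils.parse_isotime(expiration_date)
assert parsed.utcoffset().total_seconds() == 0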
def authorize_request_token(self, context, request_token_id, roles):
    """An authenticated user is going to authorize a request token.

    As a security precaution, the requested roles must match those in
    the request token. Because this is in a CLI-only world at the
    moment, there is not another easy way to make sure the user knows
    which roles are being requested before authorizing.
    """
    auth_context = context.get("environment",
                               {}).get("KEYSTONE_AUTH_CONTEXT", {})
    if auth_context.get("is_delegated_auth"):
        raise exception.Forbidden(_("Cannot authorize a request token"
                                    " with a token issued via delegation."))

    req_token = self.oauth_api.get_request_token(request_token_id)

    expires_at = req_token["expires_at"]
    if expires_at:
        now = timeutils.utcnow()
        expires = timeutils.normalize_time(
            timeutils.parse_isotime(expires_at))
        if now > expires:
            raise exception.Unauthorized(_("Request token is expired"))

    # put the roles in a set for easy comparison
    authed_roles = set()
    for role in roles:
        authed_roles.add(role["id"])

    # verify the authorizing user has the roles
    user_token = token_model.KeystoneToken(
        token_id=context["token_id"],
        token_data=self.token_provider_api.validate_token(
            context["token_id"]))
    user_id = user_token.user_id
    project_id = req_token["requested_project_id"]
    user_roles = self.assignment_api.get_roles_for_user_and_project(
        user_id, project_id)
    cred_set = set(user_roles)

    if not cred_set.issuperset(authed_roles):
        msg = _("authorizing user does not have role required")
        raise exception.Unauthorized(message=msg)

    # create list of just the id's for the backend
    role_list = list(authed_roles)

    # verify the user has the project too
    req_project_id = req_token["requested_project_id"]
    user_projects = self.assignment_api.list_projects_for_user(user_id)
    for user_project in user_projects:
        if user_project["id"] == req_project_id:
            break
    else:
        msg = _("User is not a member of the requested project")
        raise exception.Unauthorized(message=msg)

    # finally authorize the token
    authed_token = self.oauth_api.authorize_request_token(
        request_token_id, user_id, role_list)

    to_return = {"token": {"oauth_verifier": authed_token["verifier"]}}
    return to_return
def coerce(obj, attr, value):
    if isinstance(value, six.string_types):
        # NOTE(danms): Being tolerant of isotime strings here will help
        # us during our objects transition
        value = timeutils.parse_isotime(value)
    elif not isinstance(value, datetime.datetime):
        raise ValueError('A datetime.datetime is required here')
    return value
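# NOTE: Usage sketch for the coercion helper above (the field machinery
# around it is elided): strings are parsed, datetimes pass through,
# anything else raises ValueError. The obj/attr arguments are unused by
# this helper, so None and a hypothetical field name are fine here.
import datetime

now = datetime.datetime(2014, 1, 1)
assert coerce(None, 'launched_at', now) is now
parsed = coerce(None, 'launched_at', '2014-01-01T00:00:00Z')
assert isinstance(parsed, datetime.datetime)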
def assertEqualTokens(self, a, b):
    """Assert that two tokens are equal.

    Compare two tokens except for their ids. This also truncates
    the time in the comparison.
    """
    def normalize(token):
        token['access']['token']['id'] = 'dummy'
        del token['access']['token']['expires']
        del token['access']['token']['issued_at']
        return token

    self.assertCloseEnoughForGovernmentWork(
        timeutils.parse_isotime(a['access']['token']['expires']),
        timeutils.parse_isotime(b['access']['token']['expires']))
    self.assertCloseEnoughForGovernmentWork(
        timeutils.parse_isotime(a['access']['token']['issued_at']),
        timeutils.parse_isotime(b['access']['token']['issued_at']))
    return self.assertDictEqual(normalize(a), normalize(b))
def build_token_values_v2(access, default_domain_id):
    token_data = access['token']
    token_values = {
        'expires_at': timeutils.normalize_time(
            timeutils.parse_isotime(token_data['expires'])),
        'issued_at': timeutils.normalize_time(
            timeutils.parse_isotime(token_data['issued_at']))}

    token_values['user_id'] = access.get('user', {}).get('id')

    project = token_data.get('tenant')
    if project is not None:
        token_values['project_id'] = project['id']
    else:
        token_values['project_id'] = None

    token_values['identity_domain_id'] = default_domain_id
    token_values['assignment_domain_id'] = default_domain_id

    trust = token_data.get('trust')
    if trust is None:
        token_values['trust_id'] = None
        token_values['trustor_id'] = None
        token_values['trustee_id'] = None
    else:
        token_values['trust_id'] = trust['id']
        token_values['trustor_id'] = trust['trustor_id']
        token_values['trustee_id'] = trust['trustee_id']

    token_values['consumer_id'] = None
    token_values['access_token_id'] = None

    role_list = []
    # Roles are by ID in metadata and by name in the user section
    roles = access.get('metadata', {}).get('roles', [])
    for role in roles:
        role_list.append(role)
    token_values['roles'] = role_list
    return token_values
def test_dt_serializer(self):
    class Obj(object):
        foo = utils.dt_serializer('bar')

    obj = Obj()
    obj.bar = timeutils.parse_isotime('1955-11-05T00:00:00Z')
    self.assertEqual('1955-11-05T00:00:00Z', obj.foo())
    obj.bar = None
    self.assertIsNone(obj.foo())
    obj.bar = 'foo'
    self.assertRaises(AttributeError, obj.foo)
def test_building_unscoped_accessinfo(self):
    auth_ref = access.AccessInfo.factory(resp=TOKEN_RESPONSE,
                                         body=UNSCOPED_TOKEN)

    self.assertTrue(auth_ref)
    self.assertIn('methods', auth_ref)
    self.assertNotIn('catalog', auth_ref)

    self.assertEqual(auth_ref.auth_token,
                     '3e2813b7ba0b4006840c3825860b86ed')
    self.assertEqual(auth_ref.username, 'exampleuser')
    self.assertEqual(auth_ref.user_id,
                     'c4da488862bd435c9e6c0275a0d0e49a')

    self.assertEqual(auth_ref.role_ids, [])
    self.assertEqual(auth_ref.role_names, [])

    self.assertIsNone(auth_ref.project_name)
    self.assertIsNone(auth_ref.project_id)

    self.assertIsNone(auth_ref.auth_url)
    self.assertIsNone(auth_ref.management_url)

    self.assertFalse(auth_ref.domain_scoped)
    self.assertFalse(auth_ref.project_scoped)

    self.assertEqual(auth_ref.user_domain_id,
                     '4e6893b7ba0b4006840c3845660b86ed')
    self.assertEqual(auth_ref.user_domain_name, 'exampledomain')

    self.assertIsNone(auth_ref.project_domain_id)
    self.assertIsNone(auth_ref.project_domain_name)

    self.assertEqual(
        auth_ref.expires,
        timeutils.parse_isotime(UNSCOPED_TOKEN['token']['expires_at']))
    self.assertEqual(
        auth_ref.issued,
        timeutils.parse_isotime(UNSCOPED_TOKEN['token']['issued_at']))

    self.assertEqual(auth_ref.expires, UNSCOPED_TOKEN.expires)
    self.assertEqual(auth_ref.issued, UNSCOPED_TOKEN.issued)
def test_disassociate_all_by_timeout(self, disassociate):
    now = timeutils.utcnow()
    now_tz = timeutils.parse_isotime(
        timeutils.isotime(now)).replace(
            tzinfo=iso8601.iso8601.Utc())
    disassociate.return_value = 123
    result = fixed_ip.FixedIP.disassociate_all_by_timeout(self.context,
                                                          'host',
                                                          now)
    self.assertEqual(123, result)
    # NOTE(danms): be pedantic about timezone stuff
    args, kwargs = disassociate.call_args_list[0]
    self.assertEqual(now_tz, args[2])
    self.assertEqual((self.context, 'host'), args[:2])
    self.assertEqual({}, kwargs)
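# NOTE: What the round-trip in the test above relies on: isotime()
# truncates to whole seconds, and parse_isotime() re-attaches UTC.
# A sketch, assuming oslo_utils.
from oslo_utils import timeutils

now = timeutils.utcnow()  # naive
round_tripped = timeutils.parse_isotime(timeutils.isotime(now))
assert round_tripped.tzinfo is not None
assert round_tripped.replace(tzinfo=None) == now.replace(microsecond=0)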
def _extract_when(body):
    """Extract the generated datetime from the notification."""
    # NOTE: I am keeping the logic the same as it was in the collector;
    # however, *ALL* notifications should have a 'timestamp' field, as it
    # is part of the notification envelope spec. If this was put here
    # because some openstack project is generating notifications without
    # a timestamp, then that needs to be filed as a bug with the offending
    # project (mdragon)
    when = body.get('timestamp', body.get('_context_timestamp'))
    if when:
        return timeutils.normalize_time(timeutils.parse_isotime(when))

    return timeutils.utcnow()
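# NOTE: Sketch of the fallback chain above: the envelope 'timestamp'
# wins, then the RPC context timestamp, then "now". Hypothetical body;
# assumes oslo_utils.
body = {'_context_timestamp': '2012-08-27T10:00:00+02:00'}
when = _extract_when(body)
assert when.isoformat() == '2012-08-27T08:00:00'  # naive UTC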