def is_ec2_timestamp_expired(request, expires=None):
    """Checks the timestamp or expiry time included in an EC2 request
    and returns true if the request is expired.
    """
    query_time = None
    timestamp = request.get('Timestamp')
    expiry_time = request.get('Expires')
    try:
        if timestamp and expiry_time:
            msg = _("Request must include either Timestamp or Expires,"
                    " but cannot contain both")
            LOG.error(msg)
            raise exception.InvalidRequest(msg)
        elif expiry_time:
            query_time = timeutils.parse_strtime(expiry_time,
                                                 "%Y-%m-%dT%H:%M:%SZ")
            return timeutils.is_older_than(query_time, -1)
        elif timestamp:
            query_time = timeutils.parse_strtime(timestamp,
                                                 "%Y-%m-%dT%H:%M:%SZ")

        # If the difference between the timestamp in the request and the
        # time on our servers is larger than 5 minutes, the request is
        # too old (or too new).
        if query_time and expires:
            return (timeutils.is_older_than(query_time, expires) or
                    timeutils.is_newer_than(query_time, expires))
        return False
    except ValueError:
        LOG.audit(_("Timestamp is invalid."))
        return True
def _hours_for(self, instance, period_start, period_stop):
    launched_at = instance['launched_at']
    terminated_at = instance['terminated_at']
    if terminated_at is not None:
        if not isinstance(terminated_at, datetime.datetime):
            terminated_at = timeutils.parse_strtime(terminated_at,
                                                    "%Y-%m-%d %H:%M:%S.%f")

    if launched_at is not None:
        if not isinstance(launched_at, datetime.datetime):
            launched_at = timeutils.parse_strtime(launched_at,
                                                  "%Y-%m-%d %H:%M:%S.%f")

    if terminated_at and terminated_at < period_start:
        return 0

    # nothing if it started after the usage report ended
    if launched_at and launched_at > period_stop:
        return 0

    if launched_at:
        # if the instance launched after the period started, don't charge
        # for the time before the period began
        start = max(launched_at, period_start)
        if terminated_at:
            # if the instance stopped before period_stop, don't charge after
            stop = min(period_stop, terminated_at)
        else:
            # instance is still running, so charge up to the current time
            stop = period_stop
        dt = stop - start
        seconds = (dt.days * 3600 * 24 + dt.seconds +
                   dt.microseconds / 1000000.0)
        return seconds / 3600.0
    else:
        # instance hasn't launched, so no charge
        return 0
def _hours_for(self, instance, period_start, period_stop):
    launched_at = instance['launched_at']
    terminated_at = instance['terminated_at']
    if terminated_at is not None:
        if not isinstance(terminated_at, datetime.datetime):
            terminated_at = timeutils.parse_strtime(terminated_at,
                                                    "%Y-%m-%d %H:%M:%S.%f")

    if launched_at is not None:
        if not isinstance(launched_at, datetime.datetime):
            launched_at = timeutils.parse_strtime(launched_at,
                                                  "%Y-%m-%d %H:%M:%S.%f")

    if terminated_at and terminated_at < period_start:
        return 0

    # nothing if it started after the usage report ended
    if launched_at and launched_at > period_stop:
        return 0

    if launched_at:
        # if the instance launched after the period started, don't charge
        # for the time before the period began
        start = max(launched_at, period_start)
        if terminated_at:
            # if the instance stopped before period_stop, don't charge after
            stop = min(period_stop, terminated_at)
        else:
            # instance is still running, so charge up to the current time
            stop = period_stop
        dt = stop - start
        seconds = (dt.days * 3600 * 24 + dt.seconds +
                   dt.microseconds / 1000000.0)
        return seconds / 3600.0
    else:
        # instance hasn't launched, so no charge
        return 0
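# A worked example of the billing arithmetic in _hours_for above (the values
# are hypothetical, not taken from the source): an instance launched 90
# minutes into a one-day usage period and still running at period_stop is
# billed for 22.5 hours.
import datetime

_period_start = datetime.datetime(2013, 1, 1, 0, 0, 0)
_period_stop = datetime.datetime(2013, 1, 2, 0, 0, 0)
_launched_at = _period_start + datetime.timedelta(minutes=90)

_dt = _period_stop - max(_launched_at, _period_start)
_seconds = _dt.days * 3600 * 24 + _dt.seconds + _dt.microseconds / 1000000.0
assert _seconds / 3600.0 == 22.5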
def is_ec2_timestamp_expired(request, expires=None):
    """Checks the timestamp or expiry time included in an EC2 request
    and returns true if the request is expired.
    """
    query_time = None
    timestamp = request.get('Timestamp')
    expiry_time = request.get('Expires')
    try:
        if timestamp and expiry_time:
            msg = _("Request must include either Timestamp or Expires,"
                    " but cannot contain both")
            LOG.error(msg)
            raise exception.InvalidRequest(msg)
        elif expiry_time:
            query_time = timeutils.parse_strtime(expiry_time,
                                                 "%Y-%m-%dT%H:%M:%SZ")
            return timeutils.is_older_than(query_time, -1)
        elif timestamp:
            query_time = timeutils.parse_strtime(timestamp,
                                                 "%Y-%m-%dT%H:%M:%SZ")

        # If the difference between the timestamp in the request and the
        # time on our servers is larger than 5 minutes, the request is
        # too old (or too new).
        if query_time and expires:
            return (timeutils.is_older_than(query_time, expires) or
                    timeutils.is_newer_than(query_time, expires))
        return False
    except ValueError:
        LOG.audit(_("Timestamp is invalid."))
        return True
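# A minimal usage sketch for is_ec2_timestamp_expired above (the request
# dict is hypothetical, not from the source): a request stamped "now" falls
# inside a 300-second window, so the check should report it as not expired.
import datetime

_now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
_request = {'Timestamp': _now}
assert is_ec2_timestamp_expired(_request, expires=300) is False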
def _parse_datetime(self, dtstr):
    if not dtstr:
        return timeutils.utcnow()
    elif isinstance(dtstr, datetime.datetime):
        return dtstr
    try:
        return timeutils.parse_strtime(dtstr, "%Y-%m-%dT%H:%M:%S")
    except Exception:
        try:
            return timeutils.parse_strtime(dtstr, "%Y-%m-%dT%H:%M:%S.%f")
        except Exception:
            return timeutils.parse_strtime(dtstr, "%Y-%m-%d %H:%M:%S.%f")
def test_return_valid_isoformat(self):
    """Ensure that the ec2 api returns datetime in xs:dateTime
    (which apparently isn't datetime.isoformat())
    NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
    """
    conv = apirequest._database_to_isoformat
    # sqlite database representation with microseconds
    time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
                                              "%Y-%m-%d %H:%M:%S.%f")
    self.assertEqual(conv(time_to_convert), "2011-02-21T20:14:10.634Z")
    # mysql database representation (no microseconds)
    time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
                                              "%Y-%m-%d %H:%M:%S")
    self.assertEqual(conv(time_to_convert), "2011-02-21T20:14:10.634Z".replace("20:14:10.634", "19:56:18.000").replace("02-21T", "02-21T"))
def test_return_valid_isoformat(self):
    """Ensure that the ec2 api returns datetime in xs:dateTime
    (which apparently isn't datetime.isoformat())
    NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
    """
    conv = apirequest._database_to_isoformat
    # sqlite database representation with microseconds
    time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
                                              "%Y-%m-%d %H:%M:%S.%f")
    self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z')
    # mysql database representation (no microseconds)
    time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
                                              "%Y-%m-%d %H:%M:%S")
    self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z')
def _backup_metadata_get(self, backup):
    backup_ts = timeutils.parse_strtime(backup.get(meta.BACKUP_AT_KEY))
    backup_for = backup.get(meta.BACKUP_FOR_KEY)
    satisfies = jsonutils.loads(
        backup.get(meta.BACKUP_SATISFIES_KEY, '[]'))
    return (backup_ts, backup_for, satisfies)
def parse_strtime(strtime):
    if _ms_time_regex.match(strtime):
        # NOTE(MotoKen): time format for aws-sdk-java contains milliseconds
        time_format = "%Y-%m-%dT%H:%M:%S.%fZ"
    else:
        time_format = "%Y-%m-%dT%H:%M:%SZ"
    return timeutils.parse_strtime(strtime, time_format)
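# The wrapper above depends on a module-level _ms_time_regex that is not
# shown in this snippet.  One plausible definition (an assumption, not taken
# from the source) only needs to detect a fractional-seconds component:
import re

_ms_time_regex = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z$')

# With that definition, "2013-02-21T20:14:10.634Z" is parsed with
# "%Y-%m-%dT%H:%M:%S.%fZ" and "2013-02-21T20:14:10Z" falls back to
# "%Y-%m-%dT%H:%M:%SZ".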
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, overwrite=True,
             quota_class=None, user_name=None, project_name=None,
             **kwargs):
    """
    :param read_deleted: 'no' indicates deleted records are hidden,
        'yes' indicates deleted records are visible,
        'only' indicates that *only* deleted records are visible.

    :param overwrite: Set to False to ensure that the greenthread local
        copy of the index is not overwritten.

    :param kwargs: Extra arguments that might be present, but we ignore
        because they possibly came in from older rpc messages.
    """
    if kwargs:
        LOG.warn(_('Arguments dropped when creating context: %s') %
                 str(kwargs))

    self.user_id = user_id
    self.project_id = project_id
    self.roles = roles or []
    self.is_admin = is_admin
    if self.is_admin is None:
        self.is_admin = 'admin' in [x.lower() for x in self.roles]
    elif self.is_admin and 'admin' not in self.roles:
        self.roles.append('admin')
    self.read_deleted = read_deleted
    self.remote_address = remote_address
    if not timestamp:
        timestamp = timeutils.utcnow()
    if isinstance(timestamp, basestring):
        timestamp = timeutils.parse_strtime(timestamp)
    self.timestamp = timestamp
    if not request_id:
        request_id = generate_request_id()
    self.request_id = request_id
    self.auth_token = auth_token

    # NOTE(markmc): this attribute is currently only used by the
    # rs_limits turnstile pre-processor.
    # See https://lists.launchpad.net/openstack/msg12200.html
    self.quota_class = quota_class
    self.user_name = user_name
    self.project_name = project_name

    if overwrite or not hasattr(local.store, 'context'):
        self.update_store()
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, overwrite=True,
             quota_class=None, user_name=None, project_name=None,
             service_catalog=None, instance_lock_checked=False,
             domain_id=None, project_domain_id=None, user_domain_id=None,
             **kwargs):
    """
    :param read_deleted: 'no' indicates deleted records are hidden,
        'yes' indicates deleted records are visible,
        'only' indicates that *only* deleted records are visible.

    :param overwrite: Set to False to ensure that the greenthread local
        copy of the index is not overwritten.

    :param kwargs: Extra arguments that might be present, but we ignore
        because they possibly came in from older rpc messages.
    """
    if kwargs:
        LOG.warn(_('Arguments dropped when creating context: %s') %
                 str(kwargs))

    self.domain_id = domain_id
    self.project_domain_id = project_domain_id
    self.user_domain_id = user_domain_id
    self.user_id = user_id
    self.project_id = project_id
    self.roles = roles or []
    self.read_deleted = read_deleted
    self.remote_address = remote_address
    if not timestamp:
        timestamp = timeutils.utcnow()
    if isinstance(timestamp, six.string_types):
        timestamp = timeutils.parse_strtime(timestamp)
    self.timestamp = timestamp
    if not request_id:
        request_id = generate_request_id()
    self.request_id = request_id
    self.auth_token = auth_token

    if service_catalog:
        # Only include required parts of service_catalog
        self.service_catalog = [s for s in service_catalog
                                if s.get('type') in ('volume',)]
    else:
        # if list is empty or none
        self.service_catalog = []

    self.instance_lock_checked = instance_lock_checked

    # NOTE(markmc): this attribute is currently only used by the
    # rs_limits turnstile pre-processor.
    # See https://lists.launchpad.net/openstack/msg12200.html
    self.quota_class = quota_class
    self.user_name = user_name
    self.project_name = project_name

    self.is_admin = is_admin
    if self.is_admin is None:
        self.is_admin = policy.check_is_admin(self)
    if overwrite or not hasattr(local.store, 'context'):
        self.update_store()
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, overwrite=True,
             quota_class=None, user_name=None, project_name=None,
             service_catalog=None, instance_lock_checked=False, **kwargs):
    """
    :param read_deleted: 'no' indicates deleted records are hidden,
        'yes' indicates deleted records are visible,
        'only' indicates that *only* deleted records are visible.

    :param overwrite: Set to False to ensure that the greenthread local
        copy of the index is not overwritten.

    :param kwargs: Extra arguments that might be present, but we ignore
        because they possibly came in from older rpc messages.
    """
    if kwargs:
        LOG.warn(_('Arguments dropped when creating context: %s') %
                 str(kwargs))

    self.user_id = user_id
    self.project_id = project_id
    self.roles = roles or []
    self.read_deleted = read_deleted
    self.remote_address = remote_address
    if not timestamp:
        timestamp = timeutils.utcnow()
    if isinstance(timestamp, six.string_types):
        timestamp = timeutils.parse_strtime(timestamp)
    self.timestamp = timestamp
    if not request_id:
        request_id = generate_request_id()
    self.request_id = request_id
    self.auth_token = auth_token

    if service_catalog:
        # Only include required parts of service_catalog
        self.service_catalog = [s for s in service_catalog
                                if s.get('type') in ('volume', 'volumev2')]
    else:
        # if list is empty or none
        self.service_catalog = []

    self.instance_lock_checked = instance_lock_checked

    # NOTE(markmc): this attribute is currently only used by the
    # rs_limits turnstile pre-processor.
    # See https://lists.launchpad.net/openstack/msg12200.html
    self.quota_class = quota_class
    self.user_name = user_name
    self.project_name = project_name

    self.is_admin = is_admin
    # self.av_zone = ""
    if self.is_admin is None:
        self.is_admin = policy.check_is_admin(self)
    if overwrite or not hasattr(local.store, 'context'):
        self.update_store()
def fake_vpn_instance():
    return {
        "id": 7,
        "image_ref": CONF.vpn_image_id,
        "vm_state": "active",
        "created_at": timeutils.parse_strtime("1981-10-20T00:00:00.000000"),
        "uuid": 7777,
        "project_id": "other",
    }
def fake_vpn_instance():
    return {
        'id': 7,
        'image_ref': CONF.vpn_image_id,
        'vm_state': 'active',
        'created_at': timeutils.parse_strtime('1981-10-20T00:00:00.000000'),
        'uuid': 7777,
        'project_id': 'other',
    }
def _get_most_recent_update(self, versions):
    recent = None
    for version in versions:
        updated = timeutils.parse_strtime(version["updated"],
                                          "%Y-%m-%dT%H:%M:%SZ")
        if not recent:
            recent = updated
        elif updated > recent:
            recent = updated
    return recent.strftime("%Y-%m-%dT%H:%M:%SZ")
def instance_update(self, context, instance_uuid, updates, service=None):
    for key, value in updates.iteritems():
        if key not in allowed_updates:
            LOG.error(_("Instance update attempted for "
                        "'%(key)s' on %(instance_uuid)s") % locals())
            raise KeyError("unexpected update keyword '%s'" % key)
        if key in datetime_fields and isinstance(value, basestring):
            updates[key] = timeutils.parse_strtime(value)
    old_ref, instance_ref = self.db.instance_update_and_get_original(
        context, instance_uuid, updates)
    notifications.send_update(context, old_ref, instance_ref, service)
    return jsonutils.to_primitive(instance_ref)
def _parse_datetime(self, dtstr):
    if not dtstr:
        return timeutils.utcnow()
    elif isinstance(dtstr, datetime.datetime):
        return dtstr
    for format in VALID_DATETIME_FORMAT:
        try:
            return timeutils.parse_strtime(dtstr, format)
        except ValueError:
            continue
    return None
def _parse_datetime(self, dtstr):
    if not dtstr:
        value = timeutils.utcnow()
    elif isinstance(dtstr, datetime.datetime):
        value = dtstr
    else:
        try:
            value = timeutils.parse_strtime(dtstr, "%Y-%m-%dT%H:%M:%S")
        except Exception:
            try:
                value = timeutils.parse_strtime(dtstr,
                                                "%Y-%m-%dT%H:%M:%S.%f")
            except Exception:
                value = timeutils.parse_strtime(dtstr,
                                                "%Y-%m-%d %H:%M:%S.%f")
    # NOTE(mriedem): Instance object DateTime fields are timezone-aware
    # so we have to force UTC timezone for comparing this datetime against
    # instance object fields and still maintain backwards compatibility
    # in the API.
    if value.utcoffset() is None:
        value = value.replace(tzinfo=iso8601.iso8601.Utc())
    return value
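# An illustration of the timezone normalization performed above (the values
# are hypothetical, not from the source): a naive parsed datetime gains an
# explicit UTC offset, using the same iso8601.iso8601.Utc() call as the
# snippet, so it can be compared with timezone-aware instance object fields.
import datetime
import iso8601

_naive = datetime.datetime(2013, 2, 21, 20, 14, 10)
assert _naive.utcoffset() is None
_aware = _naive.replace(tzinfo=iso8601.iso8601.Utc())
assert _aware.utcoffset() == datetime.timedelta(0)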
def _parse_glance_iso8601_timestamp(timestamp):
    """Parse a subset of iso8601 timestamps into datetime objects."""
    iso_formats = ["%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S"]
    for iso_format in iso_formats:
        try:
            return timeutils.parse_strtime(timestamp, iso_format)
        except ValueError:
            pass
    raise ValueError(_("%(timestamp)s does not follow any of the "
                       "signatures: %(iso_formats)s") % locals())
def _get_most_recent_update(self, versions):
    recent = None
    for version in versions:
        updated = timeutils.parse_strtime(version['updated'],
                                          '%Y-%m-%dT%H:%M:%SZ')
        if not recent:
            recent = updated
        elif updated > recent:
            recent = updated
    return recent.strftime('%Y-%m-%dT%H:%M:%SZ')
def instance_update(self, context, instance_uuid, updates):
    for key, value in updates.iteritems():
        if key not in allowed_updates:
            LOG.error(_("Instance update attempted for "
                        "'%(key)s' on %(instance_uuid)s") % locals())
            raise KeyError("unexpected update keyword '%s'" % key)
        if key in datetime_fields and isinstance(value, basestring):
            updates[key] = timeutils.parse_strtime(value)
    old_ref, instance_ref = self.db.instance_update_and_get_original(
        context, instance_uuid, updates)
    notifications.send_update(context, old_ref, instance_ref)
    return jsonutils.to_primitive(instance_ref)
def _parse_glance_iso8601_timestamp(timestamp):
    """Parse a subset of iso8601 timestamps into datetime objects."""
    iso_formats = ['%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S']
    for iso_format in iso_formats:
        try:
            return timeutils.parse_strtime(timestamp, iso_format)
        except ValueError:
            pass
    raise ValueError(
        _('%(timestamp)s does not follow any of the '
          'signatures: %(iso_formats)s') % locals())
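# A quick sanity check of the fallback parsing above (the inputs are
# hypothetical, not from the source): the first format accepts microseconds,
# the second accepts plain ISO 8601 timestamps, and anything else raises
# ValueError.
import datetime

assert (_parse_glance_iso8601_timestamp("2013-02-21T20:14:10.634276") ==
        datetime.datetime(2013, 2, 21, 20, 14, 10, 634276))
assert (_parse_glance_iso8601_timestamp("2013-02-21T20:14:10") ==
        datetime.datetime(2013, 2, 21, 20, 14, 10))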
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, overwrite=True,
             quota_class=None, user_name=None, project_name=None,
             service_catalog=None, **kwargs):
    """
    :param read_deleted: 'no' indicates deleted records are hidden,
        'yes' indicates deleted records are visible,
        'only' indicates that *only* deleted records are visible.

    :param overwrite: Set to False to ensure that the greenthread local
        copy of the index is not overwritten.

    :param kwargs: Extra arguments that might be present, but we ignore
        because they possibly came in from older rpc messages.
    """
    if kwargs:
        LOG.warn(_('Arguments dropped when creating context: %s') %
                 str(kwargs))

    self.user_id = user_id
    self.project_id = project_id
    self.roles = roles or []
    self.is_admin = is_admin
    if self.is_admin is None:
        self.is_admin = 'admin' in [x.lower() for x in self.roles]
    elif self.is_admin and 'admin' not in self.roles:
        self.roles.append('admin')
    self.read_deleted = read_deleted
    self.remote_address = remote_address
    if not timestamp:
        timestamp = timeutils.utcnow()
    if isinstance(timestamp, basestring):
        timestamp = timeutils.parse_strtime(timestamp)
    self.timestamp = timestamp
    if not request_id:
        request_id = generate_request_id()
    self.request_id = request_id
    self.auth_token = auth_token
    self.service_catalog = service_catalog

    # NOTE(markmc): this attribute is currently only used by the
    # rs_limits turnstile pre-processor.
    # See https://lists.launchpad.net/openstack/msg12200.html
    self.quota_class = quota_class
    self.user_name = user_name
    self.project_name = project_name

    if overwrite or not hasattr(local.store, 'context'):
        self.update_store()
def is_up(self, service_ref):
    """Moved from nova.utils

    Check whether a service is up based on last heartbeat.
    """
    last_heartbeat = service_ref['updated_at'] or service_ref['created_at']
    if isinstance(last_heartbeat, basestring):
        # NOTE(russellb) If this service_ref came in over rpc via
        # conductor, then the timestamp will be a string and needs to be
        # converted back to a datetime.
        last_heartbeat = timeutils.parse_strtime(last_heartbeat)
    # Timestamps in DB are UTC.
    elapsed = utils.total_seconds(timeutils.utcnow() - last_heartbeat)
    LOG.debug('DB_Driver.is_up last_heartbeat = %(lhb)s elapsed = %(el)s',
              {'lhb': str(last_heartbeat), 'el': str(elapsed)})
    return abs(elapsed) <= CONF.service_down_time
def is_up(self, service_ref):
    """Moved from nova.utils

    Check whether a service is up based on last heartbeat.
    """
    last_heartbeat = service_ref['updated_at'] or service_ref['created_at']
    if isinstance(last_heartbeat, basestring):
        # NOTE(russellb) If this service_ref came in over rpc via
        # conductor, then the timestamp will be a string and needs to be
        # converted back to a datetime.
        last_heartbeat = timeutils.parse_strtime(last_heartbeat)
    else:
        # Objects have proper UTC timezones, but the timeutils comparison
        # below does not (and will fail)
        last_heartbeat = last_heartbeat.replace(tzinfo=None)
    # Timestamps in DB are UTC.
    elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
    LOG.debug('DB_Driver.is_up last_heartbeat = %(lhb)s elapsed = %(el)s',
              {'lhb': str(last_heartbeat), 'el': str(elapsed)})
    return abs(elapsed) <= CONF.service_down_time
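# A minimal sketch of exercising the liveness check above (the driver and
# service_ref are hypothetical, not from the source): assuming the usual
# default of roughly a minute for CONF.service_down_time, a heartbeat that
# is an hour old should be reported as down.
import datetime


def _example_is_up(driver):
    stale_ref = {
        'created_at': datetime.datetime.utcnow() - datetime.timedelta(days=1),
        'updated_at': datetime.datetime.utcnow() - datetime.timedelta(hours=1),
    }
    return driver.is_up(stale_ref)  # expected to be False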
def _parse_datetime(self, dtstr):
    value = None
    if not dtstr:
        value = timeutils.utcnow()
    elif isinstance(dtstr, datetime.datetime):
        value = dtstr
    else:
        for format in VALID_DATETIME_FORMAT:
            try:
                value = timeutils.parse_strtime(dtstr, format)
                break
            except ValueError:
                continue
    # NOTE(mriedem): Instance object DateTime fields are timezone-aware
    # so we have to force UTC timezone for comparing this datetime against
    # instance object fields and still maintain backwards compatibility
    # in the API.
    if value and value.utcoffset() is None:
        value = value.replace(tzinfo=iso8601.iso8601.Utc())
    return value
def _update_cache_entry(self, state):
    entry = {}
    host = state['host_name']
    entry['trust_lvl'] = state['trust_lvl']
    try:
        # Normalize as naive object to interoperate with utcnow().
        entry['vtime'] = timeutils.normalize_time(
            timeutils.parse_isotime(state['vtime']))
    except ValueError:
        try:
            # Mt. Wilson does not necessarily return an ISO8601 formatted
            # `vtime`, so we should try to parse it as a string formatted
            # datetime.
            vtime = timeutils.parse_strtime(state['vtime'], fmt="%c")
            entry['vtime'] = timeutils.normalize_time(vtime)
        except ValueError:
            # Mark the system as un-trusted if we get an invalid vtime.
            entry['trust_lvl'] = 'unknown'
            entry['vtime'] = timeutils.utcnow()
    self.compute_nodes[host] = entry
def convert_datetimes(values, *datetime_keys):
    for key in values:
        if key in datetime_keys and isinstance(values[key], basestring):
            values[key] = timeutils.parse_strtime(values[key])
    return values
def _get_datetime_from_filename(self, timestamp_filename):
    ts = timestamp_filename.lstrip(TIMESTAMP_PREFIX)
    return timeutils.parse_strtime(ts, fmt=TIMESTAMP_FORMAT)
def parse_strtime(dstr, fmt):
    try:
        return timeutils.parse_strtime(dstr, fmt)
    except (TypeError, ValueError) as e:
        raise exception.InvalidStrTime(reason=unicode(e))