def test_get_key(self):
        original_secret_metadata = mock.Mock()
        original_secret_metadata.algorithm = mock.sentinel.alg
        original_secret_metadata.bit_length = mock.sentinel.bit
        original_secret_metadata.secret_type = 'symmetric'

        key_id = "43ed09c3-e551-4c24-b612-e619abe9b534"
        key_ref = ("http://localhost:9311/v1/secrets/" + key_id)
        original_secret_metadata.secret_ref = key_ref

        created = timeutils.parse_isotime('2015-10-20 18:51:17+00:00')
        original_secret_metadata.created = created
        created_formatted = timeutils.parse_isotime(str(created))
        created_posix = calendar.timegm(created_formatted.timetuple())

        key_name = 'my key'
        original_secret_metadata.name = key_name

        original_secret_data = b'test key'
        original_secret_metadata.payload = original_secret_data

        self.mock_barbican.secrets.get.return_value = original_secret_metadata
        key = self.key_mgr.get(self.ctxt, self.key_id)

        self.get.assert_called_once_with(self.secret_ref)
        self.assertEqual(key_id, key.id)
        self.assertEqual(key_name, key.name)
        self.assertEqual(original_secret_data, key.get_encoded())
        self.assertEqual(created_posix, key.created)
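For reference, a minimal sketch of the created-timestamp conversion exercised above, assuming oslo.utils is installed; the input is the same hypothetical timestamp the test uses:

import calendar
from oslo_utils import timeutils

# Parse the ISO 8601 string into a timezone-aware datetime, then convert
# its UTC time tuple to POSIX seconds.
created = timeutils.parse_isotime('2015-10-20 18:51:17+00:00')
created_posix = calendar.timegm(created.timetuple())
print(created_posix)  # 1445367077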
Example #2
    def __init__(self, parent_acl, entity_ref=None, users=None,
                 project_access=None, operation_type=None,
                 created=None, updated=None):
        """Per Operation ACL data instance for secret or container.

        This class is not meant to be instantiated outside of this module.

        :param parent_acl: ACL entity to which this per-operation data belongs
        :param str entity_ref: Full HATEOAS reference to a secret or container
        :param users: List of Keystone userid(s) to be used for ACL.
        :type users: List or None
        :param bool project_access: Flag indicating project access behavior
        :param str operation_type: Type indicating which class of Barbican
            operations this ACL is defined for, e.g. 'read' operations
        :param str created: Time string indicating the ACL creation timestamp.
            This is populated only when parsing an API response; it is not
            needed in client input.
        :param str updated: Time string indicating the ACL last-update
            timestamp. This is populated only when parsing an API response;
            it is not needed in client input.
        """
        self._parent_acl = parent_acl
        self._entity_ref = entity_ref
        self._users = users if users else list()
        self._project_access = project_access
        self._operation_type = operation_type
        self._created = parse_isotime(created) if created else None
        self._updated = parse_isotime(updated) if updated else None
Example #3
 def test_compare_micros(self):
     zulu = timeutils.parse_isotime('2012-02-14T20:53:07.6544')
     east = timeutils.parse_isotime('2012-02-14T19:53:07.654321-01:00')
     west = timeutils.parse_isotime('2012-02-14T21:53:07.655+01:00')
     self.assertTrue(east < west)
     self.assertTrue(east < zulu)
     self.assertTrue(zulu < west)
Example #4
 def _is_valid_date_filter(self, date_filter):
     filters = date_filter.split(',')
     sorted_filters = dict()
     try:
         for filter in filters:
             if filter.startswith('gt:'):
                 if sorted_filters.get('gt') or sorted_filters.get('gte'):
                     return False
                 sorted_filters['gt'] = timeutils.parse_isotime(filter[3:])
             elif filter.startswith('gte:'):
                 if sorted_filters.get('gt') or sorted_filters.get(
                         'gte') or sorted_filters.get('eq'):
                     return False
                 sorted_filters['gte'] = timeutils.parse_isotime(filter[4:])
             elif filter.startswith('lt:'):
                 if sorted_filters.get('lt') or sorted_filters.get('lte'):
                     return False
                 sorted_filters['lt'] = timeutils.parse_isotime(filter[3:])
             elif filter.startswith('lte:'):
                 if sorted_filters.get('lt') or sorted_filters.get(
                         'lte') or sorted_filters.get('eq'):
                     return False
                 sorted_filters['lte'] = timeutils.parse_isotime(filter[4:])
             elif sorted_filters.get('eq') or sorted_filters.get(
                     'gte') or sorted_filters.get('lte'):
                 return False
             else:
                 sorted_filters['eq'] = timeutils.parse_isotime(filter)
     except ValueError:
         return False
     return True
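A hypothetical check of the validator above, assuming `validator` is an instance of the class that defines _is_valid_date_filter: at most one lower bound (gt/gte), at most one upper bound (lt/lte), and a bare value (eq) cannot be combined with another gte, lte, or eq term.

# Valid: one lower bound plus one upper bound.
assert validator._is_valid_date_filter(
    'gt:2016-01-01T00:00:00Z,lt:2016-02-01T00:00:00Z')
# Invalid: two lower bounds (gt and gte).
assert not validator._is_valid_date_filter(
    'gt:2016-01-01T00:00:00Z,gte:2016-01-02T00:00:00Z')
# Invalid: a bare (eq) value combined with a second bare value.
assert not validator._is_valid_date_filter(
    '2016-01-01T00:00:00Z,2016-01-02T00:00:00Z')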
Example #5
 def validate(self, value, context, template=None):
     try:
         timeutils.parse_isotime(value)
     except Exception:
         return False
     else:
         return True
Example #6
    def __images_by_windowed_meta(
        self,
        context,
        period_start,
        period_stop,
        project_id,
        metadata
    ):
        """Simulate second the bottomost layer.

        :param context:
        :param period_start: String
        :param period_stop: String
        :param project_id: String
        :param metadata: Dict
        """
        period_start = timeutils.parse_isotime(period_start)
        period_stop = timeutils.parse_isotime(period_stop)
        image_list = self.___images_by_windowed_meta(
            context,
            period_start,
            period_stop,
            project_id,
            metadata
        )
        return image_list
Example #7
def build_token_values_v2(access, admin_domain_id):
    token_data = access['token']

    token_expires_at = timeutils.parse_isotime(token_data['expires'])

    # Trim off the microseconds because the revocation event only has
    # expirations accurate to the second.
    token_expires_at = token_expires_at.replace(microsecond=0)

    token_values = {
        'expires_at': timeutils.normalize_time(token_expires_at),
        'issued_at': timeutils.normalize_time(
            timeutils.parse_isotime(token_data['issued_at'])),
        'audit_id': token_data.get('audit_ids', [None])[0],
        'audit_chain_id': token_data.get('audit_ids', [None])[-1],
    }

    token_values['user_id'] = access.get('user', {}).get('id')

    project = token_data.get('tenant')
    if project is not None:
        token_values['project_id'] = project['id']
    else:
        token_values['project_id'] = None

    token_values['identity_domain_id'] = admin_domain_id
    token_values['assignment_domain_id'] = admin_domain_id

    role_list = []
    # Roles are by ID in metadata and by name in the user section
    roles = access.get('metadata', {}).get('roles', [])
    for role in roles:
        role_list.append(role)
    token_values['roles'] = role_list
    return token_values
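A hypothetical v2 token payload illustrating the mapping above (all identifiers are made up); note that microseconds on 'expires' are trimmed before normalization:

access = {
    'token': {'expires': '2015-10-20T18:51:17.123456Z',
              'issued_at': '2015-10-20T17:51:17Z',
              'audit_ids': ['audit-1', 'audit-0'],
              'tenant': {'id': 'project-1'}},
    'user': {'id': 'user-1'},
    'metadata': {'roles': ['role-1', 'role-2']},
}
values = build_token_values_v2(access, admin_domain_id='default')
# values['expires_at'] -> datetime(2015, 10, 20, 18, 51, 17); microseconds dropped
# values['audit_id'] -> 'audit-1', values['audit_chain_id'] -> 'audit-0'
# values['project_id'] -> 'project-1', values['roles'] -> ['role-1', 'role-2']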
Example #8
 def _add_secret(self, session, project, name, created_at, updated_at):
     s = models.Secret()
     s.name = name
     s.created_at = timeutils.parse_isotime(created_at)
     s.updated_at = timeutils.parse_isotime(updated_at)
     s.project_id = project.id
     session.add(s)
Example #9
 def test_fetch_basic(self):
     ts = carbonara.AggregatedTimeSerie(
         [datetime.datetime(2014, 1, 1, 12, 0, 0),
          datetime.datetime(2014, 1, 1, 12, 0, 4),
          datetime.datetime(2014, 1, 1, 12, 0, 9)],
         [3, 5, 6],
         sampling="1s")
     self.assertEqual(
         [(datetime.datetime(2014, 1, 1, 12), 1, 3),
          (datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5),
          (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)],
         ts.fetch())
     self.assertEqual(
         [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5),
          (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)],
         ts.fetch(from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 4)))
     self.assertEqual(
         [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5),
          (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)],
         ts.fetch(
             from_timestamp=timeutils.parse_isotime(
                 "2014-01-01 12:00:04")))
     self.assertEqual(
         [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5),
          (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)],
         ts.fetch(
             from_timestamp=timeutils.parse_isotime(
                 "2014-01-01 13:00:04+01:00")))
Example #10
 def _fill_from_data(self, meta=None, expiration=None,
                     plugin_name=None, plugin_ca_id=None, created=None,
                     updated=None, status=None, creator_id=None):
     if meta:
         for s in meta:
             key = list(s.keys())[0]
             value = list(s.values())[0]
             if key == 'name':
                 self._name = value
             if key == 'description':
                 self._description = value
     self._plugin_name = plugin_name
     self._plugin_ca_id = plugin_ca_id
     self._expiration = expiration
     self._creator_id = creator_id
     if self._expiration:
         self._expiration = parse_isotime(self._expiration)
     if self._ca_ref:
         self._status = status
         self._created = created
         self._updated = updated
         if self._created:
             self._created = parse_isotime(self._created)
         if self._updated:
             self._updated = parse_isotime(self._updated)
     else:
         self._status = None
         self._created = None
         self._updated = None
Example #11
def split_filter_op(expression):
    """Split operator from threshold in an expression.
    Designed for use on a comparative-filtering query field.
    When no operator is found, default to an equality comparison.

    :param expression: the expression to parse

    :returns: a tuple (operator, threshold) parsed from expression
    """
    left, sep, right = expression.partition(':')
    if sep:
        # If the expression is a date of the format ISO 8601 like
        # CCYY-MM-DDThh:mm:ss+hh:mm and has no operator, it should
        # not be partitioned, and a default operator of eq should be
        # assumed.
        try:
            timeutils.parse_isotime(expression)
            op = 'eq'
            threshold = expression
        except ValueError:
            op = left
            threshold = right
    else:
        op = 'eq'  # default operator
        threshold = left

    # NOTE(stevelle): decoding escaped values may be needed later
    return op, threshold
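Hypothetical calls against split_filter_op, showing why the ISO 8601 check matters: the time-zone colon in a plain timestamp would otherwise be mistaken for an operator separator.

print(split_filter_op('gt:2015-01-01T00:00:00Z'))    # ('gt', '2015-01-01T00:00:00Z')
print(split_filter_op('2015-01-01T00:00:00+02:00'))  # ('eq', '2015-01-01T00:00:00+02:00')
print(split_filter_op('large'))                      # ('eq', 'large')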
Example #12
    def _hours_for(self, instance, period_start, period_stop):
        launched_at = instance.launched_at
        terminated_at = instance.terminated_at
        if terminated_at is not None:
            if not isinstance(terminated_at, datetime.datetime):
                # NOTE(mriedem): Instance object DateTime fields are
                # timezone-aware so convert using isotime.
                terminated_at = timeutils.parse_isotime(terminated_at)

        if launched_at is not None:
            if not isinstance(launched_at, datetime.datetime):
                launched_at = timeutils.parse_isotime(launched_at)

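        # nothing if it stopped before the usage report started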
        if terminated_at and terminated_at < period_start:
            return 0
        # nothing if it started after the usage report ended
        if launched_at and launched_at > period_stop:
            return 0
        if launched_at:
            # if the instance launched after period_start, don't charge for
            # the time before launch
            start = max(launched_at, period_start)
            if terminated_at:
                # if instance stopped before period_stop, don't charge after
                stop = min(period_stop, terminated_at)
            else:
                # instance is still running, so charge them up to current time
                stop = period_stop
            dt = stop - start
            seconds = (dt.days * 3600 * 24 + dt.seconds +
                       dt.microseconds / 1000000.0)

            return seconds / 3600.0
        else:
            # instance hasn't launched, so no charge
            return 0
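A worked example of the windowing above, with hypothetical values: an instance launched mid-period and still running is billed from its launch time to the end of the reporting period.

import datetime

period_start = datetime.datetime(2016, 1, 1, 0, 0)
period_stop = datetime.datetime(2016, 1, 2, 0, 0)
launched_at = datetime.datetime(2016, 1, 1, 18, 0)  # launched mid-period

start = max(launched_at, period_start)  # charge only from 18:00
stop = period_stop                      # still running: charge to period end
dt = stop - start
hours = (dt.days * 3600 * 24 + dt.seconds + dt.microseconds / 1000000.0) / 3600.0
print(hours)  # 6.0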
Example #13
def _validate_datetime_format(instance):
    try:
        timeutils.parse_isotime(instance)
    except ValueError:
        return False
    else:
        return True
Example #14
    def sync_instances(self, req, body):
        """Tell all cells to sync instance info."""
        context = req.environ['nova.context']

        authorize(context)
        authorize(context, action="sync_instances")

        project_id = body.pop('project_id', None)
        deleted = body.pop('deleted', False)
        updated_since = body.pop('updated_since', None)
        if body:
            msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
                    "understood.")
            raise exc.HTTPBadRequest(explanation=msg)
        if isinstance(deleted, six.string_types):
            try:
                deleted = strutils.bool_from_string(deleted, strict=True)
            except ValueError as err:
                raise exc.HTTPBadRequest(explanation=six.text_type(err))
        if updated_since:
            try:
                timeutils.parse_isotime(updated_since)
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
        self.cells_rpcapi.sync_instances(context, project_id=project_id,
                updated_since=updated_since, deleted=deleted)
Example #15
    def _fill_from_data(self, name=None, expiration=None, algorithm=None,
                        bit_length=None, mode=None, payload=None,
                        payload_content_type=None,
                        payload_content_encoding=None, created=None,
                        updated=None, content_types=None, status=None):
        self._name = name
        self._algorithm = algorithm
        self._bit_length = bit_length
        self._mode = mode
        self._payload = payload
        self._payload_content_encoding = payload_content_encoding
        self._expiration = expiration
        if self._expiration:
            self._expiration = parse_isotime(self._expiration)
        if self._secret_ref:
            self._content_types = content_types
            self._status = status
            self._created = created
            self._updated = updated
            if self._created:
                self._created = parse_isotime(self._created)
            if self._updated:
                self._updated = parse_isotime(self._updated)
        else:
            self._content_types = None
            self._status = None
            self._created = None
            self._updated = None

        if not self._content_types:
            self._payload_content_type = payload_content_type
        else:
            self._payload_content_type = self._content_types.get('default',
                                                                 None)
Example #16
    def __init__(self, api, type, status=None, created=None, updated=None,
                 meta=None, order_ref=None, error_status_code=None,
                 error_reason=None):
        super(Order, self).__init__()

        self._api = api
        self._type = type
        self._status = status

        if created:
            self._created = parse_isotime(created)
        else:
            self._created = None

        if updated:
            self._updated = parse_isotime(updated)
        else:
            self._updated = None

        self._order_ref = order_ref

        self._meta = base.filter_null_keys(meta)

        self._error_status_code = error_status_code
        self._error_reason = error_reason

        if 'expiration' in self._meta.keys():
            self._meta['expiration'] = parse_isotime(self._meta['expiration'])
Example #17
 def test_compare(self):
     zulu = timeutils.parse_isotime('2012-02-14T20:53:07')
     east = timeutils.parse_isotime('2012-02-14T20:53:07-01:00')
     west = timeutils.parse_isotime('2012-02-14T20:53:07+01:00')
     self.assertTrue(east > west)
     self.assertTrue(east > zulu)
     self.assertTrue(zulu > west)
Example #18
    def get_version(cls, payload, timestamp=None):
        """Combine updated_at|created_at epoch with notification timestamp as a
        version number, format is
        <right 9 digits update epoch(create epoch)><right 9 digits timestamp
        in milliseconds>, if timestamp is not present(sync indexing), fill in
        9 digits of zero instead.

        The reason we combine update time and timestamp together is the
        precision of update time is limit to seconds. It's not accurate enough
        to use as a version.

        The max version in Elasticsearch is 9.2e+18, allowing 19 digits at
        most. Our version is 18 digits long, leaves 1 digit for conservation.

        The updated epoch is 10 digits long, we strip off its leading digit,
        concatenate it with the right 9 digits of timestamp in milliseconds,
        and we get a 18 digits long version.

        The truncating has some potential things to be noted, similar to Y2K
        problem.

        Let's say we have an updated epoch 1450161655. Stripping off the
        leading '1' from the current epoch seconds 'rebases' our epoch around
        1984(450161655). By the time we get to an updated epoch beginning '199'
        we're somewhere around 2033, and truncating epoch to 2001. Once the
        clock flips round to begin '200'(year 2033) things will stop working
        because we'll suddenly be using epoch that look like they're from 1969.
        We can address this before that happens; worst case is that you'd have
        to reindex everything, or reset the version.

        The "timestamp" has similiar issues. When the "timestamp" overflowed
        the 9-digit field, time becomes indistinguishable. The 9 digits
        millisecond precision gives us around 27 hours. It should be enough to
        distinguish notifications with different timestamps.
        """
        updated = None
        if payload.get('updated_at'):
            updated = payload['updated_at']
        elif payload.get('created_at'):
            updated = payload['created_at']
        else:
            msg = ('Failed to build elasticsearch version: neither '
                   'updated_at nor created_at exists in payload %s'
                   % str(payload))
            raise exception.SearchlightException(message=msg)

        updated_obj = timeutils.parse_isotime(updated)
        updated_epoch = int(calendar.timegm(updated_obj.utctimetuple()))
        if timestamp:
            timestamp_obj = timeutils.parse_isotime(timestamp)
            timestamp_epoch = int(calendar.timegm(
                timestamp_obj.utctimetuple()))
            timestamp_milli = (timestamp_epoch * 1000 +
                               timestamp_obj.microsecond // 1000)
            truncate_timestamp = str(timestamp_milli)[-9:].zfill(9)
            # truncate the updated epoch because we have run out of digits.
            return '%s%s' % (str(updated_epoch)[-9:], truncate_timestamp)
        else:
            return '%s%s' % (str(updated_epoch)[-9:], '0' * 9)
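A minimal sketch of the digit arithmetic described in the docstring, reusing the epoch from its example together with a hypothetical notification timestamp:

import calendar
from oslo_utils import timeutils

updated_epoch = 1450161655  # 10 digits; right 9 -> '450161655'
timestamp_obj = timeutils.parse_isotime('2015-12-15T06:40:55.123456Z')
timestamp_epoch = int(calendar.timegm(timestamp_obj.utctimetuple()))
timestamp_milli = timestamp_epoch * 1000 + timestamp_obj.microsecond // 1000
version = '%s%s' % (str(updated_epoch)[-9:], str(timestamp_milli)[-9:].zfill(9))
print(version)  # '450161655161655123' -- 18 digits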
Example #19
def _validate_datetime_format(instance):
    try:
        if isinstance(instance, jsonschema.compat.str_types):
            timeutils.parse_isotime(instance)
    except ValueError:
        return False
    else:
        return True
Example #20
 def assertServerUsage(self, server, launched_at, terminated_at):
     resp_launched_at = timeutils.parse_isotime(
         server.get('%slaunched_at' % self.prefix))
     self.assertEqual(timeutils.normalize_time(resp_launched_at),
                      launched_at)
     resp_terminated_at = timeutils.parse_isotime(
         server.get('%sterminated_at' % self.prefix))
     self.assertEqual(timeutils.normalize_time(resp_terminated_at),
                      terminated_at)
Example #21
    def get_raw_measures(self, metric, serie, from_timestamp=None,
                         to_timestamp=None, aggregation="mean",
                         archive_policy=None):
        if from_timestamp:
            from_timestamp = self._timestamp_to_utc(from_timestamp)
        if to_timestamp:
            to_timestamp = self._timestamp_to_utc(to_timestamp)

        if from_timestamp:
            first_measure_timestamp = from_timestamp
        else:
            result = self._query(metric, "select * from \"%(metric_id)s\" limit 1" %
                                 dict(metric_id=metric))
            result = list(result[metric])
            if result:
                first_measure_timestamp = self._timestamp_to_utc(
                    timeutils.parse_isotime(result[0]['time']))
            else:
                first_measure_timestamp = None

        query = ("SELECT %(aggregation)s(value) FROM \"%(metric_id)s\""
                 % dict(aggregation=aggregation,
                        metric_id=metric))
        where_query = parse_serie(serie)
        timestamp_query = self._make_time_query(first_measure_timestamp,
                                                to_timestamp, 1)

        results = []
        for definition in sorted(
                archive_policy.definition,
                key=operator.attrgetter('granularity')):
            time_query = self._make_time_query(
                first_measure_timestamp,
                to_timestamp,
                definition.granularity)
            subquery = " and ".join([where_query, timestamp_query])
            subquery = (query +
                        " WHERE %(times)s GROUP BY time(%(granularity)ds) "
                        "fill(none) LIMIT %(points)d" %
                        dict(times=subquery,
                             granularity=definition.granularity,
                             points=definition.points))

            result = self._query(metric, subquery)

            subresults = []
            for point in result[metric]:
                timestamp = self._timestamp_to_utc(
                    timeutils.parse_isotime(point['time']))
                if (point[aggregation] is not None and
                    ((from_timestamp is None or timestamp >= from_timestamp)
                     and (to_timestamp is None or timestamp < to_timestamp))):
                    subresults.insert(0, (timestamp,
                                          definition.granularity,
                                          point[aggregation]))
            results.extend(subresults)
        return results
Example #22
    def _index(self, req, add_link=False, next_link=False, add_uuid=False,
               sort_dirs=None, sort_keys=None, limit=None, marker=None,
               allow_changes_since=False, allow_changes_before=False):
        context = req.environ['nova.context']
        context.can(migrations_policies.POLICY_ROOT % 'index')
        search_opts = {}
        search_opts.update(req.GET)
        if 'changes-since' in search_opts:
            if allow_changes_since:
                search_opts['changes-since'] = timeutils.parse_isotime(
                    search_opts['changes-since'])
            else:
                # Before microversion 2.59, the changes-since filter was not
                # supported in the DB API. However, the schema allowed
                # additionalProperties=True, so a user could pass it before
                # 2.59 and filter by the updated_at field if we don't remove
                # it from search_opts.
                del search_opts['changes-since']

        if 'changes-before' in search_opts:
            if allow_changes_before:
                search_opts['changes-before'] = timeutils.parse_isotime(
                    search_opts['changes-before'])
                changes_since = search_opts.get('changes-since')
                if (changes_since and search_opts['changes-before'] <
                        search_opts['changes-since']):
                    msg = _('The value of changes-since must be less than '
                            'or equal to changes-before.')
                    raise exc.HTTPBadRequest(explanation=msg)
            else:
                # Before microversion 2.59 the schema allowed
                # additionalProperties=True, so a user could pass
                # changes-before before 2.59 and filter by the updated_at
                # field if we don't remove it from search_opts.
                del search_opts['changes-before']

        if sort_keys:
            try:
                migrations = self.compute_api.get_migrations_sorted(
                    context, search_opts,
                    sort_dirs=sort_dirs, sort_keys=sort_keys,
                    limit=limit, marker=marker)
            except exception.MarkerNotFound as e:
                raise exc.HTTPBadRequest(explanation=e.format_message())
        else:
            migrations = self.compute_api.get_migrations(
                context, search_opts)

        migrations = self._output(req, migrations, add_link, add_uuid)
        migrations_dict = {'migrations': migrations}

        if next_link:
            migrations_links = self._view_builder.get_links(req, migrations)
            if migrations_links:
                migrations_dict['migrations_links'] = migrations_links
        return migrations_dict
Example #23
 def assertTimestampEqual(self, expected, value):
     # Compare two timestamps but ignore the microseconds part
     # of the expected timestamp. Keystone does not track microseconds and
     # is working to eliminate microseconds from its datetimes.
     expected = timeutils.parse_isotime(expected).replace(microsecond=0)
     value = timeutils.parse_isotime(value).replace(microsecond=0)
     self.assertEqual(
         expected,
         value,
         "%s != %s" % (expected, value))
Example #24
    def _add_to_revocation_list(self, data, lock):
        filtered_list = []
        revoked_token_data = {}

        current_time = self._get_current_time()
        expires = data['expires']

        if isinstance(expires, six.string_types):
            expires = timeutils.parse_isotime(expires)

        expires = timeutils.normalize_time(expires)

        if expires < current_time:
            LOG.warning(_LW('Token `%s` is expired, not adding to the '
                            'revocation list.'), data['id'])
            return

        revoked_token_data['expires'] = utils.isotime(expires,
                                                      subsecond=True)
        revoked_token_data['id'] = data['id']

        token_list = self._get_key_or_default(self.revocation_key, default=[])
        if not isinstance(token_list, list):
            # NOTE(morganfainberg): In the case that the revocation list is not
            # in a format we understand, reinitialize it. This is an attempt to
            # not allow the revocation list to be completely broken if
            # somehow the key is changed outside of keystone (e.g. memcache
            # that is shared by multiple applications). Logging occurs at error
            # level so that the cloud administrators have some awareness that
            # the revocation_list needed to be cleared out. In all, this should
            # be recoverable. Keystone cannot control external applications
            # from changing a key in some backends, however, it is possible to
            # gracefully handle and notify of this event.
            LOG.error(_LE('Reinitializing revocation list due to error '
                          'in loading revocation list from backend.  '
                          'Expected `list` type got `%(type)s`. Old '
                          'revocation list data: %(list)r'),
                      {'type': type(token_list), 'list': token_list})
            token_list = []

        # NOTE(morganfainberg): on revocation, clean up the expired entries to
        # keep the list of revoked tokens to a minimum.
        for token_data in token_list:
            try:
                expires_at = timeutils.normalize_time(
                    timeutils.parse_isotime(token_data['expires']))
            except ValueError:
                LOG.warning(_LW('Removing `%s` from revocation list due to '
                                'invalid expires data in revocation list.'),
                            token_data.get('id', 'INVALID_TOKEN_DATA'))
                continue
            if expires_at > current_time:
                filtered_list.append(token_data)
        filtered_list.append(revoked_token_data)
        self._set_key(self.revocation_key, filtered_list, lock)
Example #25
 def _get_active_by_window_joined(
     cls, context, begin, end=None, project_id=None, host=None, expected_attrs=None, use_slave=False
 ):
     # NOTE(mriedem): We need to convert the begin/end timestamp strings
     # to timezone-aware datetime objects for the DB API call.
     begin = timeutils.parse_isotime(begin)
     end = timeutils.parse_isotime(end) if end else None
     db_inst_list = db.instance_get_active_by_window_joined(
         context, begin, end, project_id, host, columns_to_join=_expected_cols(expected_attrs)
     )
     return _make_instance_list(context, cls(), db_inst_list, expected_attrs)
Example #26
    def test_token_expiry_maintained(self, mock_utcnow):
        now = datetime.datetime.utcnow()
        mock_utcnow.return_value = now
        foo_client = self.get_client(self.user_foo)

        orig_token = foo_client.service_catalog.catalog["token"]
        mock_utcnow.return_value = now + datetime.timedelta(seconds=1)
        reauthenticated_token = foo_client.tokens.authenticate(token=foo_client.auth_token)

        self.assertCloseEnoughForGovernmentWork(
            timeutils.parse_isotime(orig_token["expires"]), timeutils.parse_isotime(reauthenticated_token.expires)
        )
Example #27
    def assertTimestampsEqual(self, expected, actual):
        # The timestamp that we get back when parsing the payload may not
        # exactly match the timestamp that was put in the payload due to
        # conversion to and from a float.

        exp_time = timeutils.parse_isotime(expected)
        actual_time = timeutils.parse_isotime(actual)

        # the granularity of timestamp string is microseconds and it's only the
        # last digit in the representation that's different, so use a delta
        # just above nanoseconds.
        return self.assertCloseEnoughForGovernmentWork(exp_time, actual_time, delta=1e-05)
Example #28
    def statistics(self, q=None, groupby=None, period=None, aggregate=None):
        """Computes the statistics of the samples in the time range given.

        :param q: Filter rules for the data to be returned.
        :param groupby: Fields for group by aggregation
        :param period: Returned result will be an array of statistics for a
                       period long of that number of seconds.
        :param aggregate: The selectable aggregation functions to be applied.
        """

        rbac.enforce('compute_statistics', pecan.request)

        q = q or []
        groupby = groupby or []
        aggregate = aggregate or []

        if period and period < 0:
            raise base.ClientSideError(_("Period must be positive."))

        kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__)
        kwargs['meter'] = self.meter_name
        f = storage.SampleFilter(**kwargs)
        g = _validate_groupby_fields(groupby)

        aggregate = utils.uniq(aggregate, ['func', 'param'])
        # Find the original timestamp in the query to use for clamping
        # the duration returned in the statistics.
        start = end = None
        for i in q:
            if i.field == 'timestamp' and i.op in ('lt', 'le'):
                end = timeutils.parse_isotime(i.value).replace(
                    tzinfo=None)
            elif i.field == 'timestamp' and i.op in ('gt', 'ge'):
                start = timeutils.parse_isotime(i.value).replace(
                    tzinfo=None)

        try:
            computed = pecan.request.storage_conn.get_meter_statistics(
                f, period, g, aggregate)
            LOG.debug(_('computed value coming from %r'),
                      pecan.request.storage_conn)

            return [Statistics(start_timestamp=start,
                               end_timestamp=end,
                               **c.as_dict())
                    for c in computed]
        except OverflowError as e:
            params = dict(period=period, err=e)
            raise base.ClientSideError(
                _("Invalid period %(period)s: %(err)s") % params)
Example #29
def build_token_values(token_data):
    token_values = {
        'expires_at': timeutils.normalize_time(
            timeutils.parse_isotime(token_data['expires_at'])),
        'issued_at': timeutils.normalize_time(
            timeutils.parse_isotime(token_data['issued_at']))}

    user = token_data.get('user')
    if user is not None:
        token_values['user_id'] = user['id']
        token_values['identity_domain_id'] = user['domain']['id']
    else:
        token_values['user_id'] = None
        token_values['identity_domain_id'] = None

    project = token_data.get('project', token_data.get('tenant'))
    if project is not None:
        token_values['project_id'] = project['id']
        token_values['assignment_domain_id'] = project['domain']['id']
    else:
        token_values['project_id'] = None
        token_values['assignment_domain_id'] = None

    role_list = []
    roles = token_data.get('roles')
    if roles is not None:
        for role in roles:
            role_list.append(role['id'])
    token_values['roles'] = role_list

    trust = token_data.get('OS-TRUST:trust')
    if trust is None:
        token_values['trust_id'] = None
        token_values['trustor_id'] = None
        token_values['trustee_id'] = None
    else:
        token_values['trust_id'] = trust['id']
        token_values['trustor_id'] = trust['trustor_user']['id']
        token_values['trustee_id'] = trust['trustee_user']['id']

    oauth1 = token_data.get('OS-OAUTH1')
    if oauth1 is None:
        token_values['consumer_id'] = None
        token_values['access_token_id'] = None
    else:
        token_values['consumer_id'] = oauth1['consumer_id']
        token_values['access_token_id'] = oauth1['access_token_id']
    return token_values
Example #30
    def test_token_model_v3(self):
        token_data = token_model.KeystoneToken(uuid.uuid4().hex, self.v3_sample_token)
        self.assertIs(token_model.V3, token_data.version)
        expires = timeutils.normalize_time(timeutils.parse_isotime(self.v3_sample_token["token"]["expires_at"]))
        issued = timeutils.normalize_time(timeutils.parse_isotime(self.v3_sample_token["token"]["issued_at"]))
        self.assertEqual(expires, token_data.expires)
        self.assertEqual(issued, token_data.issued)
        self.assertEqual(self.v3_sample_token["token"]["user"]["id"], token_data.user_id)
        self.assertEqual(self.v3_sample_token["token"]["user"]["name"], token_data.user_name)
        self.assertEqual(self.v3_sample_token["token"]["user"]["domain"]["id"], token_data.user_domain_id)
        self.assertEqual(self.v3_sample_token["token"]["user"]["domain"]["name"], token_data.user_domain_name)
        self.assertEqual(self.v3_sample_token["token"]["project"]["domain"]["id"], token_data.project_domain_id)
        self.assertEqual(self.v3_sample_token["token"]["project"]["domain"]["name"], token_data.project_domain_name)
        self.assertEqual(self.v3_sample_token["token"]["OS-TRUST:trust"]["id"], token_data.trust_id)
        self.assertEqual(self.v3_sample_token["token"]["OS-TRUST:trust"]["trustor_user_id"], token_data.trustor_user_id)
        self.assertEqual(self.v3_sample_token["token"]["OS-TRUST:trust"]["trustee_user_id"], token_data.trustee_user_id)
        # Project Scoped Token
        self.assertRaises(exception.UnexpectedError, getattr, token_data, "domain_id")
        self.assertRaises(exception.UnexpectedError, getattr, token_data, "domain_name")
        self.assertFalse(token_data.domain_scoped)
        self.assertEqual(self.v3_sample_token["token"]["project"]["id"], token_data.project_id)
        self.assertEqual(self.v3_sample_token["token"]["project"]["name"], token_data.project_name)
        self.assertTrue(token_data.project_scoped)
        self.assertTrue(token_data.scoped)
        self.assertTrue(token_data.trust_scoped)
        self.assertEqual([r["id"] for r in self.v3_sample_token["token"]["roles"]], token_data.role_ids)
        self.assertEqual([r["name"] for r in self.v3_sample_token["token"]["roles"]], token_data.role_names)
        token_data.pop("project")
        self.assertFalse(token_data.project_scoped)
        self.assertFalse(token_data.scoped)
        self.assertRaises(exception.UnexpectedError, getattr, token_data, "project_id")
        self.assertRaises(exception.UnexpectedError, getattr, token_data, "project_name")
        self.assertFalse(token_data.project_scoped)
        domain_id = uuid.uuid4().hex
        domain_name = uuid.uuid4().hex
        token_data["domain"] = {"id": domain_id, "name": domain_name}
        self.assertEqual(domain_id, token_data.domain_id)
        self.assertEqual(domain_name, token_data.domain_name)
        self.assertTrue(token_data.domain_scoped)

        token_data["audit_ids"] = [uuid.uuid4().hex]
        self.assertEqual(token_data.audit_id, token_data["audit_ids"][0])
        self.assertEqual(token_data.audit_chain_id, token_data["audit_ids"][0])
        token_data["audit_ids"].append(uuid.uuid4().hex)
        self.assertEqual(token_data.audit_chain_id, token_data["audit_ids"][1])
        del token_data["audit_ids"]
        self.assertIsNone(token_data.audit_id)
        self.assertIsNone(token_data.audit_chain_id)
Example #31
 def sample(self, ctxt, publisher_id, event_type, payload, metadata):
     events = [
         models.Event(message_id=ev['message_id'],
                      event_type=ev['event_type'],
                      generated=timeutils.normalize_time(
                          timeutils.parse_isotime(ev['generated'])),
                      traits=[
                          models.Trait(
                              name, dtype,
                              models.Trait.convert_value(dtype, value))
                          for name, dtype, value in ev['traits']
                      ],
                      raw=ev.get('raw', {})) for ev in payload
         if publisher_utils.verify_signature(
             ev, cfg.CONF.publisher.telemetry_secret)
     ]
     try:
         with self.publish_context as p:
             p(events)
     except Exception:
         if not cfg.CONF.notification.ack_on_event_error:
             return oslo_messaging.NotificationResult.REQUEUE
         raise
     return oslo_messaging.NotificationResult.HANDLED
Example #32
    def check_delete_complete(self, deleted_at):
        if self.resource_id is None:
            return True

        try:
            node = self.node(self.lb())
        except (NotFound, LoadbalancerDeleted, NodeNotFound):
            return True

        if isinstance(deleted_at, six.string_types):
            deleted_at = timeutils.parse_isotime(deleted_at)

        deleted_at = timeutils.normalize_time(deleted_at)
        waited = timeutils.utcnow() - deleted_at
        timeout_secs = self.properties[self.DRAINING_TIMEOUT]
        timeout_secs = datetime.timedelta(seconds=timeout_secs)

        if waited > timeout_secs:
            try:
                node.delete()
            except NotFound:
                return True
            except Exception as exc:
                if lb_immutable(exc):
                    return False
                raise
        elif node.condition != self.DRAINING:
            node.condition = self.DRAINING
            try:
                node.update()
            except Exception as exc:
                if lb_immutable(exc):
                    return False
                raise

        return False
Example #33
    def handle_sample(self, context, s):
        """Handle a sample, converting if necessary."""
        LOG.debug('handling sample %s', s)
        key = s.name + s.resource_id
        prev = self.cache.get(key)
        timestamp = timeutils.parse_isotime(s.timestamp)
        self.cache[key] = (s.volume, timestamp)

        if prev:
            prev_volume = prev[0]
            prev_timestamp = prev[1]
            time_delta = timeutils.delta_seconds(prev_timestamp, timestamp)
            # disallow violations of the arrow of time
            if time_delta < 0:
                LOG.warning(_('dropping out-of-time-order sample: %s'), (s,))
                # Reset the cache to the newer sample.
                self.cache[key] = prev
                return None
            # we only allow negative volume deltas for noncumulative
            # samples, whereas for cumulative we assume that a reset has
            # occurred in the interim so that the current volume gives a
            # lower bound on growth
            volume_delta = (s.volume - prev_volume
                            if (prev_volume <= s.volume or
                                s.type != sample.TYPE_CUMULATIVE)
                            else s.volume)
            rate_of_change = ((1.0 * volume_delta / time_delta)
                              if time_delta else 0.0)

            s = self._convert(s, rate_of_change)
            LOG.debug('converted to: %s', s)
        else:
            LOG.warning(_('dropping sample with no predecessor: %s'),
                        (s,))
            s = None
        return s
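A quick numeric check of the rate-of-change conversion above, with made-up volumes: a cumulative counter growing from 100 to 160 over 30 seconds converts to 2.0 units per second.

prev_volume, volume = 100, 160
time_delta = 30.0  # seconds between the two samples
volume_delta = volume - prev_volume  # cumulative counter grew monotonically
rate_of_change = 1.0 * volume_delta / time_delta if time_delta else 0.0
print(rate_of_change)  # 2.0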
Example #34
    def _check_status_complete(self, started_at, wait_secs):
        def simulated_effort():
            client_name = self.properties[self.CLIENT_NAME]
            self.entity = self.properties[self.ENTITY_NAME]
            if client_name and self.entity:
                # Allow the user to set the value to a real resource id.
                entity_id = self.data().get('value') or self.resource_id
                try:
                    obj = getattr(self.client(name=client_name), self.entity)
                    obj.get(entity_id)
                except Exception as exc:
                    LOG.debug('%s.%s(%s) %s' % (client_name, self.entity,
                                                entity_id, six.text_type(exc)))
            else:
                # just sleep some more
                eventlet.sleep(1)

        if isinstance(started_at, six.string_types):
            started_at = timeutils.parse_isotime(started_at)

        started_at = timeutils.normalize_time(started_at)
        waited = timeutils.utcnow() - started_at
        LOG.info(_LI("Resource %(name)s waited %(waited)s/%(sec)s seconds"),
                 {'name': self.name,
                  'waited': waited,
                  'sec': wait_secs})

        # wait_secs < 0 is an infinite wait time.
        if wait_secs >= 0 and waited > datetime.timedelta(seconds=wait_secs):
            fail_prop = self.properties[self.FAIL]
            if fail_prop and self.action != self.DELETE:
                raise ValueError("Test Resource failed %s" % self.name)
            return True

        simulated_effort()
        return False
Example #35
    def test_create_token(self):

        token_client = self.non_admin_token_client

        # get a token for the user
        creds = self.os.credentials
        username = creds.username
        password = creds.password
        tenant_name = creds.tenant_name

        body = token_client.auth(username, password, tenant_name)

        self.assertNotEmpty(body['token']['id'])
        self.assertIsInstance(body['token']['id'], six.string_types)

        now = timeutils.utcnow()
        expires_at = timeutils.normalize_time(
            timeutils.parse_isotime(body['token']['expires']))
        self.assertGreater(expires_at, now)

        self.assertEqual(body['token']['tenant']['id'], creds.tenant_id)
        self.assertEqual(body['token']['tenant']['name'], tenant_name)

        self.assertEqual(body['user']['id'], creds.user_id)
Example #36
    def authenticate(self, request, auth_info, auth_context):
        """Turn a signed request with an access key into a keystone token."""
        oauth_headers = oauth.get_oauth_headers(request.headers)
        access_token_id = oauth_headers.get('oauth_token')

        if not access_token_id:
            raise exception.ValidationError(attribute='oauth_token',
                                            target='request')

        acc_token = self.oauth_api.get_access_token(access_token_id)

        expires_at = acc_token['expires_at']
        if expires_at:
            now = timeutils.utcnow()
            expires = timeutils.normalize_time(
                timeutils.parse_isotime(expires_at))
            if now > expires:
                raise exception.Unauthorized(_('Access token is expired'))

        url = controller.V3Controller.base_url(request.context_dict,
                                               request.path_info)
        access_verifier = oauth.ResourceEndpoint(
            request_validator=validator.OAuthValidator(),
            token_generator=oauth.token_generator)
        result, request = access_verifier.validate_protected_resource_request(
            url,
            http_method='POST',
            body=request.context_dict['query_string'],
            headers=request.headers,
            realms=None)
        if not result:
            msg = _('Could not validate the access token')
            raise exception.Unauthorized(msg)
        auth_context['user_id'] = acc_token['authorizing_user_id']
        auth_context['access_token_id'] = access_token_id
        auth_context['project_id'] = acc_token['project_id']
Example #37
    def _get_castellan_object(self, secret):
        """Creates a Castellan managed object given the Barbican secret.

        :param secret: the secret from barbican with the payload of data
        :returns: the castellan object
        """
        secret_type = op_data.OpaqueData
        for castellan_type, barbican_type in self._secret_type_dict.items():
            if barbican_type == secret.secret_type:
                secret_type = castellan_type

        secret_data = self._get_secret_data(secret)

        # convert created ISO8601 in Barbican to POSIX
        if secret.created:
            time_stamp = timeutils.parse_isotime(str(
                secret.created)).timetuple()
            created = calendar.timegm(time_stamp)
        else:
            created = None

        if issubclass(secret_type, key_base_class.Key):
            return secret_type(secret.algorithm, secret.bit_length,
                               secret_data, secret.name, created)
        else:
            return secret_type(secret_data, secret.name, created)
Example #38
    def test_replace_ok(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        new_image = 'rc_example_B_image'
        response = self.get_json('/rcs/%s/%s' %
                                 (self.rc.uuid, self.rc.bay_uuid))
        self.assertNotEqual(new_image, response['images'][0])

        response = self.patch_json(
            '/rcs/%s/%s' % (self.rc.uuid, self.rc.bay_uuid), [{
                'path': '/images/0',
                'value': new_image,
                'op': 'replace'
            }])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)

        response = self.get_json('/rcs/%s/%s' %
                                 (self.rc.uuid, self.rc.bay_uuid))
        self.assertEqual(new_image, response['images'][0])
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)
Example #39
    def test_replace_ok(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        new_goal = 'BALANCE_LOAD'
        response = self.get_json('/audit_templates/%s' %
                                 self.audit_template.uuid)
        self.assertNotEqual(new_goal, response['goal'])

        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.uuid, [{
                'path': '/goal',
                'value': new_goal,
                'op': 'replace'
            }])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)

        response = self.get_json('/audit_templates/%s' %
                                 self.audit_template.uuid)
        self.assertEqual(new_goal, response['goal'])
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)
Example #40
    def test_replace_goal_uuid_by_name(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        new_goal_uuid = self.fake_goal2.uuid
        response = self.get_json(
            urlparse.quote('/audit_templates/%s' % self.audit_template.name))
        self.assertNotEqual(new_goal_uuid, response['goal_uuid'])

        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.name, [{
                'path': '/goal',
                'value': new_goal_uuid,
                'op': 'replace'
            }])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)

        response = self.get_json('/audit_templates/%s' %
                                 self.audit_template.name)
        self.assertEqual(new_goal_uuid, response['goal_uuid'])
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)
Example #41
 def _change_since_result_filter_hook(self, query, filters):
     # This block handles the changed_since query: get the changed_since
     # string from the filters, convert it to a datetime, and compare it
     # with the (datetime-typed) timestamp column in the DB.
     values = filters and filters.get(CHANGED_SINCE, [])
     if not values:
         return query
     data = filters[CHANGED_SINCE][0]
     try:
         changed_since_string = timeutils.parse_isotime(data)
     except Exception:
         msg = _LW("The input %s must be in the "
                   "following format: YYYY-MM-DDTHH:MM:SSZ") % CHANGED_SINCE
         raise n_exc.InvalidInput(error_message=msg)
     changed_since = (timeutils.normalize_time(changed_since_string))
     target_model_class = query.column_descriptions[0]['type']
     query = query.join(
         standard_attr.StandardAttribute,
         target_model_class.standard_attr_id ==
         standard_attr.StandardAttribute.id).filter(
             standard_attr.StandardAttribute.updated_at >= changed_since)
     return query
Example #42
    def test_trusted_filter_update_cache_timezone(self, req_mock):
        oat_data = {
            "hosts": [{
                "host_name": "node1",
                "trust_lvl": "untrusted",
                "vtime": "2012-09-09T05:10:40-04:00"
            }]
        }
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'untrusted'}
        filter_properties = {
            'context': mock.sentinel.ctx,
            'instance_type': {
                'memory_mb': 1024,
                'extra_specs': extra_specs
            }
        }
        host = fakes.FakeHostState('host1', 'node1', {})

        timeutils.set_time_override(
            timeutils.normalize_time(
                timeutils.parse_isotime("2012-09-09T09:10:40Z")))

        self.filt_cls.host_passes(host, filter_properties)  # Fill the caches

        req_mock.reset_mock()
        self.filt_cls.host_passes(host, filter_properties)
        self.assertFalse(req_mock.called)

        req_mock.reset_mock()
        timeutils.advance_time_seconds(
            CONF.trusted_computing.attestation_auth_timeout - 10)
        self.filt_cls.host_passes(host, filter_properties)
        self.assertFalse(req_mock.called)

        timeutils.clear_time_override()
Example #43
    def _validate_data_type(self, target):
        if not self.values:
            msg = ("Rule '%(rule)s' contains empty value")
            raise exception.ValidationError(msg % {"rule": self})

        special_attribute = self._attribute_special_field(target)
        if special_attribute:
            attribute_info = target.get(special_attribute)
        else:
            attribute_info = target.get(self.attribute)

        for value in self.values:
            error = False
            if attribute_info[1] == 'string' and not isinstance(
                    value, six.string_types):
                error = True
            elif attribute_info[1] == 'number':
                if not strutils.is_int_like(value):
                    error = True
            elif attribute_info[1] == 'uuid':
                if not uuidutils.is_uuid_like(value):
                    error = True
            elif attribute_info[1] == 'datetime':
                try:
                    timeutils.parse_isotime(value)
                except ValueError:
                    error = True
            elif attribute_info[1] == 'enum':
                if value not in attribute_info[3]:
                    msg = ("Rule '%(rule)s' contains data type '%(type)s' "
                           "with invalid value. It should be one of "
                           "%(valid_value)s")
                    raise exception.ValidationError(
                        msg % {
                            "rule": self,
                            "valid_value": ",".join(attribute_info[3]),
                            'type': attribute_info[1]
                        })

            if error:
                msg = ("Rule '%(rule)s' contains invalid data type for value "
                       "'%(value)s'. The data type should be '%(type)s'")
                raise exception.ValidationError(msg % {
                    "rule": self,
                    "value": value,
                    'type': attribute_info[1]
                })

            # Also, check whether the data type is supported by operator
            if attribute_info[1] not in \
                    self.OPERATOR_SUPPORTED_DATA_TYPES.get(self.operator):
                msg = ("Rule '%(rule)s' contains operator '%(operator)s' "
                       "which doesn't support data type '%(type)s' for "
                       "attribute '%(attribute)s'")
                raise exception.ValidationError(
                    msg % {
                        "rule": self,
                        "operator": self.operator,
                        'type': attribute_info[1],
                        'attribute': self.attribute
                    })
Example #44
    def get_version(cls, payload, timestamp=None, preferred_date_field=None):
        """Combine <preferred_date_field>|updated_at|created_at epoch with
        notification timestamp as a version number, format is
        <right 9 digits update epoch(create epoch)><right 9 digits timestamp
        in milliseconds>, if timestamp is not present(sync indexing), fill in
        9 digits of zero instead.

        The reason we combine update time and timestamp together is the
        precision of update time is limit to seconds. It's not accurate enough
        to use as a version.

        The max version in Elasticsearch is 9.2e+18, allowing 19 digits at
        most. Our version is 18 digits long, leaves 1 digit for conservation.

        The updated epoch is 10 digits long, we strip off its leading digit,
        concatenate it with the right 9 digits of timestamp in milliseconds,
        and we get a 18 digits long version.

        The truncating has some potential things to be noted, similar to Y2K
        problem.

        Let's say we have an updated epoch 1450161655. Stripping off the
        leading '1' from the current epoch seconds 'rebases' our epoch around
        1984(450161655). By the time we get to an updated epoch beginning '199'
        we're somewhere around 2033, and truncating epoch to 2001. Once the
        clock flips round to begin '200'(year 2033) things will stop working
        because we'll suddenly be using epoch that look like they're from 1969.
        We can address this before that happens; worst case is that you'd have
        to reindex everything, or reset the version.

        The "timestamp" has similiar issues. When the "timestamp" overflowed
        the 9-digit field, time becomes indistinguishable. The 9 digits
        millisecond precision gives us around 27 hours. It should be enough to
        distinguish notifications with different timestamps.
        """
        updated = None

        # Pick the best/preferred date field to calculate version from
        date_fields = ('updated_at', 'created_at')
        if preferred_date_field:
            date_fields = (preferred_date_field,) + date_fields

        for date_field in date_fields:
            if date_field and payload.get(date_field):
                updated = payload.get(date_field)
                break
        else:
            date_fields_str = ', '.join(date_fields)
            msg = ('Failed to build elasticsearch version; none of %s '
                   'found in payload: %s' % (date_fields_str, payload))
            raise exception.SearchlightException(message=msg)

        updated_obj = timeutils.parse_isotime(updated)
        updated_epoch = int(calendar.timegm(updated_obj.utctimetuple()))
        if timestamp:
            timestamp_obj = timeutils.parse_isotime(timestamp)
            timestamp_epoch = int(calendar.timegm(
                timestamp_obj.utctimetuple()))
            timestamp_milli = (timestamp_epoch * 1000 +
                               timestamp_obj.microsecond // 1000)
            truncate_timestamp = str(timestamp_milli)[-9:].zfill(9)
            # Truncate the updated epoch because we have run out of digits.
            return '%s%s' % (str(updated_epoch)[-9:], truncate_timestamp)
        else:
            return '%s%s' % (str(updated_epoch)[-9:], '0' * 9)
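A short worked example (not from the original source; the date is arbitrary) of the 18-digit layout described in the docstring above:

import calendar

from oslo_utils import timeutils

updated = timeutils.parse_isotime('2015-12-15T06:00:55Z')
updated_epoch = int(calendar.timegm(updated.utctimetuple()))  # 1450159255

# Right 9 digits of the update epoch, then 9 digits of milliseconds
# (all zeros here, as for sync indexing without a notification timestamp).
version = '%s%s' % (str(updated_epoch)[-9:], '0' * 9)
assert version == '450159255000000000'
assert len(version) == 18  # stays below Elasticsearch's 9.2e+18 limit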
Example #45
 def _convert_time_string(date_time_string):
     dt = timeutils.parse_isotime(date_time_string)
     dt = timeutils.normalize_time(dt)
     return dt
Example #46
 def test_dt_deserializer(self):
     dt = timeutils.parse_isotime('1955-11-05T00:00:00Z')
     self.assertEqual(dt, utils.dt_deserializer(timeutils.isotime(dt)))
     self.assertIsNone(utils.dt_deserializer(None))
     self.assertRaises(ValueError, utils.dt_deserializer, 'foo')
Example #47
    def test_stored_data_processing(self, get_mock, store_mock):
        cfg.CONF.set_override('store_data', 'swift', 'processing')

        # ramdisk data copy
        # please mind the data is changed during processing
        ramdisk_data = json.dumps(copy.deepcopy(self.data))
        get_mock.return_value = ramdisk_data

        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        self.cli.node.set_power_state.assert_called_once_with(self.uuid,
                                                              'reboot')

        res = self.call_continue(self.data)
        self.assertEqual({'uuid': self.uuid}, res)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        status = self.call_get_status(self.uuid)
        inspect_started_at = timeutils.parse_isotime(status['started_at'])
        self.check_status(status, finished=True, state=istate.States.finished)

        res = self.call_reapply(self.uuid)
        self.assertEqual(202, res.status_code)
        self.assertEqual('', res.text)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=True, state=istate.States.finished)

        # checks the started_at updated in DB is correct
        reapply_started_at = timeutils.parse_isotime(status['started_at'])
        self.assertLess(inspect_started_at, reapply_started_at)

        # reapply request data
        get_mock.assert_called_once_with(self.uuid,
                                         suffix='UNPROCESSED')

        # store ramdisk data, store processing result data, store
        # reapply processing result data; the ordering isn't
        # guaranteed as store ramdisk data runs in a background
        # thread; however, the last call always has to be the reapply
        # processing result data
        store_ramdisk_call = mock.call(mock.ANY, self.uuid,
                                       suffix='UNPROCESSED')
        store_processing_call = mock.call(mock.ANY, self.uuid,
                                          suffix=None)
        self.assertEqual(3, len(store_mock.call_args_list))
        self.assertIn(store_ramdisk_call,
                      store_mock.call_args_list[0:2])
        self.assertIn(store_processing_call,
                      store_mock.call_args_list[0:2])
        self.assertEqual(store_processing_call,
                         store_mock.call_args_list[2])

        # second reapply call
        get_mock.return_value = ramdisk_data
        res = self.call_reapply(self.uuid)
        self.assertEqual(202, res.status_code)
        self.assertEqual('', res.text)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        # reapply saves the result
        self.assertEqual(4, len(store_mock.call_args_list))
        self.assertEqual(store_processing_call,
                         store_mock.call_args_list[-1])
Example #48
    def _get_servers(self, req, is_detail):
        """Returns a list of servers, based on any search options specified."""

        search_opts = {}
        search_opts.update(req.GET)

        context = req.environ['nova.context']
        remove_invalid_options(context, search_opts,
                               self._get_server_search_options())

        # Verify search by 'status' contains a valid status.
        # Convert it to filter by vm_state or task_state for compute_api.
        search_opts.pop('status', None)
        if 'status' in req.GET.keys():
            statuses = req.GET.getall('status')
            states = common.task_and_vm_state_from_status(statuses)
            vm_state, task_state = states
            if not vm_state and not task_state:
                return {'servers': []}
            search_opts['vm_state'] = vm_state
            # When we search by vm state, task state will return 'default'.
            # So we don't need task_state search_opt.
            if 'default' not in task_state:
                search_opts['task_state'] = task_state

        if 'changes-since' in search_opts:
            try:
                parsed = timeutils.parse_isotime(search_opts['changes-since'])
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
            search_opts['changes-since'] = parsed

        # By default, compute's get_all() will return deleted instances.
        # If an admin hasn't specified a 'deleted' search option, we need
        # to filter out deleted instances by setting the filter ourselves.
        # ... Unless 'changes-since' is specified, because 'changes-since'
        # should return recently deleted instances according to the API spec.

        if 'deleted' not in search_opts:
            if 'changes-since' not in search_opts:
                # No 'changes-since', so we only want non-deleted servers
                search_opts['deleted'] = False

        if search_opts.get("vm_state") == ['deleted']:
            if context.is_admin:
                search_opts['deleted'] = True
            else:
                msg = _("Only administrators may list deleted instances")
                raise exc.HTTPForbidden(explanation=msg)

        # If tenant_id is passed as a search parameter this should
        # imply that all_tenants is also enabled unless explicitly
        # disabled. Note that the tenant_id parameter is filtered out
        # by remove_invalid_options above unless the requestor is an
        # admin.

        # TODO(gmann): the 'all_tenants' flag should not be required when
        # searching with 'tenant_id'. Ref bug# 1185290; a microversion
        # could achieve the above-mentioned behavior by uncommenting the
        # code below.

        # if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
        # We do not need to add the all_tenants flag if the tenant
        # id associated with the token is the tenant id
        # specified. This is done so a request that does not need
        # the all_tenants flag does not fail because of lack of
        # policy permission for compute:get_all_tenants when it
        # doesn't actually need it.
        # if context.project_id != search_opts.get('tenant_id'):
        #    search_opts['all_tenants'] = 1

        # If all_tenants is passed with 0 or false as the value, remove it
        # from the search options. Passing all_tenants with no value at all
        # is considered to enable the feature.
        all_tenants = search_opts.get('all_tenants')
        if all_tenants:
            try:
                if not strutils.bool_from_string(all_tenants, True):
                    del search_opts['all_tenants']
            except ValueError as err:
                raise exception.InvalidInput(six.text_type(err))

        if 'all_tenants' in search_opts:
            policy.enforce(context, 'compute:get_all_tenants', {
                'project_id': context.project_id,
                'user_id': context.user_id
            })
            del search_opts['all_tenants']
        else:
            if context.project_id:
                search_opts['project_id'] = context.project_id
            else:
                search_opts['user_id'] = context.user_id

        limit, marker = common.get_limit_and_marker(req)
        sort_keys, sort_dirs = common.get_sort_params(req.params)
        try:
            instance_list = self.compute_api.get_all(
                context,
                search_opts=search_opts,
                limit=limit,
                marker=marker,
                want_objects=True,
                expected_attrs=['pci_devices'],
                sort_keys=sort_keys,
                sort_dirs=sort_dirs)
        except exception.MarkerNotFound:
            msg = _('marker [%s] not found') % marker
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound:
            LOG.debug("Flavor '%s' could not be found ", search_opts['flavor'])
            instance_list = objects.InstanceList()

        if is_detail:
            instance_list.fill_faults()
            response = self._view_builder.detail(req, instance_list)
        else:
            response = self._view_builder.index(req, instance_list)
        req.cache_db_instances(instance_list)
        return response
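A hedged sketch (not nova code) of the 'changes-since' validation above: parse_isotime() raises ValueError on malformed input, which the handler maps to an HTTP 400 response.

from oslo_utils import timeutils

parsed = timeutils.parse_isotime('2015-10-20T18:51:17Z')  # tz-aware datetime
try:
    timeutils.parse_isotime('not-a-timestamp')
except ValueError:
    pass  # the handler above raises HTTPBadRequest here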
Example #49
def _convert_time_string(date_time_string):
    dt = timeutils.parse_isotime(date_time_string)
    dt = timeutils.normalize_time(dt)
    timestamp = (dt - datetime.datetime(1970, 1, 1)).total_seconds()
    return timestamp
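A small usage sketch for the helper above (assuming its oslo.utils and datetime imports are in place): normalize_time() converts aware datetimes to naive UTC, so timezone offsets cancel out before the epoch arithmetic.

# One hour past the epoch, written with a +01:00 offset, is the epoch itself.
assert _convert_time_string('1970-01-01T01:00:00+01:00') == 0.0
assert _convert_time_string('2015-10-20T18:51:17Z') == 1445367077.0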
Example #50
    def _check_node_firmware_update(self, task):
        """Check the progress of running firmware update on a node."""

        node = task.node

        firmware_updates = node.driver_internal_info['firmware_updates']
        current_update = firmware_updates[0]

        try:
            update_service = redfish_utils.get_update_service(node)
        except exception.RedfishConnectionError as e:
            # If the BMC firmware is being updated, the BMC will be
            # unavailable for some amount of time.
            LOG.warning(
                'Unable to communicate with firmware update service '
                'on node %(node)s. Will try again on the next poll. '
                'Error: %(error)s', {
                    'node': node.uuid,
                    'error': e
                })
            return

        wait_start_time = current_update.get('wait_start_time')
        if wait_start_time:
            wait_start = timeutils.parse_isotime(wait_start_time)

            elapsed_time = timeutils.utcnow(True) - wait_start
            if elapsed_time.seconds >= current_update['wait']:
                LOG.debug(
                    'Finished waiting after firmware update '
                    '%(firmware_image)s on node %(node)s. '
                    'Elapsed time: %(seconds)s seconds', {
                        'firmware_image': current_update['url'],
                        'node': node.uuid,
                        'seconds': elapsed_time.seconds
                    })
                current_update.pop('wait', None)
                current_update.pop('wait_start_time', None)

                task.upgrade_lock()
                self._continue_firmware_updates(task, update_service,
                                                firmware_updates)
            else:
                LOG.debug(
                    'Continuing to wait after firmware update '
                    '%(firmware_image)s on node %(node)s. '
                    'Elapsed time: %(seconds)s seconds', {
                        'firmware_image': current_update['url'],
                        'node': node.uuid,
                        'seconds': elapsed_time.seconds
                    })

            return

        try:
            task_monitor = update_service.get_task_monitor(
                current_update['task_monitor'])
        except sushy.exceptions.ResourceNotFoundError:
            # The BMC deleted the Task before we could query it
            LOG.warning(
                'Firmware update completed for node %(node)s, '
                'firmware %(firmware_image)s, but success of the '
                'update is unknown.  Assuming update was successful.', {
                    'node': node.uuid,
                    'firmware_image': current_update['url']
                })
            task.upgrade_lock()
            self._continue_firmware_updates(task, update_service,
                                            firmware_updates)
            return

        if not task_monitor.is_processing:
            # The last response does not necessarily contain a Task,
            # so get it
            sushy_task = task_monitor.get_task()

            # Only parse the messages if the BMC did not return parsed
            # messages
            if not sushy_task.messages[0].message:
                sushy_task.parse_messages()

            messages = [m.message for m in sushy_task.messages]

            if (sushy_task.task_state == sushy.TASK_STATE_COMPLETED
                    and sushy_task.task_status
                    in [sushy.HEALTH_OK, sushy.HEALTH_WARNING]):
                LOG.info(
                    'Firmware update succeeded for node %(node)s, '
                    'firmware %(firmware_image)s: %(messages)s', {
                        'node': node.uuid,
                        'firmware_image': current_update['url'],
                        'messages': ", ".join(messages)
                    })

                task.upgrade_lock()
                self._continue_firmware_updates(task, update_service,
                                                firmware_updates)
            else:
                error_msg = (_('Firmware update failed for node %(node)s, '
                               'firmware %(firmware_image)s. '
                               'Error: %(errors)s') % {
                                   'node': node.uuid,
                                   'firmware_image': current_update['url'],
                                   'errors': ", ".join(messages)
                               })
                LOG.error(error_msg)

                task.upgrade_lock()
                self._clear_firmware_updates(node)
                manager_utils.cleaning_error_handler(task, error_msg)
        else:
            LOG.debug(
                'Firmware update in progress for node %(node)s, '
                'firmware %(firmware_image)s.', {
                    'node': node.uuid,
                    'firmware_image': current_update['url']
                })
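A minimal sketch (hypothetical values, not taken from the driver) of the wait bookkeeping above: wait_start_time is stored as an ISO 8601 string, so the elapsed wait is recovered by parsing it and subtracting from an aware utcnow().

from oslo_utils import timeutils

wait_start = timeutils.parse_isotime('2021-05-01T10:00:00+00:00')
elapsed = timeutils.utcnow(True) - wait_start  # aware - aware -> timedelta
# Note that .seconds is only the seconds component of the timedelta; it
# equals the total elapsed time only for waits shorter than one day.
print(elapsed.seconds)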
Example #51
def _convert_timestamps_to_datetimes(image_meta):
    """Returns image with timestamp fields converted to datetime objects."""
    for attr in ['created_at', 'updated_at', 'deleted_at']:
        if image_meta.get(attr):
            image_meta[attr] = timeutils.parse_isotime(image_meta[attr])
    return image_meta
Example #52
def _parse_and_normalize_time(time_data):
    if isinstance(time_data, six.string_types):
        time_data = timeutils.parse_isotime(time_data)
    return timeutils.normalize_time(time_data)
Example #53
def iso2dt(iso_date):
    """iso8601 format to datetime."""
    iso_dt = timeutils.parse_isotime(iso_date)
    trans_dt = timeutils.normalize_time(iso_dt)
    return trans_dt
Example #54
def build_token_values(token_data):

    token_expires_at = timeutils.parse_isotime(token_data['expires_at'])

    # Trim off the microseconds because the revocation event only has
    # expirations accurate to the second.
    token_expires_at = token_expires_at.replace(microsecond=0)

    token_values = {
        'expires_at':
        timeutils.normalize_time(token_expires_at),
        'issued_at':
        timeutils.normalize_time(
            timeutils.parse_isotime(token_data['issued_at'])),
        'audit_id':
        token_data.get('audit_ids', [None])[0],
        'audit_chain_id':
        token_data.get('audit_ids', [None])[-1],
    }

    user = token_data.get('user')
    if user is not None:
        token_values['user_id'] = user['id']
        # Federated users do not have a domain; be defensive and default
        # the user's domain id to None in the federated case.
        token_values['identity_domain_id'] = user.get('domain', {}).get('id')
    else:
        token_values['user_id'] = None
        token_values['identity_domain_id'] = None

    project = token_data.get('project', token_data.get('tenant'))
    if project is not None:
        token_values['project_id'] = project['id']
        token_values['assignment_domain_id'] = project['domain']['id']
    else:
        token_values['project_id'] = None

        domain = token_data.get('domain')
        if domain is not None:
            token_values['assignment_domain_id'] = domain['id']
        else:
            token_values['assignment_domain_id'] = None

    role_list = []
    roles = token_data.get('roles')
    if roles is not None:
        for role in roles:
            role_list.append(role['id'])
    token_values['roles'] = role_list

    trust = token_data.get('OS-TRUST:trust')
    if trust is None:
        token_values['trust_id'] = None
        token_values['trustor_id'] = None
        token_values['trustee_id'] = None
    else:
        token_values['trust_id'] = trust['id']
        token_values['trustor_id'] = trust['trustor_user']['id']
        token_values['trustee_id'] = trust['trustee_user']['id']

    oauth1 = token_data.get('OS-OAUTH1')
    if oauth1 is None:
        token_values['consumer_id'] = None
        token_values['access_token_id'] = None
    else:
        token_values['consumer_id'] = oauth1['consumer_id']
        token_values['access_token_id'] = oauth1['access_token_id']
    return token_values
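An illustration (hypothetical values) of the microsecond trimming above: revocation events are only accurate to the second, so two expiry times within the same second compare equal once trimmed.

from oslo_utils import timeutils

a = timeutils.parse_isotime('2016-03-01T12:00:00.123456Z')
b = timeutils.parse_isotime('2016-03-01T12:00:00.999999Z')
assert a != b
assert a.replace(microsecond=0) == b.replace(microsecond=0)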
Example #55
    def _get_servers(self, req, is_detail):
        """Returns a list of servers, based on any search options specified."""

        search_opts = {}
        search_opts.update(req.GET)

        context = req.environ['nova.context']
        remove_invalid_options(context, search_opts,
                               self._get_server_search_options())

        # Verify search by 'status' contains a valid status.
        # Convert it to filter by vm_state or task_state for compute_api.
        search_opts.pop('status', None)
        if 'status' in req.GET.keys():
            statuses = req.GET.getall('status')
            states = common.task_and_vm_state_from_status(statuses)
            vm_state, task_state = states
            if not vm_state and not task_state:
                return {'servers': []}
            search_opts['vm_state'] = vm_state
            # When we search by vm state, task state will return 'default'.
            # So we don't need task_state search_opt.
            if 'default' not in task_state:
                search_opts['task_state'] = task_state

        if 'changes-since' in search_opts:
            try:
                parsed = timeutils.parse_isotime(search_opts['changes-since'])
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
            search_opts['changes-since'] = parsed

        # By default, compute's get_all() will return deleted instances.
        # If an admin hasn't specified a 'deleted' search option, we need
        # to filter out deleted instances by setting the filter ourselves.
        # ... Unless 'changes-since' is specified, because 'changes-since'
        # should return recently deleted instances according to the API spec.

        if 'deleted' not in search_opts:
            if 'changes-since' not in search_opts:
                # No 'changes-since', so we only want non-deleted servers
                search_opts['deleted'] = False

        if search_opts.get("vm_state") == ['deleted']:
            if context.is_admin:
                search_opts['deleted'] = True
            else:
                msg = _("Only administrators may list deleted instances")
                raise exc.HTTPForbidden(explanation=msg)

        # If all_tenants is passed with 0 or false as the value, remove it
        # from the search options. Passing all_tenants with no value at all
        # is considered to enable the feature.
        all_tenants = search_opts.get('all_tenants')
        if all_tenants:
            try:
                if not strutils.bool_from_string(all_tenants, True):
                    del search_opts['all_tenants']
            except ValueError as err:
                raise exception.InvalidInput(six.text_type(err))

        if 'all_tenants' in search_opts:
            policy.enforce(context, 'compute:get_all_tenants', {
                'project_id': context.project_id,
                'user_id': context.user_id
            })
            del search_opts['all_tenants']
        else:
            if context.project_id:
                search_opts['project_id'] = context.project_id
            else:
                search_opts['user_id'] = context.user_id

        limit, marker = common.get_limit_and_marker(req)
        # Sorting by multiple keys and directions is conditionally enabled
        sort_keys, sort_dirs = None, None
        if self.ext_mgr.is_loaded('os-server-sort-keys'):
            sort_keys, sort_dirs = common.get_sort_params(req.params)
        try:
            instance_list = self.compute_api.get_all(context,
                                                     search_opts=search_opts,
                                                     limit=limit,
                                                     marker=marker,
                                                     want_objects=True,
                                                     sort_keys=sort_keys,
                                                     sort_dirs=sort_dirs)
        except exception.MarkerNotFound:
            msg = _('marker [%s] not found') % marker
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound:
            LOG.debug("Flavor '%s' could not be found", search_opts['flavor'])
            instance_list = objects.InstanceList()

        if is_detail:
            instance_list.fill_faults()
            response = self._view_builder.detail(req, instance_list)
        else:
            response = self._view_builder.index(req, instance_list)
        req.cache_db_instances(instance_list)
        return response
Example #56
 def from_primitive(self, obj, attr, value):
     return self.coerce(obj, attr, timeutils.parse_isotime(value))
Example #57
 def expires(self):
     return timeutils.parse_isotime(self.expires_str)
Example #58
    def create_access_token(self, request):
        oauth_headers = oauth1.get_oauth_headers(request.headers)
        consumer_id = oauth_headers.get('oauth_consumer_key')
        request_token_id = oauth_headers.get('oauth_token')
        oauth_verifier = oauth_headers.get('oauth_verifier')

        if not consumer_id:
            raise exception.ValidationError(attribute='oauth_consumer_key',
                                            target='request')
        if not request_token_id:
            raise exception.ValidationError(attribute='oauth_token',
                                            target='request')
        if not oauth_verifier:
            raise exception.ValidationError(attribute='oauth_verifier',
                                            target='request')

        req_token = self.oauth_api.get_request_token(request_token_id)

        expires_at = req_token['expires_at']
        if expires_at:
            now = timeutils.utcnow()
            expires = timeutils.normalize_time(
                timeutils.parse_isotime(expires_at))
            if now > expires:
                raise exception.Unauthorized(_('Request token is expired'))

        access_verifier = oauth1.AccessTokenEndpoint(
            request_validator=validator.OAuthValidator(),
            token_generator=oauth1.token_generator)
        try:
            h, b, s = access_verifier.create_access_token_response(
                request.url,
                http_method='POST',
                body=request.params,
                headers=request.headers)
        except NotImplementedError:
            # Client key or request token validation failed. Since keystone
            # does not yet support a dummy client or a dummy request token,
            # we raise an Unauthorized exception instead.
            try:
                self.oauth_api.get_consumer(consumer_id)
            except exception.NotFound:
                msg = _('Provided consumer does not exist.')
                LOG.warning(msg)
                raise exception.Unauthorized(message=msg)
            if req_token['consumer_id'] != consumer_id:
                msg = _('Provided consumer key does not match stored '
                        'consumer key.')
                LOG.warning(msg)
                raise exception.Unauthorized(message=msg)
        # The response body is empty for one of the following reasons;
        if not b:
            if req_token['verifier'] != oauth_verifier:
                msg = _('Provided verifier does not match stored verifier')
            else:
                msg = _('Invalid signature.')
            LOG.warning(msg)
            raise exception.Unauthorized(message=msg)
        # validate the response params so the details of the failure surface.
        oauth1.validate_oauth_params(b)
        if not req_token.get('authorizing_user_id'):
            msg = _('Request Token does not have an authorizing user id.')
            LOG.warning(msg)
            raise exception.Unauthorized(message=msg)

        access_token_duration = CONF.oauth1.access_token_duration
        token_ref = self.oauth_api.create_access_token(
            request_token_id,
            access_token_duration,
            initiator=request.audit_initiator)

        result = ('oauth_token=%(key)s&oauth_token_secret=%(secret)s' % {
            'key': token_ref['id'],
            'secret': token_ref['access_secret']
        })

        if CONF.oauth1.access_token_duration > 0:
            expiry_bit = '&oauth_expires_at=%s' % (token_ref['expires_at'])
            result += expiry_bit

        headers = [('Content-Type', 'application/x-www-form-urlencoded')]
        response = wsgi.render_response(
            result,
            status=(http_client.CREATED,
                    http_client.responses[http_client.CREATED]),
            headers=headers)

        return response
Example #59
 def issued(self):
     return timeutils.parse_isotime(self.issued_str)
Example #60
def validate_expiration(token):
    token_expiration_datetime = timeutils.normalize_time(
        timeutils.parse_isotime(token.expires_at)
    )
    if timeutils.utcnow() > token_expiration_datetime:
        raise exception.Unauthorized(_('Federation token is expired'))