def make_test_data(conn, start, end, interval, event_types):
    """Populate the event store with synthetic events.

    Starting at ``start`` and stepping by ``interval`` minutes up to and
    including ``end``, record one batch per timestamp containing
    ``event_types`` events, each carrying four typed traits.

    :param conn: storage connection used to record the events
    :param start: first timestamp (datetime or parseable string)
    :param end: last timestamp, inclusive (datetime or parseable string)
    :param interval: minutes between successive batches
    :param event_types: number of distinct event types per batch
    """
    # The bounds may arrive either as datetimes or as parseable strings.
    timestamp = (start if isinstance(start, datetime.datetime)
                 else timeutils.parse_strtime(start))
    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(minutes=interval)

    print('Adding new events')
    n = 0
    while timestamp <= end:
        batch = []
        for idx in range(event_types):
            traits = [models.Trait('id1_%d' % idx, 1, str(uuid.uuid4())),
                      models.Trait('id2_%d' % idx, 2, random.randint(1, 10)),
                      models.Trait('id3_%d' % idx, 3, random.random()),
                      models.Trait('id4_%d' % idx, 4, timestamp)]
            batch.append(models.Event(str(uuid.uuid4()),
                                      'event_type%d' % idx,
                                      timestamp,
                                      traits))
            n += 1
        conn.record_events(batch)
        timestamp += increment
    print('Added %d new events' % n)
# Example #2 (score: 0)
def make_test_data(conn, start, end, interval, event_types):
    """Populate the event store with synthetic test events.

    :param conn: storage connection used to record the events
    :param start: first timestamp (datetime or parseable string)
    :param end: last timestamp, inclusive (datetime or parseable string)
    :param interval: minutes between successive event batches
    :param event_types: number of distinct event types per batch
    """

    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)

    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(minutes=interval)

    print('Adding new events')
    n = 0
    while timestamp <= end:
        data = []
        for i in range(event_types):
            # Each event carries one trait of every declared trait type.
            # NOTE(review): the codes 1-4 presumably map to text/int/
            # float/datetime — confirm against models.Trait.
            traits = [
                models.Trait('id1_%d' % i, 1, str(uuid.uuid4())),
                models.Trait('id2_%d' % i, 2, random.randint(1, 10)),
                models.Trait('id3_%d' % i, 3, random.random()),
                models.Trait('id4_%d' % i, 4, timestamp)
            ]
            data.append(
                models.Event(str(uuid.uuid4()), 'event_type%d' % i, timestamp,
                             traits))
            n += 1
        conn.record_events(data)
        timestamp = timestamp + increment
    print('Added %d new events' % n)
# Example #3 (score: 0)
def make_test_data(conn, name, meter_type, unit, volume, random_min,
                   random_max, user_id, project_id, resource_id, start,
                   end, interval, resource_metadata=None, source='artificial',):
    """Generate and record synthetic metering samples for one meter.

    One sample is recorded every ``interval`` minutes from ``start`` up
    to and including ``end``.  If both ``random_min`` and ``random_max``
    are non-negative, a random amount in that range is added to
    ``volume`` for each sample; for 'gauge' and 'delta' meters the
    running total is reset to ``volume`` after every sample so values do
    not accumulate over time.

    :param conn: metering storage connection
    :param start: first timestamp (datetime or parseable string)
    :param end: last timestamp, inclusive (datetime or parseable string)
    :param resource_metadata: optional metadata dict attached to samples
    """
    # BUG FIX: the signature previously used a mutable default ({}) for
    # resource_metadata, which is shared across all calls; default to
    # None and build a fresh dict per invocation instead.
    if resource_metadata is None:
        resource_metadata = {}

    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)

    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(minutes=interval)

    print('Adding new events for meter %s.' % (name))
    # Generate events
    n = 0
    total_volume = volume
    while timestamp <= end:
        if (random_min >= 0 and random_max >= 0):
            # If there is a random element defined, we will add it to
            # user given volume.  Use integer randomness only when both
            # bounds are ints; otherwise draw a float.
            if isinstance(random_min, int) and isinstance(random_max, int):
                total_volume += random.randint(random_min, random_max)
            else:
                total_volume += random.uniform(random_min, random_max)

        c = sample.Sample(name=name,
                          type=meter_type,
                          unit=unit,
                          volume=total_volume,
                          user_id=user_id,
                          project_id=project_id,
                          resource_id=resource_id,
                          timestamp=timestamp,
                          resource_metadata=resource_metadata,
                          source=source,
                          )
        data = utils.meter_message_from_counter(
            c,
            cfg.CONF.publisher.metering_secret)
        conn.record_metering_data(data)
        n += 1
        timestamp = timestamp + increment

        if (meter_type == 'gauge' or meter_type == 'delta'):
            # For delta and gauge, we don't want to increase the value
            # in time by random element. So we always set it back to
            # volume.
            total_volume = volume

    print('Added %d new events for meter %s.' % (n, name))
def make_test_data(conn, name, meter_type, unit, volume, random_min,
                   random_max, user_id, project_id, resource_id, start,
                   end, interval, resource_metadata=None, source='artificial',):
    """Generate and record synthetic metering samples for one meter.

    Records one sample every ``interval`` minutes from ``start`` up to
    and including ``end``.  When both ``random_min`` and ``random_max``
    are non-negative, a random amount in that range is added to
    ``volume`` per sample; 'gauge' and 'delta' meters are reset to
    ``volume`` after each sample so they do not accumulate.

    :param conn: metering storage connection
    :param resource_metadata: optional metadata dict attached to samples
    """
    # BUG FIX: resource_metadata previously defaulted to a mutable {}
    # shared between calls; use None and allocate a fresh dict here.
    if resource_metadata is None:
        resource_metadata = {}

    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)

    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(minutes=interval)

    print('Adding new events for meter %s.' % (name))
    # Generate events
    n = 0
    total_volume = volume
    while timestamp <= end:
        if (random_min >= 0 and random_max >= 0):
            # If there is a random element defined, we will add it to
            # user given volume.  Integer bounds give integer noise;
            # any float bound switches to a uniform float draw.
            if isinstance(random_min, int) and isinstance(random_max, int):
                total_volume += random.randint(random_min, random_max)
            else:
                total_volume += random.uniform(random_min, random_max)

        c = sample.Sample(name=name,
                          type=meter_type,
                          unit=unit,
                          volume=total_volume,
                          user_id=user_id,
                          project_id=project_id,
                          resource_id=resource_id,
                          timestamp=timestamp,
                          resource_metadata=resource_metadata,
                          source=source,
                          )
        data = utils.meter_message_from_counter(
            c,
            cfg.CONF.publisher.metering_secret)
        conn.record_metering_data(data)
        n += 1
        timestamp = timestamp + increment

        if (meter_type == 'gauge' or meter_type == 'delta'):
            # For delta and gauge, we don't want to increase the value
            # in time by random element. So we always set it back to
            # volume.
            total_volume = volume

    print('Added %d new events for meter %s.' % (n, name))
# Example #5 (score: 0)
 def test_return_valid_isoformat(self):
     """Ensure that the ec2 api returns datetime in xs:dateTime
        (which apparently isn't datetime.isoformat())
        NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
     """
     conv = apirequest._database_to_isoformat
     # sqlite database representation with microseconds
     time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
                                               "%Y-%m-%d %H:%M:%S.%f")
     # microseconds are truncated to milliseconds, with a trailing 'Z'
     self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z')
     # mysql database representation: no fractional seconds, so the
     # converter must append '.000'
     time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
                                               "%Y-%m-%d %H:%M:%S")
     self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z')
# Example #6 (score: 0)
 def assertValidISO8601ExtendedFormatDatetime(self, dt):
     """Assert *dt* parses as an ISO 8601 extended format date time.

     Returns the parsed datetime; raises AssertionError otherwise.
     """
     try:
         parsed = timeutils.parse_strtime(dt, fmt=TIME_FORMAT)
     except Exception:
         msg = "%s is not a valid ISO 8601 extended format date time." % dt
         raise AssertionError(msg)
     # BUG FIX: the isinstance check used to sit after an unconditional
     # return/raise and could never run; validate the parsed result.
     self.assertIsInstance(parsed, datetime.datetime)
     return parsed
# Example #7 (score: 0)
    def instance_update(self, context, instance_uuid, updates, service):
        """Validate *updates*, apply them to the instance and notify.

        Raises KeyError for any key outside ``allowed_updates``; string
        values of datetime fields are parsed back into datetimes.
        """
        # COMPAT FIX: six.iteritems works on both Python 2 and 3;
        # dict.iteritems() does not exist on Python 3 (six is already
        # used in this method for string_types).
        for key, value in six.iteritems(updates):
            if key not in allowed_updates:
                LOG.error(
                    _LE("Instance update attempted for "
                        "'%(key)s' on %(instance_uuid)s"), {
                            'key': key,
                            'instance_uuid': instance_uuid
                        })
                raise KeyError("unexpected update keyword '%s'" % key)
            if key in datetime_fields and isinstance(value, six.string_types):
                updates[key] = timeutils.parse_strtime(value)

        # NOTE(danms): the send_update() call below is going to want to know
        # about the flavor, so we need to join the appropriate things here,
        # and objectify the result.
        old_ref, instance_ref = self.db.instance_update_and_get_original(
            context,
            instance_uuid,
            updates,
            columns_to_join=['system_metadata'])
        inst_obj = objects.Instance._from_db_object(
            context,
            objects.Instance(),
            instance_ref,
            expected_attrs=['system_metadata'])
        notifications.send_update(context, old_ref, inst_obj, service)
        return jsonutils.to_primitive(instance_ref)
# Example #8 (score: 0)
    def __init__(self,
                 user_id,
                 project_id,
                 is_admin=None,
                 read_deleted="no",
                 roles=None,
                 remote_address=None,
                 timestamp=None,
                 request_id=None,
                 auth_token=None,
                 overwrite=True,
                 quota_class=None,
                 service_catalog=None,
                 **kwargs):
        """Initialize RequestContext.

        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        # Unknown kwargs are tolerated (older RPC peers) but logged.
        if kwargs:
            LOG.warn(_LW('Arguments dropped when creating context: %s'),
                     str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.is_admin = is_admin
        # Derive admin status from the roles when not given explicitly;
        # conversely, an explicit admin must carry the 'admin' role.
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self.roles)
        elif self.is_admin and 'admin' not in self.roles:
            self.roles.append('admin')
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        # Timestamps may arrive serialized as strings over RPC;
        # normalize them back to datetime objects.
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        # Keep only the catalog entries this service actually consumes.
        if service_catalog:
            self.service_catalog = [
                s for s in service_catalog
                if s.get('type') in ('compute', 'volume')
            ]
        else:
            self.service_catalog = []

        if not request_id:
            request_id = common_context.generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token
        self.quota_class = quota_class
        # Store in the greenthread-local store unless the caller asked
        # not to overwrite an existing context.
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()
# Example #9 (score: 0)
 def assertValidISO8601ExtendedFormatDatetime(self, dt):
     """Assert *dt* parses as an ISO 8601 extended format date time.

     Returns the parsed datetime; raises AssertionError otherwise.
     """
     try:
         parsed = timeutils.parse_strtime(dt, fmt=TIME_FORMAT)
     except Exception:
         msg = '%s is not a valid ISO 8601 extended format date time.' % dt
         raise AssertionError(msg)
     # BUG FIX: this check previously followed an unconditional
     # return/raise and was unreachable; check the parsed value instead.
     self.assertIsInstance(parsed, datetime.datetime)
     return parsed
# Example #10 (score: 0)
    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
                 roles=None, remote_address=None, timestamp=None,
                 request_id=None, auth_token=None, overwrite=True,
                 quota_class=None, user_name=None, project_name=None,
                 service_catalog=None, instance_lock_checked=False,
                 user_auth_plugin=None, **kwargs):
        """:param read_deleted: 'no' indicates deleted records are hidden,
                'yes' indicates deleted records are visible,
                'only' indicates that *only* deleted records are visible.

           :param overwrite: Set to False to ensure that the greenthread local
                copy of the index is not overwritten.

           :param user_auth_plugin: The auth plugin for the current request's
                authentication data.

           :param kwargs: Extra arguments that might be present, but we ignore
                because they possibly came in from older rpc messages.
        """
        if kwargs:
            # FIX: pass args lazily (comma, not '%') so the logging
            # framework interpolates only when the record is emitted —
            # consistent with the other context constructors in this file.
            LOG.warning(_LW('Arguments dropped when creating context: %s'),
                        str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        # Timestamps may arrive serialized as strings over RPC;
        # normalize them back to datetime objects.
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if not request_id:
            request_id = context.generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token

        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                if s.get('type') in ('volume', 'volumev2')]
        else:
            # if list is empty or none
            self.service_catalog = []

        self.instance_lock_checked = instance_lock_checked

        # NOTE(markmc): this attribute is currently only used by the
        # rs_limits turnstile pre-processor.
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class
        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = is_admin
        self.user_auth_plugin = user_auth_plugin
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self)
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()
# Example #11 (score: 0)
 def parse_strtime(strtime):
     """Parse an AWS-style timestamp, with or without milliseconds."""
     # NOTE(MotoKen): time format for aws-sdk-java contains millisecond
     fmt = ("%Y-%m-%dT%H:%M:%S.%fZ" if _ms_time_regex.match(strtime)
            else "%Y-%m-%dT%H:%M:%SZ")
     return timeutils.parse_strtime(strtime, fmt)
# Example #12 (score: 0)
 def is_up(self, service_ref):
     """Return True if the service's last heartbeat is recent enough.

     Moved from nova.utils.
     """
     heartbeat = service_ref['updated_at'] or service_ref['created_at']
     if isinstance(heartbeat, six.string_types):
         # Heartbeats arriving over RPC (via conductor) are serialized
         # strings and must be converted back to datetime objects.
         heartbeat = timeutils.parse_strtime(heartbeat)
     else:
         # Objects carry proper UTC tzinfo, but the timeutils comparison
         # below works on naive datetimes (and would fail otherwise).
         heartbeat = heartbeat.replace(tzinfo=None)
     # Timestamps in the DB are UTC.
     elapsed = timeutils.delta_seconds(heartbeat, timeutils.utcnow())
     alive = abs(elapsed) <= self.service_down_time
     if not alive:
         LOG.debug(
             'Seems service is down. Last heartbeat was %(lhb)s. '
             'Elapsed time is %(el)s', {
                 'lhb': str(heartbeat),
                 'el': str(elapsed)
             })
     return alive
# Example #13 (score: 0)
 def _get_sample(message, name):
     """Return the metric called *name* from a notification, or None.

     Best-effort: any error while building the sample is logged as a
     warning and swallowed, yielding None.
     """
     try:
         for metric in message['payload']['metrics']:
             if name != metric['name']:
                 continue
             return {
                 'payload': metric,
                 'event_type': message['event_type'],
                 'publisher_id': message['publisher_id'],
                 'resource_id': '%s_%s' % (message['payload']['host'],
                                           message['payload']['nodename']),
                 'timestamp': str(timeutils.parse_strtime(
                     metric['timestamp'])),
             }
     except Exception as err:
         LOG.warning(
             _('An error occurred while building %(m)s '
               'sample: %(e)s') % {
                   'm': name,
                   'e': err
               })
# Example #14 (score: 0)
def fake_vpn_instance():
    """Return a dict describing a canned active VPN instance for tests."""
    created = timeutils.parse_strtime('1981-10-20T00:00:00.000000')
    return dict(id=7,
                image_ref=CONF.vpn_image_id,
                vm_state='active',
                created_at=created,
                uuid=uuid,
                project_id=project_id)
# Example #15 (score: 0)
    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
                 roles=None, project_name=None, remote_address=None,
                 timestamp=None, request_id=None, auth_token=None,
                 overwrite=True, quota_class=None, service_catalog=None,
                 domain=None, user_domain=None, project_domain=None,
                 **kwargs):
        """Initialize RequestContext.

        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """

        super(RequestContext, self).__init__(auth_token=auth_token,
                                             user=user_id,
                                             tenant=project_id,
                                             domain=domain,
                                             user_domain=user_domain,
                                             project_domain=project_domain,
                                             is_admin=is_admin,
                                             request_id=request_id)
        self.roles = roles or []
        self.project_name = project_name
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        # COMPAT FIX: `basestring` exists only on Python 2;
        # six.string_types matches the check used elsewhere in this
        # codebase (NOTE(review): ensure six is imported in this module).
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        self.quota_class = quota_class
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()

        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                                    if s.get('type') in
                                    ('identity', 'compute', 'object-store')]
        else:
            # if list is empty or none
            self.service_catalog = []

        # We need to have RequestContext attributes defined
        # when policy.check_is_admin invokes request logging
        # to make it loggable.
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self.roles)
        elif self.is_admin and 'admin' not in self.roles:
            self.roles.append('admin')
# Example #16 (score: 0)
 def report_state(self, context, **kwargs):
     """Report state from agent to server."""
     # Reject reports stamped before this server started — they are
     # stale messages left over from a previous run.
     reported_at = timeutils.parse_strtime(kwargs['time'])
     if self.START_TIME > reported_at:
         LOG.debug("Message with invalid timestamp received")
         return
     agent_state = kwargs['agent_state']['agent_state']
     # Resolve the plugin lazily on first use.
     if not self.plugin:
         self.plugin = manager.NeutronManager.get_plugin()
     self.plugin.create_or_update_agent(context, agent_state)
# Example #17 (score: 0)
 def report_state(self, context, **kwargs):
     """Report state from agent to server."""
     # Discard reports stamped before this server started: they are
     # stale messages queued from a previous run.
     time = kwargs['time']
     time = timeutils.parse_strtime(time)
     if self.START_TIME > time:
         LOG.debug("Message with invalid timestamp received")
         return
     agent_state = kwargs['agent_state']['agent_state']
     # The plugin is resolved lazily on first use.
     if not self.plugin:
         self.plugin = manager.NeutronManager.get_plugin()
     self.plugin.create_or_update_agent(context, agent_state)
# Example #18 (score: 0)
    def _get_most_recent_update(self, versions):
        """Return the newest 'updated' timestamp among *versions*.

        Values are parsed from — and re-serialized to — the
        '%Y-%m-%dT%H:%M:%SZ' format.
        """
        latest = None
        for entry in versions:
            candidate = timeutils.parse_strtime(entry['updated'],
                                                '%Y-%m-%dT%H:%M:%SZ')
            if latest is None or candidate > latest:
                latest = candidate

        return latest.strftime('%Y-%m-%dT%H:%M:%SZ')
# Example #19 (score: 0)
    def _get_most_recent_update(self, versions):
        """Return the newest 'updated' timestamp among *versions*.

        Values are parsed from — and re-serialized to — the
        '%Y-%m-%dT%H:%M:%SZ' format.
        """
        recent = None
        for version in versions:
            updated = timeutils.parse_strtime(version['updated'],
                                              '%Y-%m-%dT%H:%M:%SZ')
            if not recent:
                recent = updated
            elif updated > recent:
                recent = updated

        return recent.strftime('%Y-%m-%dT%H:%M:%SZ')
# Example #20 (score: 0)
    def test_create_trust(self):
        """Create a trust and verify its users, expiry, links and roles."""
        # Expiry ten minutes out, serialized in the API's time format.
        expires_at = timeutils.strtime(timeutils.utcnow() + datetime.timedelta(minutes=10), fmt=TIME_FORMAT)
        new_trust = self.create_trust(self.sample_data, self.trustor["name"], expires_at=expires_at)
        self.assertEqual(self.trustor["id"], new_trust["trustor_user_id"])
        self.assertEqual(self.trustee["id"], new_trust["trustee_user_id"])
        role_ids = [self.role_browser["id"], self.role_member["id"]]
        # The returned expiry must round-trip through the same format.
        self.assertTrue(timeutils.parse_strtime(new_trust["expires_at"], fmt=TIME_FORMAT))
        self.assertIn("%s/v3/OS-TRUST/" % HOST_URL, new_trust["links"]["self"])
        self.assertIn("%s/v3/OS-TRUST/" % HOST_URL, new_trust["roles_links"]["self"])

        for role in new_trust["roles"]:
            self.assertIn(role["id"], role_ids)
# Example #21 (score: 0)
    def instance_update(self, context, instance_uuid, updates, service):
        """Validate *updates*, apply them to the instance and notify.

        Raises KeyError for any key outside ``allowed_updates``; string
        values of datetime fields are parsed back into datetimes.
        """
        # COMPAT FIX: six.iteritems keeps this working on Python 3,
        # where dict.iteritems() no longer exists (six is already used
        # below for string_types).
        for key, value in six.iteritems(updates):
            if key not in allowed_updates:
                LOG.error(
                    _LE("Instance update attempted for " "'%(key)s' on %(instance_uuid)s"),
                    {"key": key, "instance_uuid": instance_uuid},
                )
                raise KeyError("unexpected update keyword '%s'" % key)
            if key in datetime_fields and isinstance(value, six.string_types):
                updates[key] = timeutils.parse_strtime(value)

        old_ref, instance_ref = self.db.instance_update_and_get_original(context, instance_uuid, updates)
        notifications.send_update(context, old_ref, instance_ref, service)
        return jsonutils.to_primitive(instance_ref)
# Example #22 (score: 0)
    def instance_update(self, context, instance_uuid,
                        updates, service):
        """Validate *updates*, apply them to the instance and notify.

        Raises KeyError for any key outside ``allowed_updates``; string
        values of datetime fields are parsed back into datetimes.
        """
        # COMPAT FIX: six.iteritems works under Python 2 and 3;
        # dict.iteritems() was removed in Python 3.
        for key, value in six.iteritems(updates):
            if key not in allowed_updates:
                LOG.error(_("Instance update attempted for "
                            "'%(key)s' on %(instance_uuid)s"),
                          {'key': key, 'instance_uuid': instance_uuid})
                raise KeyError("unexpected update keyword '%s'" % key)
            if key in datetime_fields and isinstance(value, six.string_types):
                updates[key] = timeutils.parse_strtime(value)

        old_ref, instance_ref = self.db.instance_update_and_get_original(
            context, instance_uuid, updates)
        notifications.send_update(context, old_ref, instance_ref, service)
        return jsonutils.to_primitive(instance_ref)
# Example #23 (score: 0)
 def _get_sample(message, name):
     """Return the metric named *name* from a notification, or None.

     Best-effort: any error while building the sample is logged as a
     warning and swallowed, yielding None.
     """
     try:
         for metric in message['payload']['metrics']:
             if name == metric['name']:
                 # resource_id combines host and nodename so samples from
                 # different nodes stay distinct.
                 info = {'payload': metric,
                         'event_type': message['event_type'],
                         'publisher_id': message['publisher_id'],
                         'resource_id': '%s_%s' % (
                             message['payload']['host'],
                             message['payload']['nodename']),
                         'timestamp': str(timeutils.parse_strtime(
                             metric['timestamp']))}
                 return info
     except Exception as err:
         LOG.warning(_('An error occurred while building %(m)s '
                       'sample: %(e)s') % {'m': name, 'e': err})
# Example #24 (score: 0)
    def test_create_trust(self):
        """Create a trust and verify its users, expiry, links and roles."""
        # Expiry ten minutes out, serialized in the API's time format.
        expires_at = timeutils.strtime(timeutils.utcnow() +
                                       datetime.timedelta(minutes=10),
                                       fmt=TIME_FORMAT)
        new_trust = self.create_trust(self.sample_data, self.trustor['name'],
                                      expires_at=expires_at)
        self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
        self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
        role_ids = [self.role_browser['id'], self.role_member['id']]
        # The returned expiry must round-trip through the same format.
        self.assertTrue(timeutils.parse_strtime(new_trust['expires_at'],
                                                fmt=TIME_FORMAT))
        self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
                      new_trust['links']['self'])
        self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
                      new_trust['roles_links']['self'])

        for role in new_trust['roles']:
            self.assertIn(role['id'], role_ids)
# Example #25 (score: 0)
    def test_create_trust(self):
        """Create a trust and verify its users, expiry, links and roles."""
        # Expiry ten minutes out, serialized in the API's time format.
        expires_at = timeutils.strtime(timeutils.utcnow() +
                                       datetime.timedelta(minutes=10),
                                       fmt=TIME_FORMAT)
        new_trust = self.create_trust(self.sample_data,
                                      self.trustor['name'],
                                      expires_at=expires_at)
        self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
        self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
        role_ids = [self.role_browser['id'], self.role_member['id']]
        # The returned expiry must round-trip through the same format.
        self.assertTrue(
            timeutils.parse_strtime(new_trust['expires_at'], fmt=TIME_FORMAT))
        self.assertIn('%s/v3/OS-TRUST/' % HOST_URL, new_trust['links']['self'])
        self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
                      new_trust['roles_links']['self'])

        for role in new_trust['roles']:
            self.assertIn(role['id'], role_ids)
# Example #26 (score: 0)
 def is_up(self, service_ref):
     """Moved from nova.utils
     Check whether a service is up based on last heartbeat.
     """
     last_heartbeat = service_ref['updated_at'] or service_ref['created_at']
     if isinstance(last_heartbeat, six.string_types):
         # NOTE(russellb) If this service_ref came in over rpc via
         # conductor, then the timestamp will be a string and needs to be
         # converted back to a datetime.
         last_heartbeat = timeutils.parse_strtime(last_heartbeat)
     else:
         # Objects have proper UTC timezones, but the timeutils comparison
         # below does not (and will fail)
         last_heartbeat = last_heartbeat.replace(tzinfo=None)
     # Timestamps in DB are UTC.
     elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
     # Up means the last heartbeat falls inside the configured window.
     is_up = abs(elapsed) <= self.service_down_time
     if not is_up:
         LOG.debug('Seems service is down. Last heartbeat was %(lhb)s. '
                   'Elapsed time is %(el)s',
                   {'lhb': str(last_heartbeat), 'el': str(elapsed)})
     return is_up
# Example #27 (score: 0)
    def instance_update(self, context, instance_uuid,
                        updates, service):
        """Validate *updates*, apply them and send an update notification.

        Raises KeyError for any key outside ``allowed_updates``; string
        values of datetime fields are parsed back into datetimes.
        """
        # COMPAT FIX: six.iteritems works under both Python 2 and 3;
        # dict.iteritems() was removed in Python 3 (six is already used
        # below for string_types).
        for key, value in six.iteritems(updates):
            if key not in allowed_updates:
                LOG.error(_LE("Instance update attempted for "
                              "'%(key)s' on %(instance_uuid)s"),
                          {'key': key, 'instance_uuid': instance_uuid})
                raise KeyError("unexpected update keyword '%s'" % key)
            if key in datetime_fields and isinstance(value, six.string_types):
                updates[key] = timeutils.parse_strtime(value)

        # NOTE(danms): the send_update() call below is going to want to know
        # about the flavor, so we need to join the appropriate things here,
        # and objectify the result.
        old_ref, instance_ref = self.db.instance_update_and_get_original(
            context, instance_uuid, updates,
            columns_to_join=['system_metadata'])
        inst_obj = objects.Instance._from_db_object(
            context, objects.Instance(),
            instance_ref, expected_attrs=['system_metadata'])
        notifications.send_update(context, old_ref, inst_obj, service)
        return jsonutils.to_primitive(instance_ref)
# Example #28 (score: 0)
    def _update_cache_entry(self, state):
        """Refresh the attestation cache entry for the host in *state*.

        ``vtime`` is normalized to a naive UTC datetime; if it cannot be
        parsed at all the host is marked 'unknown' (un-trusted).
        """
        entry = {}

        host = state['host_name']
        entry['trust_lvl'] = state['trust_lvl']

        try:
            # Normalize as naive object to interoperate with utcnow().
            entry['vtime'] = timeutils.normalize_time(
                            timeutils.parse_isotime(state['vtime']))
        except ValueError:
            try:
                # Mt. Wilson does not necessarily return an ISO8601 formatted
                # `vtime`, so we should try to parse it as a string formatted
                # datetime.
                vtime = timeutils.parse_strtime(state['vtime'], fmt="%c")
                entry['vtime'] = timeutils.normalize_time(vtime)
            except ValueError:
                # Mark the system as un-trusted if get invalid vtime.
                entry['trust_lvl'] = 'unknown'
                entry['vtime'] = timeutils.utcnow()

        self.compute_nodes[host] = entry
# Example #29 (score: 0)
    def _update_cache_entry(self, state):
        """Refresh the attestation cache entry for the host in *state*.

        ``vtime`` is normalized to a naive UTC datetime; if it cannot be
        parsed at all the host is marked 'unknown' (un-trusted).
        """
        entry = {}

        host = state['host_name']
        entry['trust_lvl'] = state['trust_lvl']

        try:
            # Normalize as naive object to interoperate with utcnow().
            entry['vtime'] = timeutils.normalize_time(
                timeutils.parse_isotime(state['vtime']))
        except ValueError:
            try:
                # Mt. Wilson does not necessarily return an ISO8601 formatted
                # `vtime`, so we should try to parse it as a string formatted
                # datetime.
                vtime = timeutils.parse_strtime(state['vtime'], fmt="%c")
                entry['vtime'] = timeutils.normalize_time(vtime)
            except ValueError:
                # Mark the system as un-trusted if get invalid vtime.
                entry['trust_lvl'] = 'unknown'
                entry['vtime'] = timeutils.utcnow()

        self.compute_nodes[host] = entry
def fake_vpn_instance():
    """Build a stub VPN instance record for tests."""
    created = timeutils.parse_strtime('1981-10-20T00:00:00.000000')
    return {
        'id': 7,
        'image_ref': CONF.vpn_image_id,
        'vm_state': 'active',
        'created_at': created,
        'uuid': uuid,
        'project_id': project_id,
    }
Beispiel #31
0
    def _get_share_networks(self, req, is_detail=True):
        """Returns a list of share networks."""
        context = req.environ['manila.context']
        search_opts = {}
        search_opts.update(req.GET)

        wants_other_project = (
            'project_id' in search_opts
            and search_opts['project_id'] != context.project_id)
        if 'all_tenants' in search_opts or wants_other_project:
            # Looking beyond the caller's own project needs policy approval.
            policy.check_policy(context, RESOURCE_NAME,
                                'get_all_share_networks')

        if 'security_service_id' in search_opts:
            networks = db_api.share_network_get_all_by_security_service(
                context, search_opts['security_service_id'])
        elif wants_other_project:
            networks = db_api.share_network_get_all_by_project(
                context, search_opts['project_id'])
        elif 'all_tenants' in search_opts:
            networks = db_api.share_network_get_all(context)
        else:
            networks = db_api.share_network_get_all_by_project(
                context, context.project_id)

        date_parsing_error_msg = '''%s is not in yyyy-mm-dd format.'''

        def _parse_date_opt(opt_name):
            # Parse a yyyy-mm-dd query option, rejecting bad input with 400.
            try:
                return timeutils.parse_strtime(search_opts[opt_name],
                                               fmt="%Y-%m-%d")
            except ValueError:
                msg = date_parsing_error_msg % search_opts[opt_name]
                raise exc.HTTPBadRequest(explanation=msg)

        if 'created_since' in search_opts:
            created_since = _parse_date_opt('created_since')
            networks = [net for net in networks
                        if net['created_at'] >= created_since]
        if 'created_before' in search_opts:
            created_before = _parse_date_opt('created_before')
            networks = [net for net in networks
                        if net['created_at'] <= created_before]

        # Drop options that are not share-network attributes before the
        # generic attribute-equality filtering below.
        for opt in ('all_tenants', 'created_since', 'created_before',
                    'limit', 'offset', 'security_service_id'):
            search_opts.pop(opt, None)
        if search_opts:
            for key, value in six.iteritems(search_opts):
                if key in ('ip_version', 'segmentation_id'):
                    value = int(value)
                networks = [net for net in networks if net[key] == value]

        limited_list = common.limited(networks, req)
        return self._view_builder.build_share_networks(limited_list, is_detail)
Beispiel #32
0
 def _get_datetime_from_filename(self, timestamp_filename):
     """Extract the datetime encoded in a timestamp file name.

     The name is expected to be TIMESTAMP_PREFIX followed by a datetime
     string in TIMESTAMP_FORMAT.
     """
     # NOTE: the previous `lstrip(TIMESTAMP_PREFIX)` strips any leading run
     # of the *characters* in TIMESTAMP_PREFIX, not the prefix string, so
     # it could also eat leading characters of the timestamp itself.
     # Remove the literal prefix instead.
     ts = timestamp_filename
     if ts.startswith(TIMESTAMP_PREFIX):
         ts = ts[len(TIMESTAMP_PREFIX):]
     return timeutils.parse_strtime(ts, fmt=TIMESTAMP_FORMAT)
Beispiel #33
0
def parse_strtime(dstr, fmt):
    """Parse `dstr` using `fmt`, raising InvalidStrTime on bad input."""
    try:
        parsed = timeutils.parse_strtime(dstr, fmt)
    except (TypeError, ValueError) as e:
        raise exception.InvalidStrTime(reason=six.text_type(e))
    return parsed
def make_test_data(conn, name, meter_type, unit, volume, random_min,
                   random_max, user_id, project_id, resource_id, start,
                   end, interval, resource_metadata=None, source='artificial',):
    """Generate and record synthetic metering samples between start and end.

    One sample is recorded per `interval` minutes.  The meter name and
    resource id of each sample are chosen at random from generated pools.
    When `random_min`/`random_max` are both non-negative, `volume` is
    perturbed by a random amount (randint for ints, uniform otherwise);
    for 'gauge' and 'delta' meters the value is reset to `volume` after
    each sample.  A progress line is printed every 1000 samples.
    """
    # NOTE: the default used to be a mutable `{}`, which is shared between
    # calls and can leak state; use None and build a fresh dict per call.
    if resource_metadata is None:
        resource_metadata = {}

    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)

    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(minutes=interval)

    print('Adding new events for meter %s.' % (name))
    # Generate events
    n = 0
    total_volume = volume
    meter_names = ["meter" + name + str(i) for i in range(1, 50, 1)]
    resource_ids = ["resource" + resource_id + str(i)
                    for i in range(1, 500, 1)]

    # NOTE: renamed from `id` so the builtin is not shadowed.
    thread_id = threading.current_thread().ident

    print("id, curr_sampl_count, avg, s")

    t0 = time.time()
    while timestamp <= end:
        if (random_min >= 0 and random_max >= 0):
            # If there is a random element defined, we will add it to
            # user given volume.
            if isinstance(random_min, int) and isinstance(random_max, int):
                total_volume += random.randint(random_min, random_max)
            else:
                total_volume += random.uniform(random_min, random_max)

        c = sample.Sample(name=random.choice(meter_names),
                          type=meter_type,
                          unit=unit,
                          volume=total_volume,
                          user_id=user_id,
                          project_id=project_id,
                          resource_id=random.choice(resource_ids),
                          timestamp=timestamp,
                          resource_metadata=resource_metadata,
                          source=source,
                          )
        data = utils.meter_message_from_counter(
            c,
            cfg.CONF.publisher.metering_secret)
        conn.record_metering_data(data)
        n += 1
        timestamp = timestamp + increment
        t1 = time.time()
        if not n % 1000:
            # Progress: thread id, stored sample count, insert rate, time.
            print("%d, %d, %f, %f" % (thread_id,
                                      get_current_sample_count(conn),
                                      (n / (t1 - t0)), t1))

        if (meter_type == 'gauge' or meter_type == 'delta'):
            # For delta and gauge, we don't want to increase the value
            # in time by random element. So we always set it back to
            # volume.
            total_volume = volume

    t1 = time.time()
    totaltime = t1 - t0
    print("%d, %d, %f, %f" % (thread_id, get_current_sample_count(conn),
                              (n / (t1 - t0)), t1))

    print('Id %d Added %d samples total time %f sec avg: %f samples/sec '
          'ts: %f' % (thread_id, n, totaltime, (n / totaltime), t1))
Beispiel #35
0
 def test_strtime_and_back(self):
     """A datetime must survive a strtime/parse_strtime round trip."""
     original = datetime.datetime(1997, 8, 29, 6, 14, 0)
     round_tripped = timeutils.parse_strtime(timeutils.strtime(original))
     self.assertEqual(original, round_tripped)
Beispiel #36
0
    def _get_share_networks(self, req, is_detail=True):
        """Returns a list of share networks."""
        context = req.environ['manila.context']
        search_opts = {}
        search_opts.update(req.GET)

        wants_other_project = (
            'project_id' in search_opts
            and search_opts['project_id'] != context.project_id)
        if 'all_tenants' in search_opts or wants_other_project:
            # Looking beyond the caller's own project needs policy approval.
            policy.check_policy(context, RESOURCE_NAME,
                                'get_all_share_networks')

        if 'security_service_id' in search_opts:
            networks = db_api.share_network_get_all_by_security_service(
                context, search_opts['security_service_id'])
        elif wants_other_project:
            networks = db_api.share_network_get_all_by_project(
                context, search_opts['project_id'])
        elif 'all_tenants' in search_opts:
            networks = db_api.share_network_get_all(context)
        else:
            networks = db_api.share_network_get_all_by_project(
                context, context.project_id)

        date_parsing_error_msg = '''%s is not in yyyy-mm-dd format.'''

        def _parse_date_opt(opt_name):
            # Parse a yyyy-mm-dd query option, rejecting bad input with 400.
            try:
                return timeutils.parse_strtime(search_opts[opt_name],
                                               fmt="%Y-%m-%d")
            except ValueError:
                msg = date_parsing_error_msg % search_opts[opt_name]
                raise exc.HTTPBadRequest(explanation=msg)

        if 'created_since' in search_opts:
            created_since = _parse_date_opt('created_since')
            networks = [net for net in networks
                        if net['created_at'] >= created_since]
        if 'created_before' in search_opts:
            created_before = _parse_date_opt('created_before')
            networks = [net for net in networks
                        if net['created_at'] <= created_before]

        # Drop options that are not share-network attributes before the
        # generic attribute-equality filtering below.
        for opt in ('all_tenants', 'created_since', 'created_before',
                    'limit', 'offset', 'security_service_id'):
            search_opts.pop(opt, None)
        if search_opts:
            for key, value in six.iteritems(search_opts):
                if key in ('ip_version', 'segmentation_id'):
                    value = int(value)
                networks = [net for net in networks if net[key] == value]

        limited_list = common.limited(networks, req)
        return self._view_builder.build_share_networks(limited_list, is_detail)
def parse_strtime(dstr, fmt):
    """Parse `dstr` using `fmt`, raising InvalidStrTime on bad input."""
    try:
        parsed = timeutils.parse_strtime(dstr, fmt)
    except (TypeError, ValueError) as e:
        raise exception.InvalidStrTime(reason=six.text_type(e))
    return parsed
Beispiel #38
0
def convert_datetimes(values, *datetime_keys):
    """Parse string values for the given keys into datetime objects.

    :param values: mapping whose entries are converted in place
    :param datetime_keys: keys whose string values should be parsed
    :returns: the same mapping, mutated in place
    """
    for key in values:
        # NOTE: `basestring` only exists on Python 2 and raised NameError
        # on Python 3; `str` keeps this working on Python 3. (Py2 callers
        # passing `unicode` values would need six.string_types instead —
        # TODO confirm no such callers remain.)
        if key in datetime_keys and isinstance(values[key], str):
            values[key] = timeutils.parse_strtime(values[key])
    return values
Beispiel #39
0
 def test_parse_strtime(self):
     """Parsing the reference string must yield the reference datetime."""
     formatted = self.skynet_self_aware_time_perfect_str
     parsed = timeutils.parse_strtime(formatted)
     self.assertEqual(self.skynet_self_aware_time_perfect, parsed)
 def test_strtime_and_back(self):
     """A datetime must survive a strtime/parse_strtime round trip."""
     original = datetime.datetime(1997, 8, 29, 6, 14, 0)
     round_tripped = timeutils.parse_strtime(timeutils.strtime(original))
     self.assertEqual(original, round_tripped)
 def test_parse_strtime(self):
     """Parsing the reference string must yield the reference datetime."""
     formatted = self.skynet_self_aware_time_perfect_str
     parsed = timeutils.parse_strtime(formatted)
     self.assertEqual(self.skynet_self_aware_time_perfect, parsed)
Beispiel #42
0
from oslo.utils import timeutils
from six.moves.urllib import parse
from webob import exc as webob_exc

from manila.api.v1 import share_networks
from manila.db import api as db_api
from manila import exception
from manila import quota
from manila import test
from manila.tests.api import fakes


# Test fixture: a fully populated share-network record as a plain dict.
# `created_at` is a real datetime so date-based comparisons can be made
# against it.
fake_share_network = {
    'id': 'fake network id',
    'project_id': 'fake project',
    'created_at': timeutils.parse_strtime('2002-02-02', fmt="%Y-%m-%d"),
    'updated_at': None,
    'neutron_net_id': 'fake net id',
    'neutron_subnet_id': 'fake subnet id',
    'network_type': 'vlan',
    'segmentation_id': 1000,
    'cidr': '10.0.0.0/24',
    'ip_version': 4,
    'name': 'fake name',
    'description': 'fake description',
    'share_servers': [],
    'security_services': []
}

fake_share_network_shortened = {
    'id': 'fake network id',
Beispiel #43
0
 def _get_datetime_from_filename(self, timestamp_filename):
     """Extract the datetime encoded in a timestamp file name.

     The name is expected to be TIMESTAMP_PREFIX followed by a datetime
     string in TIMESTAMP_FORMAT.
     """
     # NOTE: the previous `lstrip(TIMESTAMP_PREFIX)` strips any leading run
     # of the *characters* in TIMESTAMP_PREFIX, not the prefix string, so
     # it could also eat leading characters of the timestamp itself.
     # Remove the literal prefix instead.
     ts = timestamp_filename
     if ts.startswith(TIMESTAMP_PREFIX):
         ts = ts[len(TIMESTAMP_PREFIX):]
     return timeutils.parse_strtime(ts, fmt=TIMESTAMP_FORMAT)
Beispiel #44
0
from oslo.db import exception as db_exception
from oslo.utils import timeutils
from six.moves.urllib import parse
from webob import exc as webob_exc

from manila.api.v1 import share_networks
from manila.db import api as db_api
from manila import exception
from manila import quota
from manila import test
from manila.tests.api import fakes

# Test fixture: a fully populated share-network record as a plain dict.
# `created_at` is a real datetime so date-based comparisons can be made
# against it.
fake_share_network = {
    'id': 'fake network id',
    'project_id': 'fake project',
    'created_at': timeutils.parse_strtime('2002-02-02', fmt="%Y-%m-%d"),
    'updated_at': None,
    'neutron_net_id': 'fake net id',
    'neutron_subnet_id': 'fake subnet id',
    'network_type': 'vlan',
    'segmentation_id': 1000,
    'cidr': '10.0.0.0/24',
    'ip_version': 4,
    'name': 'fake name',
    'description': 'fake description',
    'share_servers': [],
    'security_services': []
}

fake_share_network_shortened = {
    'id': 'fake network id',
def make_test_data(
    conn,
    name,
    meter_type,
    unit,
    volume,
    random_min,
    random_max,
    user_id,
    project_id,
    resource_id,
    start,
    end,
    interval,
    resource_metadata=None,
    source='artificial',
):
    """Generate and record synthetic metering samples between start and end.

    One sample is recorded per `interval` minutes.  The meter name and
    resource id of each sample are chosen at random from generated pools.
    When `random_min`/`random_max` are both non-negative, `volume` is
    perturbed by a random amount (randint for ints, uniform otherwise);
    for 'gauge' and 'delta' meters the value is reset to `volume` after
    each sample.  A progress line is printed every 1000 samples.
    """
    # NOTE: the default used to be a mutable `{}`, which is shared between
    # calls and can leak state; use None and build a fresh dict per call.
    if resource_metadata is None:
        resource_metadata = {}

    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)

    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(minutes=interval)

    print('Adding new events for meter %s.' % (name))
    # Generate events
    n = 0
    total_volume = volume
    meter_names = ["meter" + name + str(i) for i in range(1, 50, 1)]
    resource_ids = [
        "resource" + resource_id + str(i) for i in range(1, 500, 1)
    ]

    # NOTE: renamed from `id` so the builtin is not shadowed.
    thread_id = threading.current_thread().ident

    print("id, curr_sampl_count, avg, s")

    t0 = time.time()
    while timestamp <= end:
        if (random_min >= 0 and random_max >= 0):
            # If there is a random element defined, we will add it to
            # user given volume.
            if isinstance(random_min, int) and isinstance(random_max, int):
                total_volume += random.randint(random_min, random_max)
            else:
                total_volume += random.uniform(random_min, random_max)

        c = sample.Sample(
            name=random.choice(meter_names),
            type=meter_type,
            unit=unit,
            volume=total_volume,
            user_id=user_id,
            project_id=project_id,
            resource_id=random.choice(resource_ids),
            timestamp=timestamp,
            resource_metadata=resource_metadata,
            source=source,
        )
        data = utils.meter_message_from_counter(
            c, cfg.CONF.publisher.metering_secret)
        conn.record_metering_data(data)
        n += 1
        timestamp = timestamp + increment
        t1 = time.time()
        if not n % 1000:
            # Progress: thread id, stored sample count, insert rate, time.
            print("%d, %d, %f, %f" % (thread_id,
                                      get_current_sample_count(conn),
                                      (n / (t1 - t0)), t1))

        if (meter_type == 'gauge' or meter_type == 'delta'):
            # For delta and gauge, we don't want to increase the value
            # in time by random element. So we always set it back to
            # volume.
            total_volume = volume

    t1 = time.time()
    totaltime = t1 - t0
    print("%d, %d, %f, %f" % (thread_id, get_current_sample_count(conn),
                              (n / (t1 - t0)), t1))

    print(
        'Id %d Added %d samples total time %f sec avg: %f samples/sec ts: %f' %
        (thread_id, n, totaltime, (n / totaltime), t1))
Beispiel #46
0
    def __init__(self,
                 user_id,
                 project_id,
                 is_admin=None,
                 read_deleted="no",
                 roles=None,
                 remote_address=None,
                 timestamp=None,
                 request_id=None,
                 auth_token=None,
                 overwrite=True,
                 quota_class=None,
                 user_name=None,
                 project_name=None,
                 service_catalog=None,
                 instance_lock_checked=False,
                 **kwargs):
        """Create a new request context.

        :param read_deleted: 'no' hides deleted records, 'yes' makes them
            visible, 'only' shows *only* deleted records.
        :param overwrite: set to False to ensure that the greenthread local
            copy of the index is not overwritten.
        :param kwargs: extra arguments that might be present but are
            ignored, since they possibly came in from older rpc messages.
        """
        if kwargs:
            LOG.warn(
                _('Arguments dropped when creating context: %s') % str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles if roles else []
        self.read_deleted = read_deleted
        self.remote_address = remote_address

        # Default to "now"; deserialize string timestamps from the wire.
        if not timestamp:
            timestamp = timeutils.utcnow()
        elif isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp

        self.request_id = request_id if request_id else generate_request_id()
        self.auth_token = auth_token

        # Only include required parts of service_catalog; an empty or
        # missing catalog becomes an empty list.
        catalog = service_catalog if service_catalog else []
        self.service_catalog = [s for s in catalog
                                if s.get('type') in ('volume', 'volumev2')]

        self.instance_lock_checked = instance_lock_checked

        # NOTE(markmc): this attribute is currently only used by the
        # rs_limits turnstile pre-processor.
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class
        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = False if is_admin is None else is_admin
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()
Beispiel #47
0
 def _to_datetime(self, obj):
     """Parse the serialized timestamp `obj` into a datetime object."""
     parsed = timeutils.parse_strtime(obj)
     return parsed
Beispiel #48
0
 def _to_datetime(self, obj):
     """Parse the serialized timestamp `obj` into a datetime object."""
     parsed = timeutils.parse_strtime(obj)
     return parsed
def make_test_data(
    conn,
    name,
    meter_type,
    unit,
    volume,
    random_min,
    random_max,
    user_id,
    project_id,
    resource_id,
    start,
    end,
    interval,
    resource_metadata={},
    source='artificial',
):

    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)

    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(minutes=interval)

    # Generate events
    n = 0
    total_volume = volume
    meter_names = ["meter" + name + str(i) for i in range(1, 50, 1)]
    resource_ids = [
        "resource" + resource_id + str(i) for i in range(1, 500, 1)
    ]

    id = threading.current_thread().ident

    t0 = time.time()
    while timestamp <= end:
        if (random_min >= 0 and random_max >= 0):
            # If there is a random element defined, we will add it to
            # user given volume.
            if isinstance(random_min, int) and isinstance(random_max, int):
                total_volume += random.randint(random_min, random_max)
            else:
                total_volume += random.uniform(random_min, random_max)

        c = sample.Sample(
            name=random.choice(meter_names),
            type=meter_type,
            unit=unit,
            volume=total_volume,
            user_id=user_id,
            project_id=project_id,
            resource_id=random.choice(resource_ids),
            timestamp=timestamp,
            resource_metadata=resource_metadata,
            source=source,
        )
        data = utils.meter_message_from_counter(
            c, cfg.CONF.publisher.metering_secret)
        #conn.record_metering_data(data)

        print(
            "INSERT INTO sample (meter_id, user_id, project_id, resource_id, resource_metadata, "
            "volume, timestamp, recorded_at, message_signature, message_id, source_id) "
            "VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');"
            %
            (169L, 'usr123', 'proj123', random.choice(resource_ids), '{}',
             total_volume, timestamp, datetime.datetime.now(),
             '6bbcaf2ad4389bd63a93ac34aef66492928acb4e84fbb6529c11daac2ce472fc',
             '30e66cd8-4dbb-11e4-89e6-78e7d122ff05', 'artificial'))

        n += 1
        timestamp = timestamp + increment
        t1 = time.time()

        if (meter_type == 'gauge' or meter_type == 'delta'):
            # For delta and gauge, we don't want to increase the value
            # in time by random element. So we always set it back to
            # volume.
            total_volume = volume

    t1 = time.time()
    totaltime = t1 - t0