Example #1
    def _verify_export_location_structure(self, export_locations,
                                          role='admin'):
        expected_keys = [
            'created_at', 'updated_at', 'path', 'uuid',
        ]
        if role == 'admin':
            expected_keys.extend(['is_admin_only', 'share_instance_id'])

        if not isinstance(export_locations, (list, tuple, set)):
            export_locations = (export_locations, )

        for export_location in export_locations:
            self.assertEqual(len(expected_keys), len(export_location))
            for key in expected_keys:
                self.assertIn(key, export_location)
            if role == 'admin':
                self.assertIn(export_location['is_admin_only'], (True, False))
                self.assertTrue(
                    uuidutils.is_uuid_like(
                        export_location['share_instance_id']))
            self.assertTrue(uuidutils.is_uuid_like(export_location['uuid']))
            self.assertTrue(
                isinstance(export_location['path'], six.string_types))
            for time in (export_location['created_at'],
                         export_location['updated_at']):
                # parse_strtime raises ValueError for a malformed value, so
                # simply parsing 'time' asserts that it holds a valid date.
                timeutils.parse_strtime(time)
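
For reference, a minimal standalone sketch of the behavior these assertions rely on (assuming oslo.utils; parse_strtime defaults to oslo's PERFECT_TIME_FORMAT, '%Y-%m-%dT%H:%M:%S.%f', and raises ValueError on malformed input):

from oslo_utils import timeutils

# The default format parses ISO-like strings with microseconds into
# naive datetime objects.
dt = timeutils.parse_strtime('2016-04-18T02:56:25.198871')
assert dt.microsecond == 198871

# A malformed value raises ValueError, which is exactly what the test
# above leans on.
try:
    timeutils.parse_strtime('not-a-date')
except ValueError:
    pass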
Example #2
    def test_parse_input_params_success(self, mock_get_user_id,
                                        mock_is_authenticated, mock_get_input):
        fmt = '%Y-%m-%d %H:%M:%S'
        self.CONF.set_override('input_date_format', fmt, 'api')
        raw_filters = {
            const.START_DATE: '2015-03-26 15:04:40',
            const.END_DATE: '2015-03-26 15:04:50',
            const.CPID: '12345',
            const.SIGNED: True
        }

        expected_params = mock.Mock()
        mock_get_input.return_value = raw_filters

        parsed_start_date = timeutils.parse_strtime(
            raw_filters[const.START_DATE], fmt)

        parsed_end_date = timeutils.parse_strtime(raw_filters[const.END_DATE],
                                                  fmt)

        expected_result = {
            const.START_DATE: parsed_start_date,
            const.END_DATE: parsed_end_date,
            const.CPID: '12345',
            const.SIGNED: True,
            const.OPENID: 'fake_id',
        }

        result = api_utils.parse_input_params(expected_params)
        self.assertEqual(expected_result, result)

        mock_get_input.assert_called_once_with(expected_params)
Example #3
    def test_parse_input_params_success(
        self, mock_get_user_pubkeys, mock_get_user_id, mock_is_authenticated, mock_get_input
    ):
        fmt = "%Y-%m-%d %H:%M:%S"
        self.CONF.set_override("input_date_format", fmt, "api")
        raw_filters = {
            const.START_DATE: "2015-03-26 15:04:40",
            const.END_DATE: "2015-03-26 15:04:50",
            const.CPID: "12345",
            const.SIGNED: True,
        }
        fake_pubkeys = ({"format": "fake", "pubkey": "fake_pk"},)
        mock_get_user_pubkeys.return_value = fake_pubkeys
        expected_params = mock.Mock()
        mock_get_input.return_value = raw_filters

        parsed_start_date = timeutils.parse_strtime(raw_filters[const.START_DATE], fmt)

        parsed_end_date = timeutils.parse_strtime(raw_filters[const.END_DATE], fmt)

        expected_result = {
            const.START_DATE: parsed_start_date,
            const.END_DATE: parsed_end_date,
            const.CPID: "12345",
            const.SIGNED: True,
            const.OPENID: "fake_id",
            const.USER_PUBKEYS: ["fake fake_pk"],
        }

        result = api_utils.parse_input_params(expected_params)
        self.assertEqual(result, expected_result)

        mock_get_input.assert_called_once_with(expected_params)
Example #4
def make_test_data(conn, start, end, interval, event_types):

    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)

    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(minutes=interval)

    print('Adding new events')
    n = 0
    while timestamp <= end:
        data = []
        for i in range(event_types):
            traits = [models.Trait('id1_%d' % i, 1, str(uuid.uuid4())),
                      models.Trait('id2_%d' % i, 2, random.randint(1, 10)),
                      models.Trait('id3_%d' % i, 3, random.random()),
                      models.Trait('id4_%d' % i, 4, timestamp)]
            data.append(models.Event(str(uuid.uuid4()),
                                     'event_type%d' % i,
                                     timestamp,
                                     traits,
                                     {}))
            n += 1
        conn.record_events(data)
        timestamp = timestamp + increment
    print('Added %d new events' % n)
Example #5
def make_test_data(conn, start, end, interval, event_types):

    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)

    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(minutes=interval)

    print('Adding new events')
    n = 0
    while timestamp <= end:
        data = []
        for i in range(event_types):
            traits = [models.Trait('id1_%d' % i, 1, str(uuid.uuid4())),
                      models.Trait('id2_%d' % i, 2, random.randint(1, 10)),
                      models.Trait('id3_%d' % i, 3, random.random()),
                      models.Trait('id4_%d' % i, 4, timestamp)]
            data.append(models.Event(str(uuid.uuid4()),
                                     'event_type%d' % i,
                                     timestamp,
                                     traits))
            n += 1
        conn.record_events(data)
        timestamp = timestamp + increment
    print('Added %d new events' % n)
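
The start/end normalization at the top of make_test_data recurs in each variant of this function below; as a rough standalone helper (a sketch, not part of the original module) it reduces to:

import datetime

from oslo_utils import timeutils


def to_datetime(value):
    # Pass datetimes through; parse strings with oslo's default
    # '%Y-%m-%dT%H:%M:%S.%f' format, raising ValueError otherwise.
    if isinstance(value, datetime.datetime):
        return value
    return timeutils.parse_strtime(value)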
Example #6
    def test_parse_input_params_success(self, mock_get_input):
        fmt = '%Y-%m-%d %H:%M:%S'
        self.CONF.set_override('input_date_format',
                               fmt,
                               'api')
        raw_filters = {
            const.START_DATE: '2015-03-26 15:04:40',
            const.END_DATE: '2015-03-26 15:04:50',
            const.CPID: '12345',
        }

        expected_params = mock.Mock()
        mock_get_input.return_value = raw_filters

        parsed_start_date = timeutils.parse_strtime(
            raw_filters[const.START_DATE],
            fmt
        )

        parsed_end_date = timeutils.parse_strtime(
            raw_filters[const.END_DATE],
            fmt
        )

        expected_result = {
            const.START_DATE: parsed_start_date,
            const.END_DATE: parsed_end_date,
            const.CPID: '12345'
        }

        result = api_utils.parse_input_params(expected_params)
        self.assertEqual(result, expected_result)

        mock_get_input.assert_called_once_with(expected_params)
Example #7
def make_test_data(conf, name, meter_type, unit, volume, random_min,
                   random_max, user_id, project_id, resource_id, start,
                   end, interval, resource_metadata=None, source='artificial'):
    resource_metadata = resource_metadata or {'display_name': 'toto',
                                              'host': 'tata',
                                              'image_ref': 'test',
                                              'instance_flavor_id': 'toto',
                                              'server_group': 'toto',
                                              }
    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)

    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(minutes=interval)

    print('Adding new samples for meter %s.' % (name))
    # Generate samples
    n = 0
    total_volume = volume
    while timestamp <= end:
        if (random_min >= 0 and random_max >= 0):
            # If a random element is defined, add it to the
            # user-given volume.
            if isinstance(random_min, int) and isinstance(random_max, int):
                total_volume += random.randint(random_min, random_max)
            else:
                total_volume += random.uniform(random_min, random_max)

        c = sample.Sample(name=name,
                          type=meter_type,
                          unit=unit,
                          volume=total_volume,
                          user_id=user_id,
                          project_id=project_id,
                          resource_id=resource_id,
                          timestamp=timestamp.isoformat(),
                          resource_metadata=resource_metadata,
                          source=source,
                          )
        data = utils.meter_message_from_counter(
            c, conf.publisher.telemetry_secret)
        # timestamp should be string when calculating signature, but should be
        # datetime object when calling record_metering_data.
        data['timestamp'] = timestamp
        yield data
        n += 1
        timestamp = timestamp + increment

        if (meter_type == 'gauge' or meter_type == 'delta'):
            # For delta and gauge meters we don't want the value to grow
            # over time by the random element, so we always reset it to
            # volume.
            total_volume = volume

    print('Added %d new samples for meter %s.' % (n, name))
Example #8
def make_test_data(name, meter_type, unit, user_id, project_id,
                   start, end, interval,
                   volume, random_min=-1, random_max=-1,
                   resources_count=1, resource_metadata=None,
                   source='artificial', **kwargs):
    resource_metadata = resource_metadata or {'display_name': 'toto',
                                              'host': 'tata',
                                              'image_ref': 'test',
                                              'instance_flavor_id': 'toto',
                                              'server_group': 'toto',
                                              }
    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)

    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(seconds=interval)
    resources = [str(uuid.uuid4()) for _ in moves.xrange(resources_count)]
    print('Adding new samples for meter %s.' % (name))
    # Generate samples
    n = 0
    total_volume = volume
    while timestamp <= end:
        if (random_min >= 0 and random_max >= 0):
            # If a random element is defined, add it to the
            # user-given volume.
            if isinstance(random_min, int) and isinstance(random_max, int):
                total_volume += random.randint(random_min, random_max)
            else:
                total_volume += random.uniform(random_min, random_max)
        resource_id = resources[random.randint(0, len(resources) - 1)]
        c = sample.Sample(name=name,
                          type=meter_type,
                          unit=unit,
                          volume=total_volume,
                          user_id=user_id,
                          project_id=project_id,
                          resource_id=resource_id,
                          timestamp=timestamp.isoformat(),
                          resource_metadata=resource_metadata,
                          source=source,
                          )
        data = utils.meter_message_from_counter(
            c, cfg.CONF.publisher.telemetry_secret)
        yield data
        n += 1
        timestamp = timestamp + increment

        if (meter_type == 'gauge' or meter_type == 'delta'):
            # For delta and gauge, we don't want to increase the value
            # in time by random element. So we always set it back to
            # volume.
            total_volume = volume

    print('Added %d new samples for meter %s.' % (n, name))
Example #9
    def _verify_export_location_structure(
            self, export_locations, role='admin', version=LATEST_MICROVERSION,
            format='summary'):

        # Determine which keys to expect based on role, version and format
        summary_keys = ['id', 'path']
        if utils.is_microversion_ge(version, '2.14'):
            summary_keys += ['preferred']

        admin_summary_keys = summary_keys + [
            'share_instance_id', 'is_admin_only']

        detail_keys = summary_keys + ['created_at', 'updated_at']

        admin_detail_keys = admin_summary_keys + ['created_at', 'updated_at']

        if format == 'summary':
            if role == 'admin':
                expected_keys = admin_summary_keys
            else:
                expected_keys = summary_keys
        else:
            if role == 'admin':
                expected_keys = admin_detail_keys
            else:
                expected_keys = detail_keys

        if not isinstance(export_locations, (list, tuple, set)):
            export_locations = (export_locations, )

        for export_location in export_locations:

            # Check that the correct keys are present
            self.assertEqual(len(expected_keys), len(export_location))
            for key in expected_keys:
                self.assertIn(key, export_location)

            # Check the format of ever-present summary keys
            self.assertTrue(uuidutils.is_uuid_like(export_location['id']))
            self.assertTrue(isinstance(export_location['path'],
                                       six.string_types))

            if utils.is_microversion_ge(version, '2.14'):
                self.assertIn(export_location['preferred'], (True, False))

            if role == 'admin':
                self.assertIn(export_location['is_admin_only'], (True, False))
                self.assertTrue(uuidutils.is_uuid_like(
                    export_location['share_instance_id']))

            # Check the format of the detail keys
            if format == 'detail':
                for time in (export_location['created_at'],
                             export_location['updated_at']):
                    # parse_strtime raises ValueError for a malformed
                    # value, so simply parsing 'time' asserts that it
                    # holds a valid date.
                    timeutils.parse_strtime(time)
Example #10
def make_test_data(name, meter_type, unit, volume, random_min,
                   random_max, user_id, project_id, resource_id,
                   resource_list, start, end, interval,
                   resource_metadata=None, source='artificial'):
    resource_metadata = resource_metadata or {}
    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)

    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(minutes=interval)

    print('Adding new events for meter %s.' % (name))
    # Generate events
    n = 0
    total_volume = volume
    while timestamp <= end:
        if (random_min >= 0 and random_max >= 0):
            # If a random element is defined, add it to the
            # user-given volume.
            if isinstance(random_min, int) and isinstance(random_max, int):
                total_volume += random.randint(random_min, random_max)
            else:
                total_volume += random.uniform(random_min, random_max)

        c = sample.Sample(name=name,
                          type=meter_type,
                          unit=unit,
                          volume=total_volume,
                          user_id=user_id,
                          project_id=project_id,
                          resource_id=resource_list[random.randint(0,
                                                    len(resource_list) - 1)],
                          timestamp=timestamp.isoformat(),
                          resource_metadata=resource_metadata,
                          source=source,
                          )
        data = utils.meter_message_from_counter(
            c, cfg.CONF.publisher.telemetry_secret)
        # data = utils.meter_message_from_counter(
        #      c, cfg.CONF.publisher.metering_secret)

        yield data
        n += 1
        timestamp = timestamp + increment

        if (meter_type == 'gauge' or meter_type == 'delta'):
            # For delta and gauge, we don't want to increase the value
            # in time by random element. So we always set it back to
            # volume.
            total_volume = volume

    print('Added %d new events for meter %s.' % (n, name))
Example #11
 def test_return_valid_isoformat(self):
     """Ensure that the ec2 api returns datetime in xs:dateTime
        (which apparently isn't datetime.isoformat())
        NOTE(ken-pepple): https://bugs.launchpad.net/patron/+bug/721297
     """
     conv = apirequest._database_to_isoformat
     # sqlite database representation with microseconds
     time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
                                               "%Y-%m-%d %H:%M:%S.%f")
     self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z')
     # mysql database representation
     time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
                                               "%Y-%m-%d %H:%M:%S")
     self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z')
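
_database_to_isoformat itself is project-internal and not shown here; judging from the expected outputs, it presumably truncates microseconds to milliseconds and appends 'Z'. A hypothetical reconstruction:

def _database_to_isoformat(dt):
    # Keep three fractional digits (milliseconds) and tag the value as UTC,
    # e.g. '2011-02-21T20:14:10.634Z' for the microsecond input above and
    # '2011-02-21T19:56:18.000Z' for the whole-second one.
    return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'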
Example #12
 def test_return_valid_isoformat(self):
     """Ensure that the ec2 api returns datetime in xs:dateTime
        (which apparently isn't datetime.isoformat())
        NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
     """
     conv = apirequest._database_to_isoformat
     # sqlite database representation with microseconds
     time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
                                               "%Y-%m-%d %H:%M:%S.%f")
     self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z')
     # mysql database representation
     time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
                                               "%Y-%m-%d %H:%M:%S")
     self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z')
Example #13
    def update(self,
               lease_id,
               name=None,
               prolong_for=None,
               reduce_by=None,
               end_date=None,
               advance_by=None,
               defer_by=None,
               start_date=None,
               reservations=None):
        """Update attributes of the lease."""
        values = {}
        if name:
            values['name'] = name

        lease_end_date_change = prolong_for or reduce_by or end_date
        lease_start_date_change = defer_by or advance_by or start_date
        lease = None

        if lease_end_date_change:
            lease = self.get(lease_id)
            if end_date:
                date = timeutils.parse_strtime(end_date, utils.API_DATE_FORMAT)
                values['end_date'] = date.strftime(utils.API_DATE_FORMAT)
            else:
                self._add_lease_date(values, lease, 'end_date',
                                     lease_end_date_change,
                                     prolong_for is not None)

        if lease_start_date_change:
            if lease is None:
                lease = self.get(lease_id)
            if start_date:
                date = timeutils.parse_strtime(start_date,
                                               utils.API_DATE_FORMAT)
                values['start_date'] = date.strftime(utils.API_DATE_FORMAT)
            else:
                self._add_lease_date(values, lease, 'start_date',
                                     lease_start_date_change,
                                     defer_by is not None)

        if reservations:
            values['reservations'] = reservations

        if not values:
            return _('No values to update passed.')
        resp, body = self.request_manager.put('/leases/%s' % lease_id,
                                              body=values)
        return body['lease']
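
The parse-then-strftime round trip above doubles as input validation: re-rendering the user-supplied date through utils.API_DATE_FORMAT makes malformed strings fail early with ValueError. A minimal sketch of the same idea, with the format string assumed for illustration:

from oslo_utils import timeutils

API_DATE_FORMAT = '%Y-%m-%d %H:%M'  # assumed; the real value lives in utils


def normalize_date(value):
    # ValueError propagates for malformed input, which is the point.
    return timeutils.parse_strtime(value, API_DATE_FORMAT).strftime(
        API_DATE_FORMAT)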
Example #14
    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
                 roles=None, remote_address=None, timestamp=None,
                 request_id=None, auth_token=None, overwrite=True,
                 quota_class=None, user_name=None, project_name=None,
                 service_catalog=None, instance_lock_checked=False,
                 user_auth_plugin=None, **kwargs):
        """:param read_deleted: 'no' indicates deleted records are hidden,
                'yes' indicates deleted records are visible,
                'only' indicates that *only* deleted records are visible.

           :param overwrite: Set to False to ensure that the greenthread local
                copy of the index is not overwritten.

           :param user_auth_plugin: The auth plugin for the current request's
                authentication data.

           :param kwargs: Extra arguments that might be present, but we ignore
                because they possibly came in from older rpc messages.
        """
        if kwargs:
            LOG.warning(_LW('Arguments dropped when creating context: %s') %
                        str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if not request_id:
            request_id = context.generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token

        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                if s.get('type') in ('volume', 'volumev2', 'key-manager')]
        else:
            # if list is empty or none
            self.service_catalog = []

        self.instance_lock_checked = instance_lock_checked

        # NOTE(markmc): this attribute is currently only used by the
        # rs_limits turnstile pre-processor.
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class
        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = is_admin
        self.user_auth_plugin = user_auth_plugin
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self)
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()
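
The timestamp handling here exists because a context may be built locally (timestamp is a datetime) or deserialized from an older RPC message (timestamp is a string). Condensed into a standalone sketch:

import six
from oslo_utils import timeutils


def normalize_timestamp(timestamp=None):
    # Default to now, then parse string timestamps back into datetimes.
    timestamp = timestamp or timeutils.utcnow()
    if isinstance(timestamp, six.string_types):
        timestamp = timeutils.parse_strtime(timestamp)
    return timestamp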
Example #16
    def __init__(self, user_id=None, project_id=None, is_admin=None,
                 read_deleted="no", remote_address=None, timestamp=None,
                 quota_class=None, service_catalog=None,
                 instance_lock_checked=False, user_auth_plugin=None, **kwargs):
        """:param read_deleted: 'no' indicates deleted records are hidden,
                'yes' indicates deleted records are visible,
                'only' indicates that *only* deleted records are visible.

           :param overwrite: Set to False to ensure that the greenthread local
                copy of the index is not overwritten.

           :param user_auth_plugin: The auth plugin for the current request's
                authentication data.
        """
        if user_id:
            kwargs['user'] = user_id
        if project_id:
            kwargs['tenant'] = project_id

        super(RequestContext, self).__init__(is_admin=is_admin, **kwargs)

        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp

        if service_catalog:
            # Only include required parts of service_catalog
            # NOTE(lyarwood): While volumev2 is no longer supported with Queens
            # we still provide it as part of the service catalog as the request
            # context may end up being sent over the wire to a Pike compute
            # that is specifically looking for a cinderv2 type via catalog_info
            self.service_catalog = [s for s in service_catalog
                if s.get('type') in ('image', 'block-storage', 'volumev2',
                                     'volumev3', 'key-manager', 'placement',
                                     'network')]
        else:
            # if list is empty or none
            self.service_catalog = []

        self.instance_lock_checked = instance_lock_checked

        # NOTE(markmc): this attribute is currently only used by the
        # rs_limits turnstile pre-processor.
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class

        # NOTE(dheeraj): The following attributes are used by cellsv2 to store
        # connection information for connecting to the target cell.
        # It is only manipulated using the target_cell contextmanager
        # provided by this module
        self.db_connection = None
        self.mq_connection = None

        self.user_auth_plugin = user_auth_plugin
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self)
Example #17
def parse_input_params(expected_input_params):
    """Parse input parameters from request.

        :param expected_input_params: (array) Expected input
                                      params specified in constants.

    """
    raw_filters = _get_input_params_from_request(expected_input_params)
    filters = copy.deepcopy(raw_filters)
    date_fmt = CONF.api.input_date_format

    for key, value in filters.items():
        if key == const.START_DATE or key == const.END_DATE:
            try:
                filters[key] = timeutils.parse_strtime(value, date_fmt)
            except (ValueError, TypeError) as exc:
                raise ParseInputsError('Invalid date format: %(exc)s' %
                                       {'exc': exc})

    start_date = filters.get(const.START_DATE)
    end_date = filters.get(const.END_DATE)
    if start_date and end_date:
        if start_date > end_date:
            raise ParseInputsError('Invalid dates: %(start)s '
                                   'more than %(end)s' % {
                                       'start': const.START_DATE,
                                       'end': const.END_DATE
                                   })
    return filters
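
Note that the range check compares parsed datetimes, not raw strings; stripped of the request plumbing, the core behavior is:

from oslo_utils import timeutils

fmt = '%Y-%m-%d %H:%M:%S'
start = timeutils.parse_strtime('2015-03-26 15:04:40', fmt)
end = timeutils.parse_strtime('2015-03-26 15:04:50', fmt)
# datetime comparison stays correct across day/month boundaries,
# unlike naive string comparison on arbitrary formats.
assert start < end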
Example #18
 def test_to_json_with_date_format_value(self):
     test_date = tu.parse_strtime("0001-03-08T02:00:00",
                                  '%Y-%m-%dT%H:%M:%S')
     fixture = {"date": test_date}
     expected = '{"date": "0001-03-08T02:00:00"}'
     actual = serializers.JSONResponseSerializer().to_json(fixture)
     self.assertEqual(expected, actual)
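
serializers.JSONResponseSerializer is project-internal; the behavior the test pins down, datetimes rendered as ISO-style strings, can be sketched with the standard library alone:

import datetime
import json


def to_json(fixture):
    # Hypothetical stand-in: render datetimes via isoformat(), which yields
    # '0001-03-08T02:00:00' for the microsecond-free value above.
    def default(obj):
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        raise TypeError(obj)
    return json.dumps(fixture, default=default)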
Example #19
    def _poll_shelved_instances(self, context):

        if CONF.shelved_offload_time <= 0:
            return

        filters = {'vm_state': vm_states.SHELVED,
                   'task_state': None,
                   'host': self.host}
        shelved_instances = objects.InstanceList.get_by_filters(
            context, filters=filters, expected_attrs=['system_metadata'],
            use_slave=True)

        to_gc = []
        for instance in shelved_instances:
            sys_meta = instance.system_metadata
            shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])
            if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):
                to_gc.append(instance)

        for instance in to_gc:
            try:
                instance.task_state = task_states.SHELVING_OFFLOADING
                instance.save(expected_task_state=(None,))
                self.shelve_offload_instance(context, instance,
                                             clean_shutdown=False)
            except Exception:
                LOG.exception(_LE('Periodic task failed to offload instance.'),
                              instance=instance)
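
timeutils.is_older_than(before, seconds) returns True when 'before' lies more than 'seconds' in the past, so only instances shelved longer than shelved_offload_time are offloaded. For example:

import datetime

from oslo_utils import timeutils

shelved_at = timeutils.utcnow() - datetime.timedelta(hours=2)
# True: two hours exceeds a 3600-second offload window.
assert timeutils.is_older_than(shelved_at, 3600)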
Example #20
def parse_input_params(expected_input_params):
    """Parse input parameters from request.

    :param expected_input_params: (array) Expected input
                                  params specified in constants.
    """
    raw_filters = _get_input_params_from_request(expected_input_params)
    filters = copy.deepcopy(raw_filters)
    date_fmt = CONF.api.input_date_format

    for key, value in filters.items():
        if key == const.START_DATE or key == const.END_DATE:
            try:
                filters[key] = timeutils.parse_strtime(value, date_fmt)
            except (ValueError, TypeError) as exc:
                raise api_exc.ParseInputsError(
                    'Invalid date format: %(exc)s' % {'exc': exc})

    start_date = filters.get(const.START_DATE)
    end_date = filters.get(const.END_DATE)
    if start_date and end_date:
        if start_date > end_date:
            raise api_exc.ParseInputsError(
                'Invalid dates: %(start)s more than %(end)s'
                '' % {'start': const.START_DATE, 'end': const.END_DATE})
    if const.SIGNED in filters:
        if is_authenticated():
            filters[const.OPENID] = get_user_id()
        else:
            raise api_exc.ParseInputsError(
                'To see signed test results you need to authenticate')
    return filters
Example #21
    def notify_service_capabilities(self, service_name, backend, capabilities,
                                    timestamp):
        """Notify the ceilometer with updated volume stats"""
        if service_name != 'volume':
            return

        updated = []
        capa_new = self.service_states.get(backend, {})
        timestamp = timestamp or timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)

        # Compare the capabilities and timestamps to decide whether to notify
        if not capa_new:
            updated = self._get_updated_pools(capa_new, capabilities)
        else:
            if timestamp > self.service_states[backend]["timestamp"]:
                updated = self._get_updated_pools(
                    self.service_states[backend], capabilities)
                if not updated:
                    updated = self._get_updated_pools(
                        self.service_states_last_update.get(backend, {}),
                        self.service_states.get(backend, {}))

        if updated:
            capab_copy = dict(capabilities)
            capab_copy["timestamp"] = timestamp
            # If capabilities changes, notify and record the capabilities.
            self.service_states_last_update[backend] = capab_copy
            self.get_usage_and_notify(capabilities, updated, backend,
                                      timestamp)
Example #22
    def update_from_volume_capability(self, capability, service=None):
        """Update information about a pool from its volume_node info."""
        self.update_capabilities(capability, service)
        if capability:
            timestamp = capability['timestamp']
            if isinstance(timestamp, six.string_types):
                timestamp = timeutils.parse_strtime(timestamp)
            if self.updated and self.updated > timestamp:
                return
            self.update_backend(capability)

            self.total_capacity_gb = capability.get('total_capacity_gb', 0)
            self.free_capacity_gb = capability.get('free_capacity_gb', 0)
            self.allocated_capacity_gb = capability.get(
                'allocated_capacity_gb', 0)
            self.QoS_support = capability.get('QoS_support', False)
            self.reserved_percentage = capability.get('reserved_percentage', 0)
            # provisioned_capacity_gb is the apparent total capacity of
            # all the volumes created on a backend, which is greater than
            # or equal to allocated_capacity_gb, which is the apparent
            # total capacity of all the volumes created on a backend
            # in Cinder. Using allocated_capacity_gb as the default of
            # provisioned_capacity_gb if it is not set.
            self.provisioned_capacity_gb = capability.get(
                'provisioned_capacity_gb', self.allocated_capacity_gb)
            self.max_over_subscription_ratio = capability.get(
                'max_over_subscription_ratio',
                CONF.max_over_subscription_ratio)
            self.thin_provisioning_support = capability.get(
                'thin_provisioning_support', False)
            self.thick_provisioning_support = capability.get(
                'thick_provisioning_support', False)
            self.multiattach = capability.get('multiattach', False)
Example #23
 def _get_sample(message, name):
     try:
         for metric in message['payload']['metrics']:
             if name == metric['name']:
                 info = {
                     'payload': metric,
                     'event_type': message['event_type'],
                     'publisher_id': message['publisher_id'],
                     'resource_id': '%s_%s' % (message['payload']['host'],
                                               message['payload']['nodename']),
                     'timestamp': str(
                         timeutils.parse_strtime(metric['timestamp'])),
                 }
                 return info
     except Exception as err:
         LOG.warning(_('An error occurred while building %(m)s '
                       'sample: %(e)s') % {'m': name, 'e': err})
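
A usage sketch with a fabricated notification message (the field names mirror the lookups above; the payload shape is illustrative only):

message = {
    'event_type': 'compute.metrics.update',
    'publisher_id': 'compute.host1',
    'payload': {
        'host': 'host1',
        'nodename': 'node1',
        'metrics': [{'name': 'cpu.idle.percent',
                     'timestamp': '2016-04-18T02:56:25.198871'}],
    },
}
info = _get_sample(message, 'cpu.idle.percent')
# info['resource_id'] becomes 'host1_node1'; the timestamp string is parsed
# by parse_strtime and re-stringified.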
Example #24
    def test_get_updated_time(self):
        updated_at_time = timeutils.parse_strtime("2016-04-18T02:56:25.198871")
        service_ref = {
            'host': 'fake-host',
            'topic': 'compute',
            'updated_at': updated_at_time.replace(tzinfo=iso8601.UTC)
        }

        # If no record returned from the mc, return record from DB
        self.mc_client.get.return_value = None
        self.assertEqual(service_ref['updated_at'],
                         self.servicegroup_api.get_updated_time(service_ref))
        self.mc_client.get.assert_called_once_with('compute:fake-host')
        # If the record in mc is newer than DB, return record from mc
        self.mc_client.reset_mock()
        retval = timeutils.utcnow()
        self.mc_client.get.return_value = retval
        self.assertEqual(retval.replace(tzinfo=iso8601.UTC),
                         self.servicegroup_api.get_updated_time(service_ref))
        self.mc_client.get.assert_called_once_with('compute:fake-host')
        # If the record in DB is newer than mc, return record from DB
        self.mc_client.reset_mock()
        service_ref['updated_at'] = \
            retval.replace(tzinfo=iso8601.UTC)
        self.mc_client.get.return_value = updated_at_time
        self.assertEqual(service_ref['updated_at'],
                         self.servicegroup_api.get_updated_time(service_ref))
        self.mc_client.get.assert_called_once_with('compute:fake-host')
        # If no record returned from the DB, return the record from mc
        self.mc_client.reset_mock()
        service_ref['updated_at'] = None
        self.mc_client.get.return_value = updated_at_time
        self.assertEqual(updated_at_time.replace(tzinfo=iso8601.UTC),
                         self.servicegroup_api.get_updated_time(service_ref))
        self.mc_client.get.assert_called_once_with('compute:fake-host')
Example #25
    def create_cache_entry(self, context, volume_ref, image_id, image_meta):
        """Create a new cache entry for an image.

        This assumes that the volume described by volume_ref has already been
        created and is in an available state.
        """
        LOG.debug(
            "Creating new image-volume cache entry for image " "%(image_id)s on host %(host)s.",
            {"image_id": image_id, "host": volume_ref["host"]},
        )

        # When we are creating an image from a volume the updated_at field
        # will be a unicode representation of the datetime. In that case
        # we just need to parse it into one. If it is an actual datetime
        # we want to just grab it as a UTC naive datetime.
        image_updated_at = image_meta["updated_at"]
        if isinstance(image_updated_at, six.string_types):
            image_updated_at = timeutils.parse_strtime(image_updated_at)
        else:
            image_updated_at = image_updated_at.astimezone(timezone("UTC"))

        cache_entry = self.db.image_volume_cache_create(
            context,
            volume_ref["host"],
            image_id,
            image_updated_at.replace(tzinfo=None),
            volume_ref["id"],
            volume_ref["size"],
        )

        LOG.debug("New image-volume cache entry created: %(entry)s.", {"entry": self._entry_to_str(cache_entry)})
        return cache_entry
Example #26
def parse_input_params(expected_input_params):
    """Parse input parameters from request.

    :param expected_input_params: (array) Expected input
                                  params specified in constants.
    """
    raw_filters = _get_input_params_from_request(expected_input_params)
    filters = copy.deepcopy(raw_filters)
    date_fmt = CONF.api.input_date_format

    for key, value in filters.items():
        if key == const.START_DATE or key == const.END_DATE:
            try:
                filters[key] = timeutils.parse_strtime(value, date_fmt)
            except (ValueError, TypeError) as exc:
                raise api_exc.ParseInputsError('Invalid date format: %(exc)s' %
                                               {'exc': exc})

    start_date = filters.get(const.START_DATE)
    end_date = filters.get(const.END_DATE)
    if start_date and end_date:
        if start_date > end_date:
            raise api_exc.ParseInputsError(
                'Invalid dates: %(start)s more than %(end)s'
                '' % {
                    'start': const.START_DATE,
                    'end': const.END_DATE
                })
    if const.SIGNED in filters:
        if is_authenticated():
            filters[const.OPENID] = get_user_id()
        else:
            raise api_exc.ParseInputsError(
                'To see signed test results you need to authenticate')
    return filters
Example #27
    def _poll_shelved_instances(self, context):

        if CONF.shelved_offload_time <= 0:
            return

        filters = {
            'vm_state': vm_states.SHELVED,
            'task_state': None,
            'host': self.host
        }
        shelved_instances = objects.InstanceList.get_by_filters(
            context,
            filters=filters,
            expected_attrs=['system_metadata'],
            use_slave=True)

        to_gc = []
        for instance in shelved_instances:
            sys_meta = instance.system_metadata
            shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])
            if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):
                to_gc.append(instance)

        for instance in to_gc:
            try:
                instance.task_state = task_states.SHELVING_OFFLOADING
                instance.save(expected_task_state=(None, ))
                self.shelve_offload_instance(context,
                                             instance,
                                             clean_shutdown=False)
            except Exception:
                LOG.exception(_LE('Periodic task failed to offload instance.'),
                              instance=instance)
Example #29
def _time(timestamp=None):
    if not timestamp:
        timestamp = timeutils.utcnow()
        print(timestamp)
    if isinstance(timestamp, six.string_types):
        timestamp = timeutils.parse_strtime(timestamp)
    return timestamp
Example #30
def parse_input_params(expected_input_params):
    """Parse input parameters from request.

        :param expected_input_params: (array) Expected input
                                      params specified in constants.

    """
    raw_filters = _get_input_params_from_request(expected_input_params)
    filters = copy.deepcopy(raw_filters)
    date_fmt = CONF.api.input_date_format

    for key, value in filters.items():
        if key == const.START_DATE or key == const.END_DATE:
            try:
                filters[key] = timeutils.parse_strtime(value, date_fmt)
            except (ValueError, TypeError) as exc:
                raise ParseInputsError('Invalid date format: %(exc)s'
                                       % {'exc': exc})

    start_date = filters.get(const.START_DATE)
    end_date = filters.get(const.END_DATE)
    if start_date and end_date:
        if start_date > end_date:
            raise ParseInputsError('Invalid dates: %(start)s '
                                   'more than %(end)s' % {
                                       'start': const.START_DATE,
                                       'end': const.END_DATE
                                   })
    return filters
Example #31
    def test_get_updated_time(self):
        updated_at_time = timeutils.parse_strtime("2016-04-18T02:56:25.198871")
        service_ref = {
            'host': 'fake-host',
            'topic': 'compute',
            'updated_at': updated_at_time.replace(tzinfo=iso8601.iso8601.Utc())
        }

        self.mc_client.get.return_value = None
        self.assertEqual(service_ref['updated_at'],
                         self.servicegroup_api.get_updated_time(service_ref))
        self.mc_client.get.assert_called_once_with('compute:fake-host')
        self.mc_client.reset_mock()
        retval = timeutils.utcnow()
        self.mc_client.get.return_value = retval
        self.assertEqual(retval.replace(tzinfo=iso8601.iso8601.Utc()),
                         self.servicegroup_api.get_updated_time(service_ref))
        self.mc_client.get.assert_called_once_with('compute:fake-host')
        self.mc_client.reset_mock()
        service_ref['updated_at'] = \
            retval.replace(tzinfo=iso8601.iso8601.Utc())
        self.mc_client.get.return_value = updated_at_time
        self.assertEqual(service_ref['updated_at'],
                         self.servicegroup_api.get_updated_time(service_ref))
        self.mc_client.get.assert_called_once_with('compute:fake-host')
Example #32
    def test_get_updated_time(self):
        updated_at_time = timeutils.parse_strtime("2016-04-18T02:56:25.198871")
        service_ref = {
            'host': 'fake-host',
            'topic': 'compute',
            'updated_at': updated_at_time.replace(tzinfo=iso8601.UTC)
        }

        self.mc_client.get.return_value = None
        self.assertEqual(service_ref['updated_at'],
                         self.servicegroup_api.get_updated_time(service_ref))
        self.mc_client.get.assert_called_once_with('compute:fake-host')
        self.mc_client.reset_mock()
        retval = timeutils.utcnow()
        self.mc_client.get.return_value = retval
        self.assertEqual(retval.replace(tzinfo=iso8601.UTC),
                         self.servicegroup_api.get_updated_time(service_ref))
        self.mc_client.get.assert_called_once_with('compute:fake-host')
        self.mc_client.reset_mock()
        service_ref['updated_at'] = \
            retval.replace(tzinfo=iso8601.UTC)
        self.mc_client.get.return_value = updated_at_time
        self.assertEqual(service_ref['updated_at'],
                         self.servicegroup_api.get_updated_time(service_ref))
        self.mc_client.get.assert_called_once_with('compute:fake-host')
Example #33
 def _set_status(self, id):
     service = objects.Service.get(pecan.request.context, id)
     last_heartbeat = (service.last_seen_up or service.updated_at
                       or service.created_at)
     if isinstance(last_heartbeat, six.string_types):
         # NOTE(russellb) If this service came in over rpc via
         # conductor, then the timestamp will be a string and needs to be
         # converted back to a datetime.
         last_heartbeat = timeutils.parse_strtime(last_heartbeat)
     else:
         # Objects have proper UTC timezones, but the timeutils comparison
         # below does not (and will fail)
         last_heartbeat = last_heartbeat.replace(tzinfo=None)
     elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
     is_up = abs(elapsed) <= CONF.service_down_time
     if not is_up:
         LOG.warning(
             'Seems service %(name)s on host %(host)s is down. '
              'Last heartbeat was %(lhb)s. '
             'Elapsed time is %(el)s', {
                 'name': service.name,
                 'host': service.host,
                 'lhb': str(last_heartbeat),
                 'el': str(elapsed)
             })
         self._status = objects.service.ServiceStatus.FAILED
     else:
         self._status = objects.service.ServiceStatus.ACTIVE
Example #34
    def to_time_zone(self,
                     datetime,
                     tzone=None,
                     input_fmt=None,
                     localize=False):
        """Changes given datetime string into given timezone

        :param datetime: datetime string
        :param tzone: (optional) timezone as a string (e.g. "Europe/Paris"),
               by default it's the current django timezone
        :param input_fmt: (optional) format of datetime param, if None then
               the default Sahara API format (%Y-%m-%dT%H:%M:%S) will be used
        :param localize: (optional) if True then format of datetime will be
               localized according to current timezone else it will be in
               the default Sahara API format (%Y-%m-%dT%H:%M:%S)
        :returns: datetime string in the current django timezone
        """

        default_fmt = '%Y-%m-%dT%H:%M:%S'
        if tzone is None:
            tzone = self.request.session.get('django_timezone', 'UTC')
        if input_fmt is None:
            input_fmt = default_fmt
        dt_in_utc = timezone.utc.localize(
            timeutils.parse_strtime(datetime, input_fmt))
        dt_in_zone = dt_in_utc.astimezone(ptz(tzone))
        if localize:
            return filters.date(dt_in_zone, "DATETIME_FORMAT")
        else:
            return dt_in_zone.strftime(default_fmt)
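
ptz here is presumably pytz.timezone; the localize-then-astimezone dance can be exercised on its own:

import pytz

from oslo_utils import timeutils

dt = timeutils.parse_strtime('2016-04-18T02:56:25', '%Y-%m-%dT%H:%M:%S')
dt_utc = pytz.utc.localize(dt)  # attach UTC to the naive parsed value
dt_paris = dt_utc.astimezone(pytz.timezone('Europe/Paris'))  # shift zone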
Example #35
 def parse_strtime(strtime):
     if _ms_time_regex.match(strtime):
         # NOTE(MotoKen): time format for aws-sdk-java contains millisecond
         time_format = "%Y-%m-%dT%H:%M:%S.%fZ"
     else:
         time_format = "%Y-%m-%dT%H:%M:%SZ"
     return timeutils.parse_strtime(strtime, time_format)
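
_ms_time_regex is defined elsewhere in that module; with a plausible stand-in (an assumption, not the original), the wrapper round-trips both variants:

import re

from oslo_utils import timeutils

# Assumed equivalent of the module-level _ms_time_regex.
_ms_time_regex = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z$')


def parse_strtime(strtime):
    if _ms_time_regex.match(strtime):
        time_format = '%Y-%m-%dT%H:%M:%S.%fZ'
    else:
        time_format = '%Y-%m-%dT%H:%M:%SZ'
    return timeutils.parse_strtime(strtime, time_format)


parse_strtime('2016-04-18T02:56:25.198Z')  # millisecond variant
parse_strtime('2016-04-18T02:56:25Z')      # whole-second variant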
Example #36
    def instance_update(self, context, instance_uuid, updates, service):
        for key, value in six.iteritems(updates):
            if key not in allowed_updates:
                LOG.error(
                    _LE("Instance update attempted for "
                        "'%(key)s' on %(instance_uuid)s"), {
                            'key': key,
                            'instance_uuid': instance_uuid
                        })
                raise KeyError("unexpected update keyword '%s'" % key)
            if key in datetime_fields and isinstance(value, six.string_types):
                updates[key] = timeutils.parse_strtime(value)

        # NOTE(danms): the send_update() call below is going to want to know
        # about the flavor, so we need to join the appropriate things here,
        # and objectify the result.
        old_ref, instance_ref = self.db.instance_update_and_get_original(
            context,
            instance_uuid,
            updates,
            columns_to_join=['system_metadata'])
        inst_obj = objects.Instance._from_db_object(
            context,
            objects.Instance(),
            instance_ref,
            expected_attrs=['system_metadata'])
        notifications.send_update(context, old_ref, inst_obj, service)
        return jsonutils.to_primitive(instance_ref)
Example #37
    def __init__(self,
                 user_id,
                 project_id,
                 is_admin=None,
                 read_deleted="no",
                 roles=None,
                 remote_address=None,
                 timestamp=None,
                 request_id=None,
                 auth_token=None,
                 overwrite=True,
                 quota_class=None,
                 service_catalog=None,
                 **kwargs):
        """Initialize RequestContext.

        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        if kwargs:
            LOG.warn(_LW('Arguments dropped when creating context: %s'),
                     str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.is_admin = is_admin
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self.roles)
        elif self.is_admin and 'admin' not in self.roles:
            self.roles.append('admin')
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if service_catalog:
            self.service_catalog = [
                s for s in service_catalog
                if s.get('type') in ('compute', 'volume')
            ]
        else:
            self.service_catalog = []

        if not request_id:
            request_id = common_context.generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token
        self.quota_class = quota_class
        if overwrite or not common_context.get_current():
            self.update_store()
Example #38
 def is_up(self, service_ref):
     """Moved from nova.utils
     Check whether a service is up based on last heartbeat.
     """
     # Keep checking 'updated_at' if 'last_seen_up' isn't set.
     # Should be able to use only 'last_seen_up' in the M release
     last_heartbeat = (service_ref.get('last_seen_up')
                       or service_ref['updated_at']
                       or service_ref['created_at'])
     if isinstance(last_heartbeat, six.string_types):
         # NOTE(russellb) If this service_ref came in over rpc via
         # conductor, then the timestamp will be a string and needs to be
         # converted back to a datetime.
         last_heartbeat = timeutils.parse_strtime(last_heartbeat)
     else:
         # Objects have proper UTC timezones, but the timeutils comparison
         # below does not (and will fail)
         last_heartbeat = last_heartbeat.replace(tzinfo=None)
     # Timestamps in DB are UTC.
     elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
     is_up = abs(elapsed) <= self.service_down_time
     if not is_up:
         LOG.debug(
             'Seems service %(binary)s on host %(host)s is down. '
             'Last heartbeat was %(lhb)s. Elapsed time is %(el)s', {
                 'binary': service_ref.get('binary'),
                 'host': service_ref.get('host'),
                 'lhb': str(last_heartbeat),
                 'el': str(elapsed)
             })
     return is_up
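Stripped of the service_ref plumbing, the liveness check above reduces to parsing the heartbeat and comparing naive UTC datetimes. A self-contained sketch, with the down-time threshold hard-coded here as an assumption (nova reads it from configuration):

    from oslo_utils import timeutils

    SERVICE_DOWN_TIME = 60  # assumed threshold in seconds

    def heartbeat_is_fresh(last_heartbeat):
        # Accept the rpc string form; strip tzinfo from aware datetimes so
        # they compare cleanly against the naive utcnow().
        if isinstance(last_heartbeat, str):
            last_heartbeat = timeutils.parse_strtime(last_heartbeat)
        else:
            last_heartbeat = last_heartbeat.replace(tzinfo=None)
        elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
        return abs(elapsed) <= SERVICE_DOWN_TIME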
Example #39
0
 def test_to_json_with_date_format_value(self):
     test_date = tu.parse_strtime("0001-03-08T02:00:00",
                                  '%Y-%m-%dT%H:%M:%S')
     fixture = {"date": test_date}
     expected = '{"date": "0001-03-08T02:00:00"}'
     actual = serializers.JSONResponseSerializer().to_json(fixture)
     self.assertEqual(expected, actual)
Example #40
0
    def to_time_zone(self, datetime, tzone=None,
                     input_fmt=None, localize=False):
        """Changes given datetime string into given timezone

        :param datetime: datetime string
        :param tzone: (optional) timezone as a string (e.g. "Europe/Paris"),
               by default it's the current django timezone
        :param input_fmt: (optional) format of datetime param, if None then
               the default Sahara API format (%Y-%m-%dT%H:%M:%S) will be used
        :param localize: (optional) if True then format of datetime will be
               localized according to current timezone else it will be in
               the default Sahara API format (%Y-%m-%dT%H:%M:%S)
        :returns: datetime string in the current django timezone
        """

        default_fmt = '%Y-%m-%dT%H:%M:%S'
        if tzone is None:
            tzone = self.request.session.get('django_timezone', 'UTC')
        if input_fmt is None:
            input_fmt = default_fmt
        dt_in_utc = timezone.utc.localize(
            timeutils.parse_strtime(datetime, input_fmt))
        dt_in_zone = dt_in_utc.astimezone(ptz(tzone))
        if localize:
            return filters.date(dt_in_zone, "DATETIME_FORMAT")
        else:
            return dt_in_zone.strftime(default_fmt)
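ptz above is presumably pytz.timezone and timezone.utc is pytz's UTC instance; under those assumptions, the conversion at the heart of the method reduces to:

    import pytz
    from oslo_utils import timeutils

    dt = timeutils.parse_strtime('2015-03-26T15:04:40', '%Y-%m-%dT%H:%M:%S')
    dt_in_utc = pytz.utc.localize(dt)  # attach UTC to the naive datetime
    dt_in_zone = dt_in_utc.astimezone(pytz.timezone('Europe/Paris'))
    print(dt_in_zone.strftime('%Y-%m-%dT%H:%M:%S'))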
Example #41
0
    def create_cache_entry(self, context, volume_ref, image_id, image_meta):
        """Create a new cache entry for an image.

        This assumes that the volume described by volume_ref has already been
        created and is in an available state.
        """
        LOG.debug('Creating new image-volume cache entry for image '
                  '%(image_id)s on host %(host)s.',
                  {'image_id': image_id, 'host': volume_ref['host']})

        # When we are creating an image from a volume the updated_at field
        # will be a unicode representation of the datetime. In that case
        # we just need to parse it into one. If it is an actual datetime
        # we want to just grab it as a UTC naive datetime.
        image_updated_at = image_meta['updated_at']
        if isinstance(image_updated_at, six.string_types):
            image_updated_at = timeutils.parse_strtime(image_updated_at)
        else:
            image_updated_at = image_updated_at.astimezone(timezone('UTC'))

        cache_entry = self.db.image_volume_cache_create(
            context,
            volume_ref['host'],
            image_id,
            image_updated_at.replace(tzinfo=None),
            volume_ref['id'],
            volume_ref['size']
        )

        LOG.debug('New image-volume cache entry created: %(entry)s.',
                  {'entry': self._entry_to_str(cache_entry)})
        return cache_entry
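The string-versus-datetime normalization in the middle of this method is self-contained enough to extract; a sketch assuming timezone here is pytz.timezone (to_naive_utc is a name invented for illustration):

    import six
    from pytz import timezone
    from oslo_utils import timeutils

    def to_naive_utc(updated_at):
        # Strings parse straight to naive datetimes; aware datetimes are
        # converted to UTC first and then stripped of tzinfo.
        if isinstance(updated_at, six.string_types):
            return timeutils.parse_strtime(updated_at)
        return updated_at.astimezone(timezone('UTC')).replace(tzinfo=None)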
Example #42
0
    def create_cache_entry(self, context, volume_ref, image_id, image_meta):
        """Create a new cache entry for an image.

        This assumes that the volume described by volume_ref has already been
        created and is in an available state.
        """
        LOG.debug(
            'Creating new image-volume cache entry for image '
            '%(image_id)s on %(service)s', {
                'image_id': image_id,
                'service': volume_ref.service_topic_queue
            })

        # When we are creating an image from a volume the updated_at field
        # will be a unicode representation of the datetime. In that case
        # we just need to parse it into one. If it is an actual datetime
        # we want to just grab it as a UTC naive datetime.
        image_updated_at = image_meta['updated_at']
        if isinstance(image_updated_at, six.string_types):
            image_updated_at = timeutils.parse_strtime(image_updated_at)
        else:
            image_updated_at = image_updated_at.astimezone(timezone('UTC'))

        cache_entry = self.db.image_volume_cache_create(
            context, volume_ref.host, volume_ref.cluster_name, image_id,
            image_updated_at.replace(tzinfo=None), volume_ref.id,
            volume_ref.size)

        LOG.debug('New image-volume cache entry created: %(entry)s.',
                  {'entry': self._entry_to_str(cache_entry)})
        return cache_entry
Example #43
0
File: db.py Project: wxy2933/nova
 def is_up(self, service_ref):
     """Moved from nova.utils
     Check whether a service is up based on last heartbeat.
     """
     last_heartbeat = service_ref['updated_at'] or service_ref['created_at']
     if isinstance(last_heartbeat, six.string_types):
         # NOTE(russellb) If this service_ref came in over rpc via
         # conductor, then the timestamp will be a string and needs to be
         # converted back to a datetime.
         last_heartbeat = timeutils.parse_strtime(last_heartbeat)
     else:
         # Objects have proper UTC timezones, but the timeutils comparison
         # below does not (and will fail)
         last_heartbeat = last_heartbeat.replace(tzinfo=None)
     # Timestamps in DB are UTC.
     elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
     is_up = abs(elapsed) <= self.service_down_time
     if not is_up:
         LOG.debug(
             'Seems service is down. Last heartbeat was %(lhb)s. '
             'Elapsed time is %(el)s', {
                 'lhb': str(last_heartbeat),
                 'el': str(elapsed)
             })
     return is_up
Example #44
0
 def assertValidISO8601ExtendedFormatDatetime(self, dt):
     try:
         parsed = timeutils.parse_strtime(dt, fmt=TIME_FORMAT)
     except Exception:
         msg = '%s is not a valid ISO 8601 extended format date time.' % dt
         raise AssertionError(msg)
     self.assertIsInstance(parsed, datetime.datetime)
     return parsed
Example #45
0
 def _add_lease_date(self, values, lease, key, delta_date, positive_delta):
     delta_sec = utils.from_elapsed_time_to_delta(
         delta_date,
         pos_sign=positive_delta)
     date = timeutils.parse_strtime(lease[key],
                                    utils.LEASE_DATE_FORMAT)
     values[key] = (date + delta_sec).strftime(utils.API_DATE_FORMAT)
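from_elapsed_time_to_delta and the two format constants are project helpers not shown in this listing; a sketch of the same parse-shift-reserialize step using a plain timedelta and assumed format values:

    from datetime import timedelta
    from oslo_utils import timeutils

    LEASE_DATE_FORMAT = '%Y-%m-%d %H:%M'   # assumed value of utils.LEASE_DATE_FORMAT
    API_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'  # assumed value of utils.API_DATE_FORMAT

    def shift_lease_date(lease_value, minutes, positive=True):
        # Parse the stored date, shift it by a signed delta, and
        # re-serialize it in the API's format.
        delta = timedelta(minutes=minutes if positive else -minutes)
        date = timeutils.parse_strtime(lease_value, LEASE_DATE_FORMAT)
        return (date + delta).strftime(API_DATE_FORMAT)

    print(shift_lease_date('2016-07-20 05:40', 30))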
Example #46
0
File: db.py Project: arbrandes/nova
 def is_up(self, service_ref):
     """Moved from nova.utils
     Check whether a service is up based on last heartbeat.
     """
     last_heartbeat = (service_ref.get('last_seen_up') or
                       service_ref['created_at'])
     if isinstance(last_heartbeat, six.string_types):
         # NOTE(russellb) If this service_ref came in over rpc via
         # conductor, then the timestamp will be a string and needs to be
         # converted back to a datetime.
         last_heartbeat = timeutils.parse_strtime(last_heartbeat)
     else:
         # Objects have proper UTC timezones, but the timeutils comparison
         # below does not (and will fail)
         last_heartbeat = last_heartbeat.replace(tzinfo=None)
     # Timestamps in DB are UTC.
     elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
     is_up = abs(elapsed) <= self.service_down_time
     if not is_up:
         LOG.debug('Seems service %(binary)s on host %(host)s is down. '
                   'Last heartbeat was %(lhb)s. Elapsed time is %(el)s',
                   {'binary': service_ref.get('binary'),
                    'host': service_ref.get('host'),
                    'lhb': str(last_heartbeat), 'el': str(elapsed)})
     return is_up
Example #47
0
    def __init__(self,
                 user_id,
                 project_id,
                 request_id=None,
                 is_admin=None,
                 remote_address=None,
                 auth_token=None,
                 user_name=None,
                 project_name=None,
                 overwrite=True,
                 service_catalog=None,
                 api_version=None,
                 is_os_admin=None,
                 **kwargs):
        """Parameters

            :param overwrite: Set to False to ensure that the greenthread local
                copy of the index is not overwritten.


            :param kwargs: Extra arguments that might be present, but we ignore
                because they possibly came in from older rpc messages.
        """
        user = kwargs.pop('user', None)
        tenant = kwargs.pop('tenant', None)
        super(RequestContext,
              self).__init__(auth_token=auth_token,
                             user=user_id or user,
                             tenant=project_id or tenant,
                             is_admin=is_admin,
                             request_id=request_id,
                             resource_uuid=kwargs.pop('resource_uuid', None),
                             overwrite=overwrite)
        # oslo_context's RequestContext.to_dict() generates this field, we can
        # safely ignore this as we don't use it.
        kwargs.pop('user_identity', None)
        if kwargs:
            LOG.warning(
                _LW('Arguments dropped when creating context: %s'),
                str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.remote_address = remote_address
        # NOTE: this __init__ takes no timestamp argument, so utcnow() always
        # yields a datetime here and the string-parsing guard never fires.
        timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp

        self.service_catalog = service_catalog
        if self.service_catalog is None:
            # default to an empty list when no catalog was given
            self.service_catalog = []

        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = is_admin
        # TODO(ft): call policy.check_is_admin if is_admin is None
        self.is_os_admin = is_os_admin
        self.api_version = api_version
Example #48
0
 def parse_strtime(strtime):
     if _ms_time_regex.match(strtime):
         # NOTE(MotoKen): time format for aws-sdk-java contains millisecond
         time_format = "%Y-%m-%dT%H:%M:%S.%fZ"
     else:
         time_format = "%Y-%m-%dT%H:%M:%SZ"
     return timeutils.parse_strtime(strtime, time_format)
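The module-level _ms_time_regex this wrapper depends on is not shown; a plausible definition (an assumption, not the project's actual pattern) that makes the two branches reachable:

    import re

    # Matches 'Z'-suffixed timestamps that carry a fractional-seconds part,
    # e.g. '2015-03-26T15:04:40.123Z' as emitted by aws-sdk-java.
    _ms_time_regex = re.compile(
        r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z$')

With that in place, parse_strtime('2015-03-26T15:04:40.123Z') takes the millisecond branch and parse_strtime('2015-03-26T15:04:40Z') the plain one.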
Example #49
0
    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
                 roles=None, remote_address=None, timestamp=None,
                 request_id=None, auth_token=None, overwrite=True,
                 quota_class=None, service_catalog=None, **kwargs):
        """Initialize RequestContext.

        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """

        user = kwargs.pop('user', None)
        tenant = kwargs.pop('tenant', None)
        super(RequestContext, self).__init__(
            auth_token=auth_token,
            user=user_id or user,
            tenant=project_id or tenant,
            domain=kwargs.pop('domain', None),
            user_domain=kwargs.pop('user_domain', None),
            project_domain=kwargs.pop('project_domain', None),
            is_admin=is_admin,
            read_only=kwargs.pop('read_only', False),
            show_deleted=kwargs.pop('show_deleted', False),
            request_id=request_id,
            resource_uuid=kwargs.pop('resource_uuid', None),
            overwrite=overwrite,
            roles=roles)

        kwargs.pop('user_identity', None)
        if kwargs:
            LOG.warning(_LW('Arguments dropped when creating context: %s.'),
                        str(kwargs))
        self.user_id = self.user
        self.project_id = self.tenant

        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self.roles)
        elif self.is_admin and 'admin' not in self.roles:
            self.roles.append('admin')
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if service_catalog:
            self.service_catalog = [s for s in service_catalog
                                    if s.get('type') in ('compute', 'volume')]
        else:
            self.service_catalog = []

        self.quota_class = quota_class
Example #50
0
    def __init__(self,
                 user_id,
                 project_id,
                 is_admin=None,
                 read_deleted="no",
                 roles=None,
                 remote_address=None,
                 timestamp=None,
                 request_id=None,
                 auth_token=None,
                 overwrite=True,
                 user_name=None,
                 project_name=None,
                 service_catalog=None,
                 **kwargs):
        """
        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """

        super(RequestContext, self).__init__(auth_token=auth_token,
                                             user=user_id,
                                             tenant=project_id,
                                             is_admin=is_admin,
                                             request_id=request_id,
                                             overwrite=overwrite,
                                             roles=roles)

        if kwargs:
            LOG.warning(
                _('Arguments dropped when creating context: %s'), str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp

        self.service_catalog = service_catalog

        self.user_name = user_name
        self.project_name = project_name

        self.operation = None
        self.operation_start_time = None
        self.operation_get_progress_method = None
        self.operation_item_id = None
        self.operation_data = {}
Example #51
0
def fake_vpn_instance():
    return objects.Instance(
        id=7,
        image_ref=CONF.vpn_image_id,
        vm_state='active',
        created_at=timeutils.parse_strtime('1981-10-20T00:00:00.000000'),
        uuid=uuid,
        project_id=project_id)
Example #52
0
    def _get_share_networks(self, req, is_detail=True):
        """Returns a list of share networks."""
        context = req.environ["manila.context"]
        search_opts = {}
        search_opts.update(req.GET)

        if "all_tenants" in search_opts or (
            "project_id" in search_opts and search_opts["project_id"] != context.project_id
        ):
            policy.check_policy(context, RESOURCE_NAME, "get_all_share_networks")

        if "security_service_id" in search_opts:
            networks = db_api.share_network_get_all_by_security_service(context, search_opts["security_service_id"])
        elif "project_id" in search_opts and search_opts["project_id"] != context.project_id:
            networks = db_api.share_network_get_all_by_project(context, search_opts["project_id"])
        elif "all_tenants" in search_opts:
            networks = db_api.share_network_get_all(context)
        else:
            networks = db_api.share_network_get_all_by_project(context, context.project_id)

        date_parsing_error_msg = """%s is not in yyyy-mm-dd format."""
        if "created_since" in search_opts:
            try:
                created_since = timeutils.parse_strtime(search_opts["created_since"], fmt="%Y-%m-%d")
            except ValueError:
                msg = date_parsing_error_msg % search_opts["created_since"]
                raise exc.HTTPBadRequest(explanation=msg)
            networks = [network for network in networks if network["created_at"] >= created_since]
        if "created_before" in search_opts:
            try:
                created_before = timeutils.parse_strtime(search_opts["created_before"], fmt="%Y-%m-%d")
            except ValueError:
                msg = date_parsing_error_msg % search_opts["created_before"]
                raise exc.HTTPBadRequest(explanation=msg)
            networks = [network for network in networks if network["created_at"] <= created_before]
        opts_to_remove = ["all_tenants", "created_since", "created_before", "limit", "offset", "security_service_id"]
        for opt in opts_to_remove:
            search_opts.pop(opt, None)
        if search_opts:
            for key, value in six.iteritems(search_opts):
                if key in ["ip_version", "segmentation_id"]:
                    value = int(value)
                networks = [network for network in networks if network[key] == value]

        limited_list = common.limited(networks, req)
        return self._view_builder.build_share_networks(limited_list, is_detail)
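The two try/except blocks above share one shape: parse_strtime with fmt='%Y-%m-%d' either yields a datetime to filter against or raises ValueError, which the handler maps onto HTTP 400. A minimal sketch of that contract:

    from oslo_utils import timeutils

    def parse_date_opt(value):
        # Raises ValueError for anything not in yyyy-mm-dd format;
        # callers translate that into a 400 response.
        return timeutils.parse_strtime(value, fmt='%Y-%m-%d')

    print(parse_date_opt('2015-03-26'))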
Example #53
0
    def __init__(self, user_id=None, project_id=None, is_admin=None,
                 read_deleted="no", remote_address=None, timestamp=None,
                 quota_class=None, user_name=None, project_name=None,
                 service_catalog=None, instance_lock_checked=False,
                 user_auth_plugin=None, **kwargs):
        """:param read_deleted: 'no' indicates deleted records are hidden,
                'yes' indicates deleted records are visible,
                'only' indicates that *only* deleted records are visible.

           :param overwrite: Set to False to ensure that the greenthread local
                copy of the index is not overwritten.

           :param user_auth_plugin: The auth plugin for the current request's
                authentication data.
        """
        if user_id:
            kwargs['user'] = user_id
        if project_id:
            kwargs['tenant'] = project_id

        super(RequestContext, self).__init__(is_admin=is_admin, **kwargs)

        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp

        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                if s.get('type') in ('volume', 'volumev2', 'key-manager',
                                     'placement')]
        else:
            # if list is empty or none
            self.service_catalog = []

        self.instance_lock_checked = instance_lock_checked

        # NOTE(markmc): this attribute is currently only used by the
        # rs_limits turnstile pre-processor.
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class
        self.user_name = user_name
        self.project_name = project_name

        # NOTE(dheeraj): The following attributes are used by cellsv2 to store
        # connection information for connecting to the target cell.
        # It is only manipulated using the target_cell contextmanager
        # provided by this module
        self.db_connection = None
        self.mq_connection = None

        self.user_auth_plugin = user_auth_plugin
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self)
Example #54
0
 def _get_dummy_event_obj(self):
     return {
         'resource_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
         'resource_state': 'ACTIVE',
         'resource_type': 'VNF',
         'event_details': '',
         'event_type': 'scale_up',
         'timestamp': timeutils.parse_strtime('2016-07-20T05:43:52.765172')
     }
Example #55
0
    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
                 roles=None, project_name=None, remote_address=None,
                 timestamp=None, request_id=None, auth_token=None,
                 overwrite=True, quota_class=None, service_catalog=None,
                 domain=None, user_domain=None, project_domain=None,
                 **kwargs):
        """Initialize RequestContext.

        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """

        super(RequestContext, self).__init__(auth_token=auth_token,
                                             user=user_id,
                                             tenant=project_id,
                                             domain=domain,
                                             user_domain=user_domain,
                                             project_domain=project_domain,
                                             is_admin=is_admin,
                                             request_id=request_id)
        self.roles = roles or []
        self.project_name = project_name
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        self.quota_class = quota_class
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()

        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                                    if s.get('type') in
                                    ('identity', 'compute', 'object-store')]
        else:
            # if list is empty or none
            self.service_catalog = []

        # We need to have RequestContext attributes defined
        # when policy.check_is_admin invokes request logging
        # to make it loggable.
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self.roles)
        elif self.is_admin and 'admin' not in self.roles:
            self.roles.append('admin')
Example #56
0
    def report_state(self, context, **kwargs):
        time = kwargs['time']
        time = timeutils.parse_strtime(time)
        if self.START_TIME > time:
            LOG.info("H3CDriver Message with invalid time stamp received")
            return

        agent_state = kwargs['agent_state']['agent_state']
        self._create_or_update_agent(context, agent_state)
        LOG.info("H3CDriver(%s) report_state", agent_state['host'])
Example #57
0
    def update(self, lease_id, name=None, prolong_for=None, reduce_by=None,
               end_date=None, advance_by=None, defer_by=None, start_date=None,
               reservations=None):
        """Update attributes of the lease."""
        values = {}
        if name:
            values['name'] = name

        lease_end_date_change = prolong_for or reduce_by or end_date
        lease_start_date_change = defer_by or advance_by or start_date
        lease = None

        if lease_end_date_change:
            lease = self.get(lease_id)
            if end_date:
                date = timeutils.parse_strtime(end_date, utils.API_DATE_FORMAT)
                values['end_date'] = date.strftime(utils.API_DATE_FORMAT)
            else:
                self._add_lease_date(values, lease, 'end_date',
                                     lease_end_date_change,
                                     prolong_for is not None)

        if lease_start_date_change:
            if lease is None:
                lease = self.get(lease_id)
            if start_date:
                date = timeutils.parse_strtime(start_date,
                                               utils.API_DATE_FORMAT)
                values['start_date'] = date.strftime(utils.API_DATE_FORMAT)
            else:
                self._add_lease_date(values, lease, 'start_date',
                                     lease_start_date_change,
                                     defer_by is not None)

        if reservations:
            values['reservations'] = reservations

        if not values:
            return _('No values to update passed.')
        resp, body = self.request_manager.put('/leases/%s' % lease_id,
                                              body=values)
        return body['lease']
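Note that the parse-then-strftime pairing used for end_date and start_date above doubles as input validation: a malformed string raises ValueError before the PUT is ever issued. The idiom in isolation, under an assumed API_DATE_FORMAT value:

    from oslo_utils import timeutils

    API_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'  # assumed value of utils.API_DATE_FORMAT

    def canonical_api_date(value):
        # Raises ValueError on malformed input; otherwise returns the
        # canonical serialized form that gets sent to the API.
        date = timeutils.parse_strtime(value, API_DATE_FORMAT)
        return date.strftime(API_DATE_FORMAT)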