    def test_trusted_filter_trusted_and_locale_formated_vtime_passes(
            self, req_mock):
        oat_data = {
            "hosts": [
                {
                    "host_name": "host1",
                    "trust_lvl": "trusted",
                    "vtime": timeutils.strtime(fmt="%c")
                },
                {
                    "host_name": "host2",
                    "trust_lvl": "trusted",
                    "vtime": timeutils.strtime(fmt="%D")
                },
                # This is just a broken date to ensure that
                # we're not just arbitrarily accepting any
                # date format.
            ]
        }
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'trusted'}
        filter_properties = {
            'context': mock.sentinel.ctx,
            'instance_type': {
                'memory_mb': 1024,
                'extra_specs': extra_specs
            }
        }
        host = fakes.FakeHostState('host1', 'host1', {})
        bad_host = fakes.FakeHostState('host2', 'host2', {})

        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
        self.assertFalse(self.filt_cls.host_passes(bad_host,
                                                   filter_properties))
Example #2
    def _cooldown_timestamp(self, reason):
        # Save resource metadata with a timestamp and reason
        # If we wanted to implement the AutoScaling API like AWS does,
        # we could maintain event history here, but since we only need
        # the latest event for cooldown, just store that for now
        metadata = {timeutils.strtime(): reason}
        self.metadata_set(metadata)
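
The cooldown tests further down (test_is_in_progress / test_not_in_progress) exercise the read side of this metadata. A hypothetical sketch of such a check, using timeutils.parse_strtime() to turn the stored key back into a datetime; the method name and the cooldown_seconds parameter are illustrative, not taken from the source, and the same timeutils/datetime imports as the surrounding snippets are assumed:

    def _cooldown_inprogress(self, cooldown_seconds=60):
        # Illustrative only: inspect the {strtime(): reason} metadata written
        # by _cooldown_timestamp() and report whether the window is still open.
        for timestamp in self.metadata_get():
            last = timeutils.parse_strtime(timestamp)
            if timeutils.utcnow() - last < datetime.timedelta(
                    seconds=cooldown_seconds):
                return True
        return False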
Example #3
    def to_dict(self):
        user_idt = (
            self.user_idt_format.format(user=self.user or '-',
                                        tenant=self.tenant or '-',
                                        domain=self.domain or '-',
                                        user_domain=self.user_domain or '-',
                                        p_domain=self.project_domain or '-'))

        return {'user_id': self.user_id,
                'project_id': self.project_id,
                'project_name': self.project_name,
                'domain': self.domain,
                'user_domain': self.user_domain,
                'project_domain': self.project_domain,
                'is_admin': self.is_admin,
                'read_deleted': self.read_deleted,
                'roles': self.roles,
                'remote_address': self.remote_address,
                'timestamp': timeutils.strtime(self.timestamp),
                'request_id': self.request_id,
                'auth_token': self.auth_token,
                'quota_class': self.quota_class,
                'service_catalog': self.service_catalog,
                'tenant': self.tenant,
                'user': self.user,
                'user_identity': user_idt}
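
Contexts serialized this way are usually rebuilt on the receiving side. A hypothetical counterpart (not part of the project code above) that parses the timestamp string back with timeutils.parse_strtime() before reconstructing the object:

    @classmethod
    def from_dict(cls, values):
        # Hypothetical inverse of to_dict(): restore the datetime before
        # rebuilding the context.
        values = dict(values)
        if values.get('timestamp'):
            values['timestamp'] = timeutils.parse_strtime(values['timestamp'])
        values.pop('user_identity', None)  # derived field, not a constructor arg
        return cls(**values)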
Example #4
    def show(self, req, id):
        """Retrieve tenant_usage for a specified tenant."""
        tenant_id = id
        context = req.environ['nova.context']

        authorize_show(context, {'project_id': tenant_id})

        try:
            (period_start, period_stop, ignore) = self._get_datetime_range(req)
        except exception.InvalidStrTime as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())

        now = timeutils.parse_isotime(timeutils.strtime())
        if period_stop > now:
            period_stop = now
        usage = self._tenant_usages_for_period(context,
                                               period_start,
                                               period_stop,
                                               tenant_id=tenant_id,
                                               detailed=True)
        if len(usage):
            usage = usage[0]
        else:
            usage = {}
        return {'tenant_usage': usage}
Example #5
def register_extension(vim,
                       key,
                       type,
                       label='OpenStack',
                       summary='OpenStack services',
                       version='1.0'):
    """Create a new extention.

    :param vim: Vim object
    :param key: the key for the extension
    :param type: Managed entity type, as defined by the extension. This
                 matches the type field in the configuration about a
                 virtual machine or vApp
    :param label: Display label
    :param summary: Summary description
    :param version: Extension version number as a dot-separated string
    """
    extension_manager = vim.service_content.extensionManager
    client_factory = vim.client.factory
    os_ext = client_factory.create('ns0:Extension')
    os_ext.key = key
    entity_info = client_factory.create('ns0:ExtManagedEntityInfo')
    entity_info.type = type
    os_ext.managedEntityInfo = [entity_info]
    os_ext.version = version
    desc = client_factory.create('ns0:Description')
    desc.label = label
    desc.summary = summary
    os_ext.description = desc
    os_ext.lastHeartbeatTime = timeutils.strtime()
    vim.client.service.RegisterExtension(extension_manager, os_ext)
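
A hypothetical call site for the helper above; vim_session stands in for an already established Vim connection object exposing service_content and client, and the key/type values are made up:

register_extension(vim_session,
                   key='org.openstack.compute',
                   type='VirtualMachine',
                   label='OpenStack',
                   summary='OpenStack Compute service',
                   version='1.0')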
Example #6
    def new_trust_ref(self, trustor_user_id, trustee_user_id, project_id=None,
                      impersonation=None, expires=None, role_ids=None,
                      role_names=None, remaining_uses=None):
        ref = dict()
        ref['id'] = uuid.uuid4().hex
        ref['trustor_user_id'] = trustor_user_id
        ref['trustee_user_id'] = trustee_user_id
        ref['impersonation'] = impersonation or False
        ref['project_id'] = project_id
        ref['remaining_uses'] = remaining_uses

        if isinstance(expires, six.string_types):
            ref['expires_at'] = expires
        elif isinstance(expires, dict):
            ref['expires_at'] = timeutils.strtime(
                timeutils.utcnow() + datetime.timedelta(**expires),
                fmt=TIME_FORMAT)
        elif expires is None:
            pass
        else:
            raise NotImplementedError('Unexpected value for "expires"')

        role_ids = role_ids or []
        role_names = role_names or []
        if role_ids or role_names:
            ref['roles'] = []
            for role_id in role_ids:
                ref['roles'].append({'id': role_id})
            for role_name in role_names:
                ref['roles'].append({'name': role_name})

        return ref
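
A hypothetical use of the fixture above (fixture stands in for an instance of the class defining new_trust_ref): passing expires as a dict hands the offset to datetime.timedelta, so expires_at comes back as a string formatted with TIME_FORMAT roughly ten minutes in the future. The ids and role name are made up.

trust = fixture.new_trust_ref(trustor_user_id=uuid.uuid4().hex,
                              trustee_user_id=uuid.uuid4().hex,
                              project_id=uuid.uuid4().hex,
                              expires={'minutes': 10},
                              role_names=['member'])
print(trust['expires_at'])  # strtime() output in TIME_FORMAT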
Example #7
    def to_dict(self):
        user_idt = (self.user_idt_format.format(user=self.user or '-',
                                                tenant=self.tenant or '-',
                                                domain=self.domain or '-',
                                                user_domain=self.user_domain
                                                or '-',
                                                p_domain=self.project_domain
                                                or '-'))

        return {
            'user_id': self.user_id,
            'project_id': self.project_id,
            'project_name': self.project_name,
            'domain': self.domain,
            'user_domain': self.user_domain,
            'project_domain': self.project_domain,
            'is_admin': self.is_admin,
            'read_deleted': self.read_deleted,
            'roles': self.roles,
            'remote_address': self.remote_address,
            'timestamp': timeutils.strtime(self.timestamp),
            'request_id': self.request_id,
            'auth_token': self.auth_token,
            'quota_class': self.quota_class,
            'service_catalog': self.service_catalog,
            'tenant': self.tenant,
            'user': self.user,
            'user_identity': user_idt
        }
Example #8
    def unmanage(self, volume):
        """Mark SolidFire Volume as unmanaged (export from Cinder)."""

        LOG.debug("Enter SolidFire unmanage...")
        sfaccount = self._get_sfaccount(volume['project_id'])
        if sfaccount is None:
            LOG.error(_LE("Account for Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "unmanage operation!") % volume['id'])
            raise exception.SolidFireAPIException("Failed to find account "
                                                  "for volume.")

        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(volume['id'], params)
        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=volume['id'])

        export_time = timeutils.strtime()
        attributes = sf_vol['attributes']
        attributes['os_exported_at'] = export_time
        params = {'volumeID': int(sf_vol['volumeID']),
                  'attributes': attributes}

        data = self._issue_api_request('ModifyVolume',
                                       params, version='5.0')
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)
Example #9
    def unmanage(self, volume):
        """Mark SolidFire Volume as unmanaged (export from Cinder)."""

        LOG.debug("Enter SolidFire unmanage...")
        sfaccount = self._get_sfaccount(volume['project_id'])
        if sfaccount is None:
            LOG.error(
                _LE("Account for Volume ID %s was not found on "
                    "the SolidFire Cluster while attempting "
                    "unmanage operation!") % volume['id'])
            raise exception.SolidFireAPIException("Failed to find account "
                                                  "for volume.")

        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(volume['id'], params)
        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=volume['id'])

        export_time = timeutils.strtime()
        attributes = sf_vol['attributes']
        attributes['os_exported_at'] = export_time
        params = {
            'volumeID': int(sf_vol['volumeID']),
            'attributes': attributes
        }

        data = self._issue_api_request('ModifyVolume', params, version='5.0')
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)
Example #10
    def new_trust_ref(self, trustor_user_id, trustee_user_id, project_id=None,
                      impersonation=None, expires=None, role_ids=None,
                      role_names=None, remaining_uses=None):
        ref = self.new_ref()

        ref['trustor_user_id'] = trustor_user_id
        ref['trustee_user_id'] = trustee_user_id
        ref['impersonation'] = impersonation or False
        ref['project_id'] = project_id
        ref['remaining_uses'] = remaining_uses

        if isinstance(expires, six.string_types):
            ref['expires_at'] = expires
        elif isinstance(expires, dict):
            ref['expires_at'] = timeutils.strtime(
                timeutils.utcnow() + datetime.timedelta(**expires),
                fmt=TIME_FORMAT)
        elif expires is None:
            pass
        else:
            raise NotImplementedError('Unexpected value for "expires"')

        role_ids = role_ids or []
        role_names = role_names or []
        if role_ids or role_names:
            ref['roles'] = []
            for role_id in role_ids:
                ref['roles'].append({'id': role_id})
            for role_name in role_names:
                ref['roles'].append({'name': role_name})

        return ref
Example #11
    def show(self, req, id):
        """Retrieve tenant_usage for a specified tenant."""
        tenant_id = id
        context = req.environ['nova.context']

        authorize_show(context, {'project_id': tenant_id})

        try:
            (period_start, period_stop, ignore) = self._get_datetime_range(
                req)
        except exception.InvalidStrTime as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())

        now = timeutils.parse_isotime(timeutils.strtime())
        if period_stop > now:
            period_stop = now
        usage = self._tenant_usages_for_period(context,
                                               period_start,
                                               period_stop,
                                               tenant_id=tenant_id,
                                               detailed=True)
        if len(usage):
            usage = usage[0]
        else:
            usage = {}
        return {'tenant_usage': usage}
Example #12
    def test_validate_ec2_timestamp_advanced_time_expired(self):
        timestamp = timeutils.utcnow() + datetime.timedelta(seconds=350)
        params = {
            'Timestamp': timeutils.strtime(timestamp, "%Y-%m-%dT%H:%M:%SZ")
        }
        expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
        self.assertTrue(expired)
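
A rough sketch of the check this test exercises, not the actual nova ec2utils implementation: the request Timestamp is parsed and compared against the allowed window, so anything more than expires seconds away from the current UTC time counts as expired. The same timeutils/datetime imports as the surrounding snippets are assumed.

def is_ec2_timestamp_expired(params, expires=None):
    # Illustrative only: a Timestamp further than `expires` seconds from
    # "now" (in either direction) is treated as expired.
    timestamp = params.get('Timestamp')
    if timestamp is None or expires is None:
        return False
    query_time = timeutils.normalize_time(timeutils.parse_isotime(timestamp))
    return abs(timeutils.utcnow() - query_time) > datetime.timedelta(
        seconds=expires)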
Example #13
def register_extension(vim, key, type, label='OpenStack',
                       summary='OpenStack services', version='1.0'):
    """Create a new extention.

    :param vim: Vim object
    :param key: the key for the extension
    :param type: Managed entity type, as defined by the extension. This
                 matches the type field in the configuration about a
                 virtual machine or vApp
    :param label: Display label
    :param summary: Summary description
    :param version: Extension version number as a dot-separated string
    """
    extension_manager = vim.service_content.extensionManager
    client_factory = vim.client.factory
    os_ext = client_factory.create('ns0:Extension')
    os_ext.key = key
    entity_info = client_factory.create('ns0:ExtManagedEntityInfo')
    entity_info.type = type
    os_ext.managedEntityInfo = [entity_info]
    os_ext.version = version
    desc = client_factory.create('ns0:Description')
    desc.label = label
    desc.summary = summary
    os_ext.description = desc
    os_ext.lastHeartbeatTime = timeutils.strtime()
    vim.client.service.RegisterExtension(extension_manager, os_ext)
Example #14
    def _register_cfg_agent(self):
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': FIRST_CFG_AGENT},
                              time=timeutils.strtime())
        agent_db = self.core_plugin.get_agents_db(self.adminContext,
                                                  filters={'host': [HOST]})
        self.agent_id1 = agent_db[0].id
Example #15
    def _sanitizer(self, obj):
        def to_primitive(_type, _value):
            return {"_type": _type, "_value": _value}

        if isinstance(obj, datetime.datetime):
            return to_primitive("datetime", timeutils.strtime(obj))

        return super(RPCJSONSerializer, self)._sanitizer(obj)
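
On the receiving end such a serializer typically has a matching hook that turns the primitive mapping back into a datetime. A hypothetical sketch; the method name is illustrative and not taken from the source:

    @staticmethod
    def _to_datetime(obj):
        # Hypothetical inverse of _sanitizer(): restore datetimes encoded as
        # {"_type": "datetime", "_value": "<strtime string>"}.
        if isinstance(obj, dict) and obj.get("_type") == "datetime":
            return timeutils.parse_strtime(obj["_value"])
        return obj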
Example #16
    def _register_ml2_agents(self):
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT_2},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT_3},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT_4},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT_5},
                              time=timeutils.strtime())
Example #17
    def report_state(self, context, agent_state, use_call=False):
        cctxt = self.client.prepare()
        kwargs = {
            'agent_state': {'agent_state': agent_state},
            'time': timeutils.strtime(),
        }
        method = cctxt.call if use_call else cctxt.cast
        return method(context, 'report_state', **kwargs)
Example #18
    def _generate_swift_object_name_prefix(self, backup):
        az = 'az_%s' % self.az
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S")
        prefix = volume + '/' + timestamp + '/' + backup_name
        LOG.debug('_generate_swift_object_name_prefix: %s' % prefix)
        return prefix
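
For orientation, the prefix built above joins the volume id, a second-resolution timestamp and the backup name. An illustrative rendering with made-up ids and a fixed timestamp:

az = 'nova'
backup = {'id': 'b1', 'volume_id': 'v1'}
timestamp = '20140106120000'
prefix = 'volume_%s/%s/az_%s_backup_%s' % (
    backup['volume_id'], timestamp, az, backup['id'])
print(prefix)  # volume_v1/20140106120000/az_nova_backup_b1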
Example #19
    def _generate_swift_object_name_prefix(self, backup):
        az = 'az_%s' % self.az
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S")
        prefix = volume + '/' + timestamp + '/' + backup_name
        LOG.debug('_generate_swift_object_name_prefix: %s' % prefix)
        return prefix
Example #20
    def _register_cfg_agent(self):
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': FIRST_CFG_AGENT},
                              time=timeutils.strtime())
        agent_db = self.core_plugin.get_agents_db(self.adminContext,
                                                  filters={'host': [HOST]})
        self.agent_id1 = agent_db[0].id
Example #21
    def _register_ml2_agents(self):
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT_2},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT_3},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT_4},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT_5},
                              time=timeutils.strtime())
Example #22
    def _new_oauth_token_with_expires_at(self):
        key, secret, token = self._new_oauth_token()
        expires_at = timeutils.strtime()
        params = {'oauth_token': key,
                  'oauth_token_secret': secret,
                  'oauth_expires_at': expires_at}
        token = urlparse.urlencode(params)
        return (key, secret, expires_at, token)
Example #23
    def _sanitizer(self, obj):
        def to_primitive(_type, _value):
            return {"_type": _type, "_value": _value}

        if isinstance(obj, datetime.datetime):
            return to_primitive("datetime", timeutils.strtime(obj))

        return super(RPCJSONSerializer, self)._sanitizer(obj)
Example #24
    def test_validate_ec2_timestamp_advanced_time(self):

        # EC2 request with Timestamp in advanced time
        timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250)
        params = {'Timestamp': timeutils.strtime(timestamp,
                                                 "%Y-%m-%dT%H:%M:%SZ")}
        expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
        self.assertFalse(expired)
Example #25
    def manage_existing(self, volume, external_ref):
        """Manages an existing SolidFire Volume (import to Cinder).

        Renames the Volume to match the expected name for the volume.
        Also need to consider things like QoS, Emulation, account/tenant.
        """

        sfid = external_ref.get('source-id', None)
        sfname = external_ref.get('name', None)
        if sfid is None:
            raise exception.SolidFireAPIException("Manage existing volume "
                                                  "requires 'source-id'.")

        # First get the volume on the SF cluster (MUST be active)
        params = {'startVolumeID': sfid, 'limit': 1}
        data = self._issue_api_request('ListActiveVolumes', params)
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)
        sf_ref = data['result']['volumes'][0]

        sfaccount = self._create_sfaccount(volume['project_id'])

        attributes = {}
        qos = {}
        if (self.configuration.sf_allow_tenant_qos
                and volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume.get('volume_type_id', None)
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        import_time = timeutils.strtime(volume['created_at'])
        attributes = {
            'uuid': volume['id'],
            'is_clone': 'False',
            'os_imported_at': import_time,
            'old_name': sfname
        }
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {
            'name': volume['name'],
            'volumeID': sf_ref['volumeID'],
            'accountID': sfaccount['accountID'],
            'enable512e': self.configuration.sf_emulate_512,
            'attributes': attributes,
            'qos': qos
        }

        data = self._issue_api_request('ModifyVolume', params, version='5.0')
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)

        return self._get_model_info(sfaccount, sf_ref['volumeID'])
Example #26
    def create_volume(self, volume):
        """Create volume on SolidFire device.

        The account is where CHAP settings are derived from, volume is
        created and exported.  Note that the new volume is immediately ready
        for use.

        One caveat here is that an existing user account must be specified
        in the API call to create a new volume.  We use a set algorithm to
        determine account info based on passed in cinder volume object.  First
        we check to see if the account already exists (and use it), or if it
        does not already exist, we'll go ahead and create it.

        """
        slice_count = 1
        attributes = {}
        qos = {}

        if (self.configuration.sf_allow_tenant_qos
                and volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume['volume_type_id']
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        create_time = timeutils.strtime(volume['created_at'])
        attributes = {
            'uuid': volume['id'],
            'is_clone': 'False',
            'created_at': create_time
        }
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {
            'name': 'UUID-%s' % volume['id'],
            'accountID': None,
            'sliceCount': slice_count,
            'totalSize': int(volume['size'] * units.Gi),
            'enable512e': self.configuration.sf_emulate_512,
            'attributes': attributes,
            'qos': qos
        }

        # NOTE(jdg): Check if we're a migration tgt, if so
        # use the old volume-id here for the SF Name
        migration_status = volume.get('migration_status', None)
        if migration_status and 'target' in migration_status:
            k, v = migration_status.split(':')
            params['name'] = 'UUID-%s' % v
            params['attributes']['migration_uuid'] = volume['id']
            params['attributes']['uuid'] = v

        return self._do_volume_create(volume['project_id'], params)
Example #27
    def test_not_in_progress(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'my-policy')

        awhile_ago = timeutils.utcnow() - datetime.timedelta(seconds=100)
        previous_meta = {timeutils.strtime(awhile_ago): 'ChangeInCapacity : 1'}
        self.patchobject(pol, 'metadata_get', return_value=previous_meta)
        self.assertFalse(pol._cooldown_inprogress())
Example #28
    def test_is_in_progress(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'my-policy')

        now = timeutils.utcnow()
        previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
        self.patchobject(pol, 'metadata_get', return_value=previous_meta)
        self.assertTrue(pol._cooldown_inprogress())
Example #29
    def fake_get_timestamp(ds_browser, ds_path):
        self.assertEqual('fake-ds-browser', ds_browser)
        self.assertEqual('[fake-ds] fake-path', str(ds_path))
        if not self.exists:
            return
        ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX,
                       timeutils.strtime(at=self._time,
                                         fmt=imagecache.TIMESTAMP_FORMAT))
        return ts
Example #30
    def test_is_in_progress(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')

        now = timeutils.utcnow()
        previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
        self.patchobject(pol, 'metadata_get', return_value=previous_meta)
        self.assertTrue(pol._cooldown_inprogress())
Example #31
    def fake_get_timestamp(ds_browser, ds_path):
        self.assertEqual('fake-ds-browser', ds_browser)
        self.assertEqual('[fake-ds] fake-path', str(ds_path))
        if not self.exists:
            return
        ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX,
                       timeutils.strtime(at=self._time,
                                         fmt=imagecache.TIMESTAMP_FORMAT))
        return ts
Example #32
    def test_validate_ec2_timestamp_advanced_time(self):

        # EC2 request with Timestamp in advanced time
        timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250)
        params = {
            'Timestamp': timeutils.strtime(timestamp, "%Y-%m-%dT%H:%M:%SZ")
        }
        expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
        self.assertFalse(expired)
Example #33
    def test_not_in_progress(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')

        awhile_ago = timeutils.utcnow() - datetime.timedelta(seconds=100)
        previous_meta = {timeutils.strtime(awhile_ago): 'ChangeInCapacity : 1'}
        self.patchobject(pol, 'metadata_get', return_value=previous_meta)
        self.assertFalse(pol._cooldown_inprogress())
Example #34
    def format_token(cls, token_ref, roles_ref=None, catalog_ref=None):
        audit_info = None
        user_ref = token_ref['user']
        metadata_ref = token_ref['metadata']
        if roles_ref is None:
            roles_ref = []
        expires = token_ref.get('expires', provider.default_expire_time())
        if expires is not None:
            if not isinstance(expires, six.text_type):
                expires = timeutils.isotime(expires)

        token_data = token_ref.get('token_data')
        if token_data:
            token_audit = token_data.get(
                'access', token_data).get('token', {}).get('audit_ids')
            audit_info = token_audit

        if audit_info is None:
            audit_info = provider.audit_info(token_ref.get('parent_audit_id'))

        o = {'access': {'token': {'id': token_ref['id'],
                                  'expires': expires,
                                  'issued_at': timeutils.strtime(),
                                  'audit_ids': audit_info
                                  },
                        'user': {'id': user_ref['id'],
                                 'name': user_ref['name'],
                                 'username': user_ref['name'],
                                 'roles': roles_ref,
                                 'roles_links': metadata_ref.get('roles_links',
                                                                 [])
                                 }
                        }
             }
        if 'bind' in token_ref:
            o['access']['token']['bind'] = token_ref['bind']
        if 'tenant' in token_ref and token_ref['tenant']:
            token_ref['tenant']['enabled'] = True
            o['access']['token']['tenant'] = token_ref['tenant']
        if catalog_ref is not None:
            o['access']['serviceCatalog'] = V2TokenDataHelper.format_catalog(
                catalog_ref)
        if metadata_ref:
            if 'is_admin' in metadata_ref:
                o['access']['metadata'] = {'is_admin':
                                           metadata_ref['is_admin']}
            else:
                o['access']['metadata'] = {'is_admin': 0}
        if 'roles' in metadata_ref:
            o['access']['metadata']['roles'] = metadata_ref['roles']
        if CONF.trust.enabled and 'trust_id' in metadata_ref:
            o['access']['trust'] = {'trustee_user_id':
                                    metadata_ref['trustee_user_id'],
                                    'id': metadata_ref['trust_id']
                                    }
        return o
Example #35
    def manage_existing(self, volume, external_ref):
        """Manages an existing SolidFire Volume (import to Cinder).

        Renames the Volume to match the expected name for the volume.
        Also need to consider things like QoS, Emulation, account/tenant.
        """

        sfid = external_ref.get('source-id', None)
        sfname = external_ref.get('name', None)
        if sfid is None:
            raise exception.SolidFireAPIException("Manage existing volume "
                                                  "requires 'source-id'.")

        # First get the volume on the SF cluster (MUST be active)
        params = {'startVolumeID': sfid,
                  'limit': 1}
        data = self._issue_api_request('ListActiveVolumes', params)
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)
        sf_ref = data['result']['volumes'][0]

        sfaccount = self._create_sfaccount(volume['project_id'])

        attributes = {}
        qos = {}
        if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume.get('volume_type_id', None)
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        import_time = timeutils.strtime(volume['created_at'])
        attributes = {'uuid': volume['id'],
                      'is_clone': 'False',
                      'os_imported_at': import_time,
                      'old_name': sfname}
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {'name': volume['name'],
                  'volumeID': sf_ref['volumeID'],
                  'accountID': sfaccount['accountID'],
                  'enable512e': self.configuration.sf_emulate_512,
                  'attributes': attributes,
                  'qos': qos}

        data = self._issue_api_request('ModifyVolume',
                                       params, version='5.0')
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)

        return self._get_model_info(sfaccount, sf_ref['volumeID'])
Example #36
    def _shelve_offload(self, clean_shutdown=True):
        instance = self._create_fake_instance_obj()
        instance.task_state = task_states.SHELVING
        instance.save()
        db_instance = obj_base.obj_to_primitive(instance)
        host = 'fake-mini'
        cur_time = timeutils.utcnow()
        timeutils.set_time_override(cur_time)
        sys_meta = dict(instance.system_metadata)
        sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
        sys_meta['shelved_image_id'] = None
        sys_meta['shelved_host'] = host
        db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)

        self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute.driver, 'power_off')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')

        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'shelve_offload.start')
        if clean_shutdown:
            self.compute.driver.power_off(instance, CONF.shutdown_timeout,
                                          self.compute.SHUTDOWN_RETRY_INTERVAL)
        else:
            self.compute.driver.power_off(instance, 0, 0)
        self.compute._get_power_state(self.context, instance).AndReturn(123)
        db.instance_update_and_get_original(
            self.context,
            instance['uuid'],
            {'power_state': 123,
             'host': None,
             'node': None,
             'vm_state': vm_states.SHELVED_OFFLOADED,
             'task_state': None,
             'expected_task_state': [task_states.SHELVING,
                                     task_states.SHELVING_OFFLOADING]},
            update_cells=False,
            columns_to_join=[
                'metadata', 'system_metadata', 'info_cache', 'security_groups'
            ],
        ).AndReturn((db_instance, db_instance))
        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'shelve_offload.end')
        self.mox.ReplayAll()

        self.compute.shelve_offload_instance(self.context,
                                             instance,
                                             clean_shutdown=clean_shutdown)
Example #37
    def _new_oauth_token_with_expires_at(self):
        key, secret, token = self._new_oauth_token()
        expires_at = timeutils.strtime()
        params = {
            'oauth_token': key,
            'oauth_token_secret': secret,
            'oauth_expires_at': expires_at
        }
        token = urlparse.urlencode(params)
        return (key, secret, expires_at, token)
Example #38
    def test_metadata_is_written(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')

        nowish = timeutils.strtime()
        reason = 'cool as'
        meta_set = self.patchobject(pol, 'metadata_set')
        self.patchobject(timeutils, 'strtime', return_value=nowish)
        pol._cooldown_timestamp(reason)
        meta_set.assert_called_once_with({nowish: reason})
Example #39
    def test_metadata_is_written(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'my-policy')

        nowish = timeutils.strtime()
        reason = 'cool as'
        meta_set = self.patchobject(pol, 'metadata_set')
        self.patchobject(timeutils, 'strtime', return_value=nowish)
        pol._cooldown_timestamp(reason)
        meta_set.assert_called_once_with({nowish: reason})
Example #40
    def create_volume(self, volume):
        """Create volume on SolidFire device.

        The account is where CHAP settings are derived from, volume is
        created and exported.  Note that the new volume is immediately ready
        for use.

        One caveat here is that an existing user account must be specified
        in the API call to create a new volume.  We use a set algorithm to
        determine account info based on passed in cinder volume object.  First
        we check to see if the account already exists (and use it), or if it
        does not already exist, we'll go ahead and create it.

        """
        slice_count = 1
        attributes = {}
        qos = {}

        if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume['volume_type_id']
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        create_time = timeutils.strtime(volume['created_at'])
        attributes = {'uuid': volume['id'],
                      'is_clone': 'False',
                      'created_at': create_time}
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {'name': 'UUID-%s' % volume['id'],
                  'accountID': None,
                  'sliceCount': slice_count,
                  'totalSize': int(volume['size'] * units.Gi),
                  'enable512e': self.configuration.sf_emulate_512,
                  'attributes': attributes,
                  'qos': qos}

        # NOTE(jdg): Check if we're a migration tgt, if so
        # use the old volume-id here for the SF Name
        migration_status = volume.get('migration_status', None)
        if migration_status and 'target' in migration_status:
            k, v = migration_status.split(':')
            params['name'] = 'UUID-%s' % v
            params['attributes']['migration_uuid'] = volume['id']
            params['attributes']['uuid'] = v

        return self._do_volume_create(volume['project_id'], params)
Example #41
    def test_create_trust(self):
        expires_at = timeutils.strtime(
            timeutils.utcnow() + datetime.timedelta(minutes=10),
            fmt=TIME_FORMAT)
        new_trust = self.create_trust(self.sample_data, self.trustor["name"],
                                      expires_at=expires_at)
        self.assertEqual(self.trustor["id"], new_trust["trustor_user_id"])
        self.assertEqual(self.trustee["id"], new_trust["trustee_user_id"])
        role_ids = [self.role_browser["id"], self.role_member["id"]]
        self.assertTrue(timeutils.parse_strtime(new_trust["expires_at"],
                                                fmt=TIME_FORMAT))
        self.assertIn("%s/v3/OS-TRUST/" % HOST_URL, new_trust["links"]["self"])
        self.assertIn("%s/v3/OS-TRUST/" % HOST_URL,
                      new_trust["roles_links"]["self"])

        for role in new_trust["roles"]:
            self.assertIn(role["id"], role_ids)
Example #42
    def _register_agent_states(self, lbaas_agents=False):
        """Register two L3 agents and two DHCP agents."""
        l3_hosta = {
            'binary': 'neutron-l3-agent',
            'host': L3_HOSTA,
            'topic': topics.L3_AGENT,
            'configurations': {'use_namespaces': True,
                               'router_id': None,
                               'handle_internal_only_routers':
                               True,
                               'gateway_external_network_id':
                               None,
                               'interface_driver': 'interface_driver',
                               },
            'agent_type': constants.AGENT_TYPE_L3}
        l3_hostb = copy.deepcopy(l3_hosta)
        l3_hostb['host'] = L3_HOSTB
        dhcp_hosta = {
            'binary': 'neutron-dhcp-agent',
            'host': DHCP_HOSTA,
            'topic': 'DHCP_AGENT',
            'configurations': {'dhcp_driver': 'dhcp_driver',
                               'use_namespaces': True,
                               },
            'agent_type': constants.AGENT_TYPE_DHCP}
        dhcp_hostc = copy.deepcopy(dhcp_hosta)
        dhcp_hostc['host'] = DHCP_HOSTC
        lbaas_hosta = {
            'binary': 'neutron-loadbalancer-agent',
            'host': LBAAS_HOSTA,
            'topic': 'LOADBALANCER_AGENT',
            'configurations': {'device_drivers': ['haproxy_ns']},
            'agent_type': constants.AGENT_TYPE_LOADBALANCER}
        lbaas_hostb = copy.deepcopy(lbaas_hosta)
        lbaas_hostb['host'] = LBAAS_HOSTB
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': l3_hosta},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': l3_hostb},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': dhcp_hosta},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': dhcp_hostc},
                              time=timeutils.strtime())

        res = [l3_hosta, l3_hostb, dhcp_hosta, dhcp_hostc]
        if lbaas_agents:
            callback.report_state(self.adminContext,
                                  agent_state={'agent_state': lbaas_hosta},
                                  time=timeutils.strtime())
            callback.report_state(self.adminContext,
                                  agent_state={'agent_state': lbaas_hostb},
                                  time=timeutils.strtime())
            res += [lbaas_hosta, lbaas_hostb]

        return res
Example #43
    def test_shelved_poll_not_timedout(self):
        instance = self._create_fake_instance_obj()
        sys_meta = instance.system_metadata
        shelved_time = timeutils.utcnow()
        timeutils.set_time_override(shelved_time)
        timeutils.advance_time_seconds(CONF.shelved_offload_time - 1)
        sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
        db.instance_update_and_get_original(self.context, instance['uuid'],
                {'vm_state': vm_states.SHELVED, 'system_metadata': sys_meta})

        self.mox.StubOutWithMock(self.compute.driver, 'destroy')
        self.mox.ReplayAll()
        self.compute._poll_shelved_instances(self.context)
Example #44
    def test_shelved_poll_not_timedout(self):
        instance = jsonutils.to_primitive(self._create_fake_instance())
        sys_meta = utils.metadata_to_dict(instance['system_metadata'])
        shelved_time = timeutils.utcnow()
        timeutils.set_time_override(shelved_time)
        timeutils.advance_time_seconds(CONF.shelved_offload_time - 1)
        sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
        db.instance_update_and_get_original(self.context, instance['uuid'],
                {'vm_state': vm_states.SHELVED, 'system_metadata': sys_meta})

        self.mox.StubOutWithMock(self.compute.driver, 'destroy')
        self.mox.ReplayAll()
        self.compute._poll_shelved_instances(self.context)
Example #45
    def to_dict(self):
        default = super(RequestContext, self).to_dict()
        extra = {'user_id': self.user_id,
                 'project_id': self.project_id,
                 'project_name': self.project_name,
                 'domain': self.domain,
                 'read_deleted': self.read_deleted,
                 'roles': self.roles,
                 'remote_address': self.remote_address,
                 'timestamp': timeutils.strtime(self.timestamp),
                 'quota_class': self.quota_class,
                 'service_catalog': self.service_catalog}
        return dict(default.items() + extra.items())
Example #46
    def _stub_meta_expected(self, now, data, nmeta=1):
        # Stop time at now
        timeutils.set_time_override(now)
        self.addCleanup(timeutils.clear_time_override)

        # Then set a stub to ensure the metadata update is as
        # expected based on the timestamp and data
        self.m.StubOutWithMock(resource.Resource, 'metadata_set')
        expected = {timeutils.strtime(now): data}
        # Note for ScalingPolicy, we expect to get a metadata
        # update for the policy and autoscaling group, so pass nmeta=2
        for x in range(nmeta):
            resource.Resource.metadata_set(expected).AndReturn(None)
Example #47
    def to_dict(self):
        return {'user_id': self._user_id,
                'project_id': self._project_id,
                'domain_id': self._domain_id,
                'domain_name': self._domain_name,
                'roles': self._roles,
                'timestamp': timeutils.strtime(self._timestamp),
                'request_id': self._request_id,
                'auth_token': self._auth_token,
                'user_name': self._user_name,
                'service_catalog': self._service_catalog,
                'project_name': self._project_name,
                'user': self._user}
Example #48
    def _stub_meta_expected(self, now, data, nmeta=1):
        # Stop time at now
        self.m.StubOutWithMock(timeutils, 'utcnow')
        timeutils.utcnow().MultipleTimes().AndReturn(now)

        # Then set a stub to ensure the metadata update is as
        # expected based on the timestamp and data
        self.m.StubOutWithMock(resource.Resource, 'metadata_set')
        expected = {timeutils.strtime(now): data}
        # Note for ScalingPolicy, we expect to get a metadata
        # update for the policy and autoscaling group, so pass nmeta=2
        for x in range(nmeta):
            resource.Resource.metadata_set(expected).AndReturn(None)
Example #49
    def _register_l3_agent(self, host):
        agent = {
            'binary': 'neutron-l3-agent',
            'host': host,
            'topic': topics.L3_AGENT,
            'configurations': {},
            'agent_type': n_constants.AGENT_TYPE_L3,
            'start_flag': True
        }
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': agent},
                              time=timeutils.strtime())
Example #50
    def _stub_meta_expected(self, now, data, nmeta=1):
        # Stop time at now
        self.m.StubOutWithMock(timeutils, 'utcnow')
        timeutils.utcnow().MultipleTimes().AndReturn(now)

        # Then set a stub to ensure the metadata update is as
        # expected based on the timestamp and data
        self.m.StubOutWithMock(resource.Resource, 'metadata_set')
        expected = {timeutils.strtime(now): data}
        # Note for ScalingPolicy, we expect to get a metadata
        # update for the policy and autoscaling group, so pass nmeta=2
        for x in range(nmeta):
            resource.Resource.metadata_set(expected).AndReturn(None)
Example #51
    def _register_l3_agent(self, host):
        agent = {
            'binary': 'neutron-l3-agent',
            'host': host,
            'topic': topics.L3_AGENT,
            'configurations': {},
            'agent_type': n_constants.AGENT_TYPE_L3,
            'start_flag': True
        }
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': agent},
                              time=timeutils.strtime())
Example #52
    def test_shelve_volume_backed(self):
        db_instance = jsonutils.to_primitive(self._create_fake_instance())
        instance = objects.Instance.get_by_uuid(
            self.context,
            db_instance['uuid'],
            expected_attrs=['metadata', 'system_metadata'])
        instance.task_state = task_states.SHELVING
        instance.save()
        host = 'fake-mini'
        cur_time = timeutils.utcnow()
        timeutils.set_time_override(cur_time)
        sys_meta = dict(instance.system_metadata)
        sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
        sys_meta['shelved_image_id'] = None
        sys_meta['shelved_host'] = host
        db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)

        self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute.driver, 'power_off')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')

        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'shelve_offload.start')
        self.compute.driver.power_off(instance)
        self.compute._get_power_state(self.context, instance).AndReturn(123)
        db.instance_update_and_get_original(
            self.context,
            instance['uuid'],
            {'power_state': 123,
             'host': None,
             'node': None,
             'vm_state': vm_states.SHELVED_OFFLOADED,
             'task_state': None,
             'expected_task_state': [task_states.SHELVING,
                                     task_states.SHELVING_OFFLOADING]},
            update_cells=False,
            columns_to_join=['metadata', 'system_metadata'],
        ).AndReturn((db_instance, db_instance))
        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'shelve_offload.end')
        self.mox.ReplayAll()

        self.compute.shelve_offload_instance(self.context, instance)
Example #53
    def test_scaling_policy_cooldown_zero(self):
        t = template_format.parse(as_template)

        # Create the scaling policy (with cooldown=0) and scale up one
        properties = t['resources']['my-policy']['properties']
        properties['cooldown'] = '0'

        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'my-policy')

        now = timeutils.utcnow()
        previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
        self.patchobject(pol, 'metadata_get', return_value=previous_meta)
        self.assertFalse(pol._cooldown_inprogress())
Example #54
    def test_trusted_filter_trusted_and_locale_formated_vtime_passes(self,
            req_mock):
        oat_data = {"hosts": [{"host_name": "host1",
                                    "trust_lvl": "trusted",
                                    "vtime": timeutils.strtime(fmt="%c")},
                                   {"host_name": "host2",
                                    "trust_lvl": "trusted",
                                    "vtime": timeutils.strtime(fmt="%D")},
                                    # This is just a broken date to ensure that
                                    # we're not just arbitrarily accepting any
                                    # date format.
                        ]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'trusted'}
        filter_properties = {'context': mock.sentinel.ctx,
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': extra_specs}}
        host = fakes.FakeHostState('host1', 'host1', {})
        bad_host = fakes.FakeHostState('host2', 'host2', {})

        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
        self.assertFalse(self.filt_cls.host_passes(bad_host,
                                                   filter_properties))
Example #55
    def test_scaling_policy_cooldown_zero(self):
        t = template_format.parse(as_template)

        # Create the scaling policy (with Cooldown=0) and scale up one
        properties = t['Resources']['WebServerScaleUpPolicy']['Properties']
        properties['Cooldown'] = '0'

        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')

        now = timeutils.utcnow()
        previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
        self.patchobject(pol, 'metadata_get', return_value=previous_meta)
        self.assertFalse(pol._cooldown_inprogress())
Example #56
    def test_get_events(self):
        data = self.get_json(self.PATH, headers=headers)
        self.assertEqual(3, len(data))
        # We expect to get native UTC generated time back
        expected_generated = timeutils.strtime(
            at=timeutils.normalize_time(self.trait_time),
            fmt=timeutils._ISO8601_TIME_FORMAT)
        for event in data:
            self.assertTrue(event['event_type'] in ['Foo', 'Bar', 'Zoo'])
            self.assertEqual(4, len(event['traits']))
            self.assertEqual(expected_generated, event['generated'])
            for trait_name in ['trait_A', 'trait_B',
                               'trait_C', 'trait_D']:
                self.assertTrue(trait_name in map(lambda x: x['name'],
                                                  event['traits']))