Example #1
    def host_passes(self, host_state, filter_properties):
        context = filter_properties['context']
        host = volume_utils.extract_host(host_state.host, 'host')

        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        instance_uuid = scheduler_hints.get(HINT_KEYWORD, None)

        # Without the 'local_to_instance' hint, this filter does not apply.
        if not instance_uuid:
            return True

        if not uuidutils.is_uuid_like(instance_uuid):
            raise exception.InvalidUUID(uuid=instance_uuid)

        # TODO(adrienverge): Currently it is not recommended to allow instance
        # migrations on hypervisors where this hint is used: a volume created
        # locally before the migration will not be migrated automatically, and
        # if the instance migrates while the volume is being scheduled the
        # result is unpredictable. A future enhancement would be to subscribe
        # to Nova migration events (e.g. via Ceilometer).

        # First, look for already-known information in the local cache.
        if instance_uuid in self._cache:
            return self._cache[instance_uuid] == host

        if not self._nova_has_extended_server_attributes(context):
            LOG.warning(
                _LW('Hint "%s" dropped because '
                    'ExtendedServerAttributes not active in Nova.'),
                HINT_KEYWORD)
            raise exception.CinderException(
                _('Hint "%s" not supported.') % HINT_KEYWORD)

        server = nova.API().get_server(context,
                                       instance_uuid,
                                       privileged_user=True,
                                       timeout=REQUESTS_TIMEOUT)

        if not hasattr(server, INSTANCE_HOST_PROP):
            LOG.warning(
                _LW('Hint "%s" dropped because Nova did not return '
                    'enough information. Either Nova policy needs to '
                    'be changed or a privileged account for Nova '
                    'should be specified in conf.'), HINT_KEYWORD)
            raise exception.CinderException(
                _('Hint "%s" not supported.') % HINT_KEYWORD)

        self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP)

        # Match if given instance is hosted on host
        return self._cache[instance_uuid] == host
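
A minimal usage sketch of this filter: the scheduler builds filter_properties carrying the 'local_to_instance' hint and asks each backend whether it passes. The names admin_ctxt, host_states and instance_locality_filter below are illustrative assumptions; they are not defined in the example above.

# Assumed setup: admin_ctxt is a RequestContext, host_states is an iterable of
# HostState objects, and instance_locality_filter is an instance of the filter
# class that owns host_passes() above.
filter_properties = {
    'context': admin_ctxt,
    'scheduler_hints': {'local_to_instance': instance_uuid},
}
passing_hosts = [hs for hs in host_states
                 if instance_locality_filter.host_passes(hs, filter_properties)]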
Example #2
    def create_volume(self,
                      context,
                      topic,
                      volume_id,
                      snapshot_id=None,
                      image_id=None,
                      request_spec=None,
                      filter_properties=None,
                      volume=None):

        self._wait_for_scheduler()

        # FIXME(thangp): Remove this in v2.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the
            # volume by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)

        try:
            flow_engine = create_volume.get_flow(context, db, self.driver,
                                                 request_spec,
                                                 filter_properties, volume,
                                                 snapshot_id, image_id)
        except Exception:
            msg = _("Failed to create scheduler manager volume flow")
            LOG.exception(msg)
            raise exception.CinderException(msg)

        with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
            flow_engine.run()
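
The flow_engine built and run above follows the standard TaskFlow pattern. The toy flow below is a hedged, self-contained illustration of that pattern, not Cinder's actual create_volume flow.

from taskflow import engines, task
from taskflow.patterns import linear_flow


class ScheduleVolumeTask(task.Task):
    def execute(self):
        # Placeholder for the real scheduling work.
        print('scheduling volume')


# Build a one-task linear flow, load it into an engine and run it, mirroring
# the get_flow() / flow_engine.run() sequence in the example above.
flow = linear_flow.Flow('toy_create_volume').add(ScheduleVolumeTask())
engines.load(flow).run()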
Example #3
    def create(self):
        try:
            ssh = paramiko.SSHClient()
            if ',' in self.hosts_key_file:
                files = self.hosts_key_file.split(',')
                for f in files:
                    ssh.load_host_keys(f)
            else:
                ssh.load_host_keys(self.hosts_key_file)
            # If strict_ssh_host_key_policy is set we want to reject, by
            # default, any host that has no entry in the known_hosts file.
            # Otherwise we use AutoAddPolicy, which accepts the key on the
            # first connect but fails if the key later changes.
            # load_host_keys can handle hashed known_hosts entries.
            if self.strict_ssh_host_key_policy:
                ssh.set_missing_host_key_policy(paramiko.RejectPolicy())
            else:
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

            if self.password:
                ssh.connect(self.ip,
                            port=self.port,
                            username=self.login,
                            password=self.password,
                            timeout=self.conn_timeout)
            elif self.privatekey:
                pkfile = os.path.expanduser(self.privatekey)
                privatekey = paramiko.RSAKey.from_private_key_file(pkfile)
                ssh.connect(self.ip,
                            port=self.port,
                            username=self.login,
                            pkey=privatekey,
                            timeout=self.conn_timeout)
            else:
                msg = _("Specify a password or private_key")
                raise exception.CinderException(msg)

            # Paramiko by default sets the socket timeout to 0.1 seconds,
            # ignoring what we set through the SSHClient, which does not help
            # with keeping long-lived connections. We bypass it by overriding
            # the timeout after the transport is initialized: we set the
            # socket timeout to None and enable keepalives, so the server
            # keeps the connection open. All this does is send a keepalive
            # packet every ssh_conn_timeout seconds.
            if self.conn_timeout:
                transport = ssh.get_transport()
                transport.sock.settimeout(None)
                transport.set_keepalive(self.conn_timeout)
            return ssh
        except Exception as e:
            msg = _("Error connecting via ssh: %s") % six.text_type(e)
            LOG.error(msg)
            raise paramiko.SSHException(msg)
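
The keepalive workaround described in the comment above can be shown standalone with plain paramiko. The host and credentials below are placeholders, and 30 seconds stands in for conn_timeout.

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('192.0.2.10', port=22, username='admin',
               password='secret', timeout=30)

# Undo paramiko's short default socket timeout and ask the server to keep the
# connection alive by sending a keepalive packet every 30 seconds.
transport = client.get_transport()
transport.sock.settimeout(None)
transport.set_keepalive(30)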
Example #4
def validate_setup_for_nested_quota_use(ctxt,
                                        resources,
                                        nested_quota_driver,
                                        fix_allocated_quotas=False):
    """Validates the setup supports using nested quotas.

    Ensures that Keystone v3 or greater is being used, that the current
    user has the cloud admin role, and that the existing quotas make sense
    to nest in the current hierarchy (e.g. that no child quota would be
    larger than its parent).

    :param resources: the quota resources to validate
    :param nested_quota_driver: nested quota driver used to validate each tree
    :param fix_allocated_quotas: if True, each parent project's "allocated"
        total will be calculated from the existing child limits and the DB
        will be updated. If False, an exception is raised reporting any
        parent allocated quotas that are currently incorrect.
    """
    try:
        project_roots = get_all_root_project_ids(ctxt)

        # Now that we've got the roots of each tree, validate the trees
        # to ensure that each is setup logically for nested quotas
        for root in project_roots:
            root_proj = get_project_hierarchy(ctxt, root, subtree_as_ids=True)
            nested_quota_driver.validate_nested_setup(
                ctxt,
                resources, {root_proj.id: root_proj.subtree},
                fix_allocated_quotas=fix_allocated_quotas)
    except exceptions.VersionNotAvailable:
        msg = _("Keystone version 3 or greater must be used to get nested "
                "quota support.")
        raise exception.CinderException(message=msg)
    except exceptions.Forbidden:
        msg = _("Must run this command as cloud admin using "
                "a Keystone policy.json which allows cloud "
                "admin to list and get any project.")
        raise exception.CinderException(message=msg)
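
A hedged sketch of how this validator might be invoked, e.g. from a cinder-manage style command. QUOTAS and NestedDbQuotaDriver are assumptions borrowed from Cinder's quota module; they are not defined in the example above.

ctxt = context.get_admin_context()
validate_setup_for_nested_quota_use(
    ctxt,
    QUOTAS.resources,                 # assumed: the registered quota resources
    quota.NestedDbQuotaDriver(),      # assumed nested quota driver
    fix_allocated_quotas=True)        # recompute parents' allocated totals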
Example #5
def get_associations(context, specs_id):
    """Get all associations of given qos specs."""
    try:
        # query returns a list of volume types associated with qos specs
        associates = db.qos_specs_associations_get(context, specs_id)
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        msg = _('Failed to get all associations of qos specs %s') % specs_id
        LOG.warning(msg)
        raise exception.CinderException(message=msg)

    result = []
    for vol_type in associates:
        # Each association is reported as a volume type reference.
        result.append(dict(association_type='volume_type',
                           name=vol_type['name'],
                           id=vol_type['id']))

    return result
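
A short usage sketch, assuming ctxt is a RequestContext and qos_id refers to an existing QoS spec; each returned member is a dict of the form {'association_type': 'volume_type', 'name': ..., 'id': ...}.

for assoc in get_associations(ctxt, qos_id):
    print('%(association_type)s: %(name)s (%(id)s)' % assoc)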