Example #1
    def __call__(self, req):
        """Ensures that tenants in url and token are equal.

        Handle incoming request by checking tenant info from the headers and
        url ({tenant_id} url attribute).

        Pass request downstream on success.
        Reject request if tenant_id from headers is not equal to tenant_id
        from url.
        """
        token_tenant = req.environ.get("HTTP_X_TENANT_ID")
        if not token_tenant:
            LOG.warning(_LW("Can't get tenant_id from env"))
            raise ex.HTTPServiceUnavailable()

        path = req.environ["PATH_INFO"]
        if path != "/":
            version, url_tenant, rest = commons.split_path(path, 3, 3, True)
            if not version or not url_tenant or not rest:
                LOG.warning(_LW("Incorrect path: {path}").format(path=path))
                raise ex.HTTPNotFound(_("Incorrect path"))

            if token_tenant != url_tenant:
                LOG.debug("Unauthorized: token tenant != requested tenant")
                raise ex.HTTPUnauthorized(_("Token tenant != requested tenant"))
        return self.application
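
Note: a minimal, self-contained sketch of the check this middleware performs, assuming paths of the form /<version>/<tenant_id>/<rest>. `tenants_match` is a hypothetical helper for illustration, not part of Sahara.

def tenants_match(header_tenant, path):
    # Compare the tenant from the X-Tenant-Id header against the one
    # embedded in the URL path (second non-empty path segment).
    parts = [p for p in path.split("/") if p]
    if len(parts) < 3:
        return False  # no <version>/<tenant_id>/<rest> triple to compare
    return header_tenant == parts[1]

assert tenants_match("abc123", "/v1.1/abc123/clusters")
assert not tenants_match("abc123", "/v1.1/other-tenant/clusters")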
Example #2
    def rollback_cluster(self, cluster, reason):
        rollback_info = cluster.rollback_info or {}
        self._update_rollback_strategy(cluster)

        if rollback_info.get('shutdown', False):
            self._rollback_cluster_creation(cluster, reason)
            LOG.warning(
                _LW("Cluster {name} creation rollback "
                    "(reason: {reason})").format(name=cluster.name,
                                                 reason=reason))
            return False

        rollback_count = rollback_info.get('rollback_count', {}).copy()
        target_count = rollback_info.get('target_count', {}).copy()
        if rollback_count or target_count:
            self._rollback_cluster_scaling(cluster, rollback_count,
                                           target_count, reason)
            LOG.warning(
                _LW("Cluster {name} scaling rollback "
                    "(reason: {reason})").format(name=cluster.name,
                                                 reason=reason))

            return True

        return False
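
Note: the two branches above imply rollback_info payloads roughly like the following; the keys come from the snippet, the values are hypothetical.

# Hypothetical rollback_info contents, for illustration only.
creation_rollback = {'shutdown': True}
scaling_rollback = {
    'rollback_count': {'worker': 2},  # node group -> count to roll back to
    'target_count': {'worker': 5},    # node group -> count that was attempted
}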
Example #3
    def __call__(self, req):
        """Ensures that the requested and token tenants match

        Handle incoming requests by checking tenant info from the
        headers and url ({tenant_id} url attribute), if using v1 or v1.1
        APIs. If using the v2 API, this function will check the token
        tenant and the requested tenant in the headers.

        Pass request downstream on success.
        Reject request if tenant_id from headers is not equal to the
        tenant_id from url or v2 project header.
        """
        path = req.environ['PATH_INFO']
        if path != '/':
            token_tenant = req.environ.get("HTTP_X_TENANT_ID")
            if not token_tenant:
                LOG.warning(_LW("Can't get tenant_id from env"))
                raise ex.HTTPServiceUnavailable()

            if path.startswith('/v2'):
                version, rest = commons.split_path(path, 2, 2, True)
                requested_tenant = req.headers.get('OpenStack-Project-ID')
            else:
                version, requested_tenant, rest = commons.split_path(
                    path, 3, 3, True)

            if not version or not requested_tenant or not rest:
                LOG.warning(_LW("Incorrect path: {path}").format(path=path))
                raise ex.HTTPNotFound(_("Incorrect path"))

            if token_tenant != requested_tenant:
                LOG.debug("Unauthorized: token tenant != requested tenant")
                raise ex.HTTPUnauthorized(
                    _('Token tenant != requested tenant'))
        return self.application
Example #4
File: api.py Project: thefuyang/sahara
def delete_cluster_template(ctx, template, rollback=False):
    rollback_msg = " on rollback" if rollback else ""

    # If we are not deleting something that we just created,
    # do usage checks to ensure that the template is not in
    # use by a cluster
    if not rollback:
        clusters = conductor.API.cluster_get_all(ctx)
        cluster_users = u.check_cluster_template_usage(template["id"],
                                                       clusters)

        if cluster_users:
            LOG.warning(_LW("Cluster template {info} "
                        "in use by clusters {clusters}").format(
                            info=u.name_and_id(template),
                            clusters=cluster_users))

            LOG.warning(_LW("Deletion of cluster template "
                        "{info} failed").format(info=u.name_and_id(template)))
            return

    try:
        conductor.API.cluster_template_destroy(ctx, template["id"],
                                               ignore_default=True)
    except Exception as e:
        LOG.warning(_LW("Deletion of cluster template {info} failed{rollback}"
                    ", {reason}").format(info=u.name_and_id(template),
                                         reason=e,
                                         rollback=rollback_msg))
    else:
        LOG.info(_LI("Deleted cluster template {info}{rollback}").format(
            info=u.name_and_id(template), rollback=rollback_msg))
Example #5
    def get(self, relpath=None, params=None):
        """Invoke the GET method on a resource

        :param relpath: Optional. A relative path to this resource's path.
        :param params: Key-value data.

        :return: A dictionary of the JSON result.
        """
        for retry in six.moves.xrange(self.retries + 1):
            if retry:
                context.sleep(self.retry_sleep)
            try:
                return self.invoke("GET", relpath, params)
            except (socket.error, urllib.error.URLError) as e:
                if "timed out" in six.text_type(e).lower():
                    if retry < self.retries:
                        LOG.warning(_LW("Timeout issuing GET request for "
                                        "{path}. Will retry").format(
                                            path=self._join_uri(relpath)))
                    else:
                        LOG.warning(_LW("Timeout issuing GET request for "
                                        "{path}. No retries left").format(
                                            path=self._join_uri(relpath)))
                else:
                    raise e
        else:
            raise ex.CMApiException(_("Get retry max time reached."))
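
Note: the for/else idiom above is easy to misread: the else clause runs only after every attempt has been used without returning. A self-contained sketch, with time.sleep standing in for context.sleep and a plain callable standing in for the CM API call:

import time

def get_with_retries(fetch, retries=3, retry_sleep=1.0):
    for retry in range(retries + 1):
        if retry:
            time.sleep(retry_sleep)  # back off before every retry
        try:
            return fetch()
        except TimeoutError:
            if retry < retries:
                print("timed out, will retry")
            else:
                print("timed out, no retries left")
    else:
        # reached only when all attempts timed out
        raise RuntimeError("Get retry max time reached.")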
Example #6
File: volumes.py Project: vnogin/sahara
def _check_installed_xfs(instance):
    redhat = "rpm -q xfsprogs || yum install -y xfsprogs"
    debian = "dpkg -s xfsprogs || apt-get -y install xfsprogs"

    cmd_map = {
        "centos": redhat,
        "fedora": redhat,
        "redhatenterpriseserver": redhat,
        "ubuntu": debian,
        'debian': debian
    }

    with instance.remote() as r:
        distro = _get_os_distrib(r)
        if not cmd_map.get(distro):
            LOG.warning(
                _LW("Cannot verify installation of XFS tools for "
                    "unknown distro {distro}.").format(distro=distro))
            return False
        try:
            r.execute_command(cmd_map.get(distro), run_as_root=True)
            return True
        except Exception as e:
            LOG.warning(
                _LW("Cannot install xfsprogs: {reason}").format(reason=e))
            return False
Example #7
    def get(self, relpath=None, params=None):
        """Invoke the GET method on a resource

        :param relpath: Optional. A relative path to this resource's path.
        :param params: Key-value data.

        :return: A dictionary of the JSON result.
        """
        for retry in six.moves.xrange(self.retries + 1):
            if retry:
                context.sleep(self.retry_sleep)
            try:
                return self.invoke("GET", relpath, params)
            except (socket.error, urllib2.URLError) as e:
                if "timed out" in six.text_type(e).lower():
                    if retry < self.retries:
                        LOG.warning(
                            _LW("Timeout issuing GET request for "
                                "{path}. Will retry").format(
                                    path=self._join_uri(relpath)))
                    else:
                        LOG.warning(
                            _LW("Timeout issuing GET request for "
                                "{path}. No retries left").format(
                                    path=self._join_uri(relpath)))
                else:
                    raise e
        else:
            raise ex.CMApiException(_("Get retry max time reached."))
Example #8
    def _delete_auto_security_group(self, node_group):
        if not node_group.auto_security_group:
            return

        if not node_group.security_groups:
            # node group has no security groups
            # nothing to delete
            return

        name = node_group.security_groups[-1]

        try:
            client = nova.client().security_groups
            security_group = client.get(name)
            if (security_group.name !=
                    g.generate_auto_security_group_name(node_group)):
                LOG.warning(
                    _LW("Auto security group for node group {name} is "
                        "not found").format(name=node_group.name))
                return
            client.delete(name)
        except Exception:
            LOG.warning(
                _LW("Failed to delete security group {name}").format(
                    name=name))
Example #9
def check_usage_of_existing(ctx, ng_templates, cl_templates):
    '''Determine if any of the specified templates are in use

    This method searches for the specified templates by name and
    determines whether or not any existing templates are in use
    by a cluster or cluster template. Returns True if any of
    the templates are in use.

    :param ng_templates: A list of dictionaries. Each dictionary
                         has a "template" entry that represents
                         a node group template.
    :param cl_templates: A list of dictionaries. Each dictionary
                         has a "template" entry that represents
                         a cluster template.
    :returns: True if any of the templates are in use, False otherwise
    '''
    error = False
    clusters = conductor.API.cluster_get_all(ctx)

    for ng_info in ng_templates:
        ng = u.find_node_group_template_by_name(ctx,
                                                ng_info["template"]["name"])
        if ng:
            cluster_users, template_users = u.check_node_group_template_usage(
                ng["id"], clusters)

            if cluster_users:
                LOG.warning(
                    _LW("Node group template {name} "
                        "in use by clusters {clusters}").format(
                            name=ng["name"], clusters=cluster_users))
            if template_users:
                LOG.warning(
                    _LW("Node group template {name} "
                        "in use by cluster templates {templates}").format(
                            name=ng["name"], templates=template_users))

            if cluster_users or template_users:
                LOG.warning(
                    _LW("Update of node group template "
                        "{name} is not allowed").format(name=ng["name"]))
                error = True

    for cl_info in cl_templates:
        cl = u.find_cluster_template_by_name(ctx, cl_info["template"]["name"])
        if cl:
            cluster_users = u.check_cluster_template_usage(cl["id"], clusters)

            if cluster_users:
                LOG.warning(
                    _LW("Cluster template {name} "
                        "in use by clusters {clusters}").format(
                            name=cl["name"], clusters=cluster_users))

                LOG.warning(
                    _LW("Update of cluster template "
                        "{name} is not allowed").format(name=cl["name"]))
                error = True

    return error
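
Note: per the docstring, each input list wraps a template dict under a "template" key. A hypothetical call, for illustration:

# Hypothetical inputs for check_usage_of_existing (shapes from the docstring).
ng_templates = [{"template": {"name": "worker-ng"}}]
cl_templates = [{"template": {"name": "hadoop-cluster"}}]
# error = check_usage_of_existing(ctx, ng_templates, cl_templates)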
Example #10
    def _shutdown_instance(self, instance):
        ctx = context.ctx()

        if instance.node_group.floating_ip_pool:
            try:
                networks.delete_floating_ip(instance.instance_id)
            except nova_exceptions.NotFound:
                LOG.warn(
                    _LW("Attempted to delete non-existent floating IP in "
                        "pool %(pool)s from instance %(instance)s"), {
                            'pool': instance.node_group.floating_ip_pool,
                            'instance': instance.instance_id
                        })

        try:
            volumes.detach_from_instance(instance)
        except Exception:
            LOG.warn(_LW("Detaching volumes from instance %s failed"),
                     instance.instance_id)

        try:
            nova.client().servers.delete(instance.instance_id)
        except nova_exceptions.NotFound:
            LOG.warn(_LW("Attempted to delete non-existent instance %s"),
                     instance.instance_id)

        conductor.instance_remove(ctx, instance)
Example #11
    def _shutdown_instance(self, instance):
        ctx = context.ctx()

        if instance.node_group.floating_ip_pool:
            try:
                networks.delete_floating_ip(instance.instance_id)
            except nova_exceptions.NotFound:
                LOG.warning(
                    _LW("Attempted to delete non-existent floating IP "
                        "in pool {pool} from instance {instance}").format(
                            pool=instance.node_group.floating_ip_pool,
                            instance=instance.instance_id))

        try:
            volumes.detach_from_instance(instance)
        except Exception:
            LOG.warning(
                _LW("Detaching volumes from instance {id} failed").format(
                    id=instance.instance_id))

        try:
            nova.client().servers.delete(instance.instance_id)
        except nova_exceptions.NotFound:
            LOG.warning(
                _LW("Attempted to delete non-existent instance {id}").format(
                    id=instance.instance_id))

        conductor.instance_remove(ctx, instance)
Example #12
    def validate_plugin_labels(self, plugin_name, version):
        details = self.get_label_details(plugin_name)
        plb = details.get(PLUGIN_LABELS_SCOPE, {})
        if not plb.get('enabled', {}).get('status'):
            raise ex.InvalidReferenceException(
                _("Plugin %s is not enabled") % plugin_name)

        if plb.get('deprecated', {}).get('status', False):
            LOG.warning(
                _LW("Plugin %s is deprecated and can removed in next "
                    "release") % plugin_name)

        vlb = details.get(VERSION_LABELS_SCOPE, {}).get(version, {})
        if not vlb.get('enabled', {}).get('status'):
            raise ex.InvalidReferenceException(
                _("Version %(version)s of plugin %(plugin)s is not enabled") %
                {
                    'version': version,
                    'plugin': plugin_name
                })

        if vlb.get('deprecated', {}).get('status', False):
            LOG.warning(
                _LW("Using version %(version)s of plugin %(plugin)s is "
                    "deprecated and can removed in next release") % {
                        'version': version,
                        'plugin': plugin_name
                    })
Example #13
    def __call__(self, req):
        """Ensures that tenants in url and token are equal.

        Handle incoming request by checking tenant info from the headers and
        url ({tenant_id} url attribute).

        Pass request downstream on success.
        Reject request if tenant_id from headers is not equal to tenant_id
        from url.
        """
        token_tenant = req.environ.get("HTTP_X_TENANT_ID")
        if not token_tenant:
            LOG.warning(_LW("Can't get tenant_id from env"))
            raise ex.HTTPServiceUnavailable()

        path = req.environ['PATH_INFO']
        if path != '/':
            version, url_tenant, rest = commons.split_path(path, 3, 3, True)
            if not version or not url_tenant or not rest:
                LOG.warning(_LW("Incorrect path: {path}").format(path=path))
                raise ex.HTTPNotFound(_("Incorrect path"))

            if token_tenant != url_tenant:
                LOG.debug("Unauthorized: token tenant != requested tenant")
                raise ex.HTTPUnauthorized(
                    _('Token tenant != requested tenant'))
        return self.application
Example #14
    def __call__(self, req):
        """Ensures that the requested and token tenants match

        Handle incoming requests by checking tenant info from the
        headers and url ({tenant_id} url attribute), if using v1 or v1.1
        APIs. If using the v2 API, this function will check the token
        tenant and the requested tenant in the headers.

        Pass request downstream on success.
        Reject request if tenant_id from headers is not equal to the
        tenant_id from url or v2 project header.
        """
        path = req.environ['PATH_INFO']
        if path != '/':
            token_tenant = req.environ.get("HTTP_X_TENANT_ID")
            if not token_tenant:
                LOG.warning(_LW("Can't get tenant_id from env"))
                raise ex.HTTPServiceUnavailable()

            if path.startswith('/v2'):
                version, rest = commons.split_path(path, 2, 2, True)
                requested_tenant = req.headers.get('OpenStack-Project-ID')
            else:
                version, requested_tenant, rest = commons.split_path(
                    path, 3, 3, True)

            if not version or not requested_tenant or not rest:
                LOG.warning(_LW("Incorrect path: {path}").format(path=path))
                raise ex.HTTPNotFound(_("Incorrect path"))

            if token_tenant != requested_tenant:
                LOG.debug("Unauthorized: token tenant != requested tenant")
                raise ex.HTTPUnauthorized(
                    _('Token tenant != requested tenant'))
        return self.application
Example #15
def _check_installed_xfs(instance):
    redhat = "rpm -q xfsprogs || yum install -y xfsprogs"
    debian = "dpkg -s xfsprogs || apt-get -y install xfsprogs"

    cmd_map = {
        "centos": redhat,
        "fedora": redhat,
        "redhatenterpriseserver": redhat,
        "ubuntu": debian,
        'debian': debian
    }

    with instance.remote() as r:
        distro = _get_os_distrib(r)
        if not cmd_map.get(distro):
            LOG.warning(
                _LW("Cannot verify installation of XFS tools for "
                    "unknown distro {distro}.").format(distro=distro))
            return False
        try:
            r.execute_command(cmd_map.get(distro), run_as_root=True)
            return True
        except Exception as e:
            LOG.warning(
                _LW("Cannot install xfsprogs: {reason}").format(reason=e))
            return False
Example #16
    def _shutdown_instance(self, instance):
        ctx = context.ctx()

        if instance.node_group.floating_ip_pool:
            try:
                networks.delete_floating_ip(instance.instance_id)
            except nova_exceptions.NotFound:
                LOG.warning(_LW("Attempted to delete non-existent floating IP "
                                "in pool {pool} from instance {instance}")
                            .format(pool=instance.node_group.floating_ip_pool,
                                    instance=instance.instance_id))

        try:
            volumes.detach_from_instance(instance)
        except Exception:
            LOG.warning(_LW("Detaching volumes from instance {id} failed")
                        .format(id=instance.instance_id))

        try:
            nova.client().servers.delete(instance.instance_id)
        except nova_exceptions.NotFound:
            LOG.warning(_LW("Attempted to delete non-existent instance {id}")
                        .format(id=instance.instance_id))

        conductor.instance_remove(ctx, instance)
Example #17
    def _shutdown_instance(self, instance):
        ctx = context.ctx()

        if instance.node_group.floating_ip_pool:
            try:
                networks.delete_floating_ip(instance.instance_id)
            except nova_exceptions.NotFound:
                LOG.warn(_LW("Attempted to delete non-existent floating IP in "
                         "pool %(pool)s from instance %(instance)s"),
                         {'pool': instance.node_group.floating_ip_pool,
                          'instance': instance.instance_id})

        try:
            volumes.detach_from_instance(instance)
        except Exception:
            LOG.warn(_LW("Detaching volumes from instance %s failed"),
                     instance.instance_id)

        try:
            nova.client().servers.delete(instance.instance_id)
        except nova_exceptions.NotFound:
            LOG.warn(_LW("Attempted to delete non-existent instance %s"),
                     instance.instance_id)

        conductor.instance_remove(ctx, instance)
Example #18
File: api.py Project: thefuyang/sahara
def check_usage_of_existing(ctx, ng_templates, cl_templates):
    '''Determine if any of the specified templates are in use

    This method searches for the specified templates by name and
    determines whether or not any existing templates are in use
    by a cluster or cluster template. Returns True if any of
    the templates are in use.

    :param ng_templates: A list of dictionaries. Each dictionary
                         has a "template" entry that represents
                         a node group template.
    :param cl_templates: A list of dictionaries. Each dictionary
                         has a "template" entry that represents
                         a cluster template.
    :returns: True if any of the templates are in use, False otherwise
    '''
    error = False
    clusters = conductor.API.cluster_get_all(ctx)

    for ng_info in ng_templates:
        ng = u.find_node_group_template_by_name(ctx,
                                                ng_info["template"]["name"])
        if ng:
            cluster_users, template_users = u.check_node_group_template_usage(
                ng["id"], clusters)

            if cluster_users:
                LOG.warning(_LW("Node group template {name} "
                            "in use by clusters {clusters}").format(
                                name=ng["name"], clusters=cluster_users))
            if template_users:
                LOG.warning(_LW("Node group template {name} "
                            "in use by cluster templates {templates}").format(
                                name=ng["name"], templates=template_users))

            if cluster_users or template_users:
                LOG.warning(_LW("Update of node group template "
                            "{name} is not allowed").format(name=ng["name"]))
                error = True

    for cl_info in cl_templates:
        cl = u.find_cluster_template_by_name(ctx, cl_info["template"]["name"])
        if cl:
            cluster_users = u.check_cluster_template_usage(cl["id"], clusters)

            if cluster_users:
                LOG.warning(_LW("Cluster template {name} "
                            "in use by clusters {clusters}").format(
                                name=cl["name"], clusters=cluster_users))

                LOG.warning(_LW("Update of cluster template "
                            "{name} is not allowed").format(name=cl["name"]))
                error = True

    return error
Example #19
File: cinder.py Project: uladz/sahara
def validate_config():
    if CONF.cinder.api_version == 1:
        LOG.warning(_LW('The Cinder v1 API is deprecated and will be removed '
                        'after the Juno release.  You should set '
                        'cinder.api_version=2 in your sahara.conf file.'))
    elif CONF.cinder.api_version != 2:
        LOG.warning(_LW('Unsupported Cinder API version: {bad}.  Please set a '
                        'correct value for cinder.api_version in your '
                        'sahara.conf file (currently supported versions are: '
                        '{supported}). Falling back to Cinder API version 2.')
                    .format(bad=CONF.cinder.api_version,
                            supported=[1, 2]))
        CONF.set_override('api_version', 2, group='cinder')
Example #20
def validate_config():
    if CONF.cinder.api_version == 1:
        LOG.warning(_LW('The Cinder v1 API is deprecated and will be removed '
                        'after the Juno release.  You should set '
                        'cinder.api_version=2 in your sahara.conf file.'))
    elif CONF.cinder.api_version != 2:
        LOG.warning(_LW('Unsupported Cinder API version: {bad}.  Please set a '
                        'correct value for cinder.api_version in your '
                        'sahara.conf file (currently supported versions are: '
                        '{supported}). Falling back to Cinder API version 2.')
                    .format(bad=CONF.cinder.api_version,
                            supported=[1, 2]))
        CONF.set_override('api_version', 2, group='cinder', enforce_type=True)
Example #21
File: api.py Project: thefuyang/sahara
def do_cluster_template_delete_by_id():
    ctx = Context(is_admin=True)

    # Make sure it's a default
    t = conductor.API.cluster_template_get(ctx, CONF.command.id)
    if t:
        if t["is_default"]:
            delete_cluster_template(ctx, t)
        else:
            LOG.warning(_LW("Deletion of cluster template {info} skipped, "
                        "not a default template").format(
                            info=u.name_and_id(t)))
    else:
        LOG.warning(_LW("Deletion of cluster template {id} failed, "
                    "no such template").format(id=CONF.command.id))
    def set_user_password(instance):
        LOG.debug('Setting password for user "mapr"')
        if self.mapr_user_exists(instance):
            with instance.remote() as r:
                r.execute_command('echo "%s:%s"|chpasswd' % ("mapr", "mapr"),
                                  run_as_root=True)
        else:
            LOG.warning(_LW('User "mapr" does not exist'))
Example #23
    def __init__(self,
                 user_id=None,
                 tenant_id=None,
                 token=None,
                 service_catalog=None,
                 username=None,
                 tenant_name=None,
                 roles=None,
                 is_admin=None,
                 remote_semaphore=None,
                 auth_uri=None,
                 **kwargs):
        if kwargs:
            LOG.warn(_LW('Arguments dropped when creating context: %s'),
                     kwargs)

        self.user_id = user_id
        self.tenant_id = tenant_id
        self.token = token
        self.service_catalog = service_catalog
        self.username = username
        self.tenant_name = tenant_name
        self.is_admin = is_admin
        self.remote_semaphore = remote_semaphore or semaphore.Semaphore(
            CONF.cluster_remote_threshold)
        self.roles = roles
        if auth_uri:
            self.auth_uri = auth_uri
        else:
            self.auth_uri = _get_auth_uri()
Example #24
def setup_common(possible_topdir, service_name):
    dev_conf = os.path.join(possible_topdir,
                            'etc',
                            'sahara',
                            'sahara.conf')
    config_files = None
    if os.path.exists(dev_conf):
        config_files = [dev_conf]

    config.parse_configs(config_files)
    log.setup("sahara")

    LOG.info(_LI('Starting Sahara %s'), service_name)

    # Validate other configurations (that may produce logs) here
    cinder.validate_config()

    messaging.setup()

    if service_name != 'all-in-one':
        LOG.warn(
            _LW("Distributed mode is in the alpha state, it's recommended to "
                "use all-in-one mode by running 'sahara-all' binary."))

    plugins_base.setup_plugins()
Example #25
    def __init__(self,
                 user_id=None,
                 tenant_id=None,
                 auth_token=None,
                 service_catalog=None,
                 username=None,
                 tenant_name=None,
                 roles=None,
                 is_admin=None,
                 remote_semaphore=None,
                 auth_uri=None,
                 **kwargs):
        if kwargs:
            LOG.warn(_LW('Arguments dropped when creating context: %s'),
                     kwargs)

        super(Context, self).__init__(auth_token=auth_token,
                                      user=user_id,
                                      tenant=tenant_id,
                                      is_admin=is_admin)
        self.service_catalog = service_catalog
        self.username = username
        self.tenant_name = tenant_name
        self.remote_semaphore = remote_semaphore or semaphore.Semaphore(
            CONF.cluster_remote_threshold)
        self.roles = roles
        if auth_uri:
            self.auth_uri = auth_uri
        else:
            self.auth_uri = _get_auth_uri()
Example #26
File: main.py Project: a9261/sahara
def setup_common(possible_topdir, service_name):
    dev_conf = os.path.join(possible_topdir,
                            'etc',
                            'sahara',
                            'sahara.conf')
    config_files = None
    if os.path.exists(dev_conf):
        config_files = [dev_conf]

    config.parse_configs(config_files)
    log.setup("sahara")

    LOG.info(_LI('Starting Sahara %s'), service_name)

    # Validate other configurations (that may produce logs) here
    cinder.validate_config()

    messaging.setup()

    if service_name != 'all-in-one':
        LOG.warn(
            _LW("Distributed mode is in the alpha state, it's recommended to "
                "use all-in-one mode by running 'sahara-all' binary."))

    plugins_base.setup_plugins()
Example #27
def _detach_volume(instance, volume_id):
    volume = cinder.get_volume(volume_id)
    try:
        LOG.debug("Detaching volume %s from instance %s" % (
            volume_id, instance.instance_name))
        nova.client().volumes.delete_server_volume(instance.instance_id,
                                                   volume_id)
    except Exception:
        LOG.exception(_LE("Can't detach volume %s"), volume.id)

    detach_timeout = CONF.detach_volume_timeout
    LOG.debug("Waiting %d seconds to detach %s volume" % (detach_timeout,
                                                          volume_id))
    s_time = tu.utcnow()
    while tu.delta_seconds(s_time, tu.utcnow()) < detach_timeout:
        volume = cinder.get_volume(volume_id)
        if volume.status not in ['available', 'error']:
            context.sleep(2)
        else:
            LOG.debug("Volume %s has been detached" % volume_id)
            return
    else:
        LOG.warn(_LW("Can't detach volume %(volume)s. "
                     "Current status of volume: %(status)s"),
                 {'volume': volume_id, 'status': volume.status})
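
Note: a self-contained sketch of the poll-until-deadline loop above, with time.monotonic replacing the oslo timeutils helpers and a status callable standing in for cinder.get_volume:

import time

def wait_until_detached(get_status, timeout=60, poll=2):
    # Poll the volume status until it leaves the attached states or the
    # deadline passes; mirrors the detach_volume_timeout loop above.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if get_status() in ('available', 'error'):
            return True
        time.sleep(poll)
    return False  # timed out; the caller should log a warning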
Example #28
def execute_with_retries(method, *args, **kwargs):
    attempts = CONF.retries.retries_number + 1
    while attempts > 0:
        try:
            return method(*args, **kwargs)
        except Exception as e:
            error_code = getattr(e, 'http_status', None) or getattr(
                e, 'status_code', None) or getattr(e, 'code', None)
            if error_code in ERRORS_TO_RETRY:
                LOG.warning(_LW('Occasional error occurred during "{method}" '
                                'execution: {error_msg} ({error_code}). '
                                'Operation will be retried.').format(
                            method=method.__name__,
                            error_msg=e,
                            error_code=error_code))
                attempts -= 1
                retry_after = getattr(e, 'retry_after', 0)
                context.sleep(max(retry_after, CONF.retries.retry_after))
            else:
                LOG.debug('Permanent error occurred during "{method}" '
                          'execution: {error_msg}.'.format(
                              method=method.__name__, error_msg=e))
                raise e
    else:
        raise ex.MaxRetriesExceeded(attempts, method.__name__)
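
Note: a trimmed-down, self-contained version of the retry-budget pattern above. The transient status codes are an assumption standing in for Sahara's ERRORS_TO_RETRY, and RuntimeError stands in for ex.MaxRetriesExceeded:

import time

TRANSIENT_CODES = {408, 429, 500, 503}  # assumption, not Sahara's actual list

def call_with_retries(method, *args, **kwargs):
    attempts = 3 + 1  # retries_number + 1, as in the snippet
    while attempts > 0:
        try:
            return method(*args, **kwargs)
        except Exception as e:
            code = (getattr(e, 'http_status', None) or
                    getattr(e, 'status_code', None) or
                    getattr(e, 'code', None))
            if code not in TRANSIENT_CODES:
                raise  # permanent error: do not retry
            attempts -= 1
            # honor a server-supplied Retry-After hint, with a floor of 1s
            time.sleep(max(getattr(e, 'retry_after', 0), 1))
    raise RuntimeError('max retries exceeded for %s' % method.__name__)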
Example #29
File: volumes.py Project: vnogin/sahara
def _format_device(instance,
                   device,
                   use_xfs,
                   formatted_devices=None,
                   lock=None):
    with instance.remote() as r:
        try:
            timeout = _get_timeout_for_disk_preparing(instance.cluster)

            # Format devices with better performance options:
            # - reduce number of blocks reserved for root to 1%
            # - use 'dir_index' for faster directory listings
            # - use 'extents' to work faster with large files
            # - disable journaling
            fs_opts = '-F -m 1 -O dir_index,extents,^has_journal'
            command = 'sudo mkfs.ext4 %s %s' % (fs_opts, device)
            if use_xfs:
                command = 'sudo mkfs.xfs -f %s' % device
            r.execute_command(command, timeout=timeout)
            if lock:
                with lock:
                    formatted_devices.append(device)
        except Exception as e:
            LOG.warning(
                _LW("Device {dev} cannot be formatted: {reason}").format(
                    dev=device, reason=e))
            cpo.add_fail_event(instance, e)
Example #30
    def __call__(self, env, start_response):
        """Ensures that tenants in url and token are equal.

        Handle incoming request by checking tenant info from the headers and
        url ({tenant_id} url attribute).

        Pass request downstream on success.
        Reject request if tenant_id from headers is not equal to tenant_id
        from url.
        """
        token_tenant = env['HTTP_X_TENANT_ID']
        if not token_tenant:
            LOG.warn(_LW("Can't get tenant_id from env"))
            resp = ex.HTTPServiceUnavailable()
            return resp(env, start_response)

        path = env['PATH_INFO']
        if path != '/':
            version, url_tenant, rest = commons.split_path(path, 3, 3, True)
            if not version or not url_tenant or not rest:
                LOG.info(_LI("Incorrect path: %s"), path)
                resp = ex.HTTPNotFound(_("Incorrect path"))
                return resp(env, start_response)

            if token_tenant != url_tenant:
                LOG.debug("Unauthorized: token tenant != requested tenant")
                resp = ex.HTTPUnauthorized(
                    _('Token tenant != requested tenant'))
                return resp(env, start_response)

        return self.app(env, start_response)
Example #31
def terminate_cluster(ctx, cluster, description):
    if CONF.use_identity_api_v3:
        trusts.use_os_admin_auth_token(cluster)

        LOG.debug('Terminating {description} cluster {cluster} '
                  'in "{status}" state with id {id}'
                  .format(cluster=cluster.name,
                          id=cluster.id,
                          status=cluster.status,
                          description=description))

        try:
            ops.terminate_cluster(cluster.id)
        except Exception as e:
            LOG.warning(_LW('Failed to terminate {description} cluster '
                            '{cluster} in "{status}" state with id {id}: '
                            '{error}.').format(cluster=cluster.name,
                                               id=cluster.id,
                                               error=six.text_type(e),
                                               status=cluster.status,
                                               description=description))

    else:
        if cluster.status != 'AwaitingTermination':
            conductor.cluster_update(
                ctx,
                cluster,
                {'status': 'AwaitingTermination'})
Example #32
    def create_cluster(self, cluster):
        ctx = context.ctx()

        launcher = _CreateLauncher()

        try:
            target_count = self._get_ng_counts(cluster)
            self._nullify_ng_counts(cluster)

            cluster = conductor.cluster_get(ctx, cluster)
            launcher.launch_instances(ctx, cluster, target_count)

            cluster = conductor.cluster_get(ctx, cluster)
            self._add_volumes(ctx, cluster)

        except Exception as ex:
            with excutils.save_and_reraise_exception():
                if not g.check_cluster_exists(cluster):
                    LOG.info(g.format_cluster_deleted_message(cluster))
                    return
                self._log_operation_exception(
                    _LW("Can't start cluster '%(cluster)s' "
                        "(reason: %(reason)s)"), cluster, ex)

                cluster = g.change_cluster_status(
                    cluster, "Error", status_description=six.text_type(ex))
                self._rollback_cluster_creation(cluster)
Example #33
def _detach_volume(instance, volume_id):
    volume = cinder.get_volume(volume_id)
    try:
        LOG.debug("Detaching volume %s from instance %s" %
                  (volume_id, instance.instance_name))
        nova.client().volumes.delete_server_volume(instance.instance_id,
                                                   volume_id)
    except Exception:
        LOG.exception(_LE("Can't detach volume %s"), volume.id)

    detach_timeout = CONF.detach_volume_timeout
    LOG.debug("Waiting %d seconds to detach %s volume" %
              (detach_timeout, volume_id))
    s_time = tu.utcnow()
    while tu.delta_seconds(s_time, tu.utcnow()) < detach_timeout:
        volume = cinder.get_volume(volume_id)
        if volume.status not in ['available', 'error']:
            context.sleep(2)
        else:
            LOG.debug("Volume %s has been detached" % volume_id)
            return
    else:
        LOG.warn(
            _LW("Can't detach volume %(volume)s. "
                "Current status of volume: %(status)s"), {
                    'volume': volume_id,
                    'status': volume.status
                })
Example #34
    def __call__(self, env, start_response):
        """Ensures that tenants in url and token are equal.

        Handle incoming request by checking tenant info from the headers and
        url ({tenant_id} url attribute).

        Pass request downstream on success.
        Reject request if tenant_id from headers is not equal to tenant_id
        from url.
        """
        token_tenant = env["HTTP_X_TENANT_ID"]
        if not token_tenant:
            LOG.warn(_LW("Can't get tenant_id from env"))
            resp = ex.HTTPServiceUnavailable()
            return resp(env, start_response)

        path = env["PATH_INFO"]
        if path != "/":
            version, url_tenant, rest = commons.split_path(path, 3, 3, True)
            if not version or not url_tenant or not rest:
                LOG.info(_LI("Incorrect path: %s"), path)
                resp = ex.HTTPNotFound(_("Incorrect path"))
                return resp(env, start_response)

            if token_tenant != url_tenant:
                LOG.debug("Unauthorized: token tenant != requested tenant")
                resp = ex.HTTPUnauthorized(_("Token tenant != requested tenant"))
                return resp(env, start_response)

        return self.app(env, start_response)
Example #35
def terminate_cluster(ctx, cluster, description):
    if CONF.use_identity_api_v3:
        trusts.use_os_admin_auth_token(cluster)

        LOG.debug(
            'Terminating %(description)s cluster %(cluster)s '
            'in "%(status)s" state with id %(id)s', {
                'cluster': cluster.name,
                'id': cluster.id,
                'status': cluster.status,
                'description': description
            })

        try:
            ops.terminate_cluster(cluster.id)
        except Exception as e:
            LOG.warn(
                _LW('Failed to terminate %(description)s cluster '
                    '%(cluster)s in "%(status)s" state with id %(id)s: '
                    '%(error)s.'), {
                        'cluster': cluster.name,
                        'id': cluster.id,
                        'error': six.text_type(e),
                        'status': cluster.status,
                        'description': description
                    })

    else:
        if cluster.status != 'AwaitingTermination':
            conductor.cluster_update(ctx, cluster,
                                     {'status': 'AwaitingTermination'})
Example #36
File: context.py Project: COSHPC/sahara
    def __init__(self,
                 user_id=None,
                 tenant_id=None,
                 token=None,
                 service_catalog=None,
                 username=None,
                 tenant_name=None,
                 roles=None,
                 is_admin=None,
                 remote_semaphore=None,
                 auth_uri=None,
                 **kwargs):
        if kwargs:
            LOG.warn(_LW('Arguments dropped when creating context: %s'),
                     kwargs)
        self.user_id = user_id
        self.tenant_id = tenant_id
        self.token = token
        self.service_catalog = service_catalog
        self.username = username
        self.tenant_name = tenant_name
        self.is_admin = is_admin
        self.remote_semaphore = remote_semaphore or semaphore.Semaphore(
            CONF.cluster_remote_threshold)
        self.roles = roles
        self.auth_uri = auth_uri or acl.AUTH_URI
Example #37
def do_cluster_template_delete_by_id():
    ctx = Context(is_admin=True)

    # Make sure it's a default
    t = conductor.API.cluster_template_get(ctx, CONF.command.id)
    if t:
        if t["is_default"]:
            delete_cluster_template(ctx, t)
        else:
            LOG.warning(
                _LW("Deletion of cluster template {info} skipped, "
                    "not a default template").format(info=u.name_and_id(t)))
    else:
        LOG.warning(
            _LW("Deletion of cluster template {id} failed, "
                "no such template").format(id=CONF.command.id))
Example #38
    def _merge_configurations(cluster_spec, src_config_name, dst_config_name):
        LOG.info(
            _LI('Merging configuration properties: %(source)s -> '
                '%(destination)s'), {
                    'source': src_config_name,
                    'destination': dst_config_name
                })

        src_config = cluster_spec.configurations[src_config_name]
        dst_config = cluster_spec.configurations[dst_config_name]

        if src_config is None:
            LOG.warning(
                _LW('Missing source configuration property set, '
                    'aborting merge: {0}').format(src_config_name))
        elif dst_config is None:
            LOG.warning(
                _LW('Missing destination configuration property set, '
                    'aborting merge: {0}').format(dst_config_name))
        else:
            for property_name, property_value in six.iteritems(src_config):
                if property_name in dst_config:
                    if dst_config[property_name] == src_config[property_name]:
                        LOG.debug('Skipping unchanged configuration property '
                                  'in {0} and {1}: {2}'.format(
                                      dst_config_name, src_config_name,
                                      property_name))
                    else:
                        LOG.warning(
                            _LW('Overwriting existing configuration '
                                'property in %(dst_config_name)s from '
                                '%(src_config_name)s for Hue: '
                                '%(property_name)s '
                                '[%(dst_config)s -> %(src_config)s]'), {
                                    'dst_config_name': dst_config_name,
                                    'src_config_name': src_config_name,
                                    'property_name': property_name,
                                    'dst_config': dst_config[property_name],
                                    'src_config': src_config[property_name]
                                })
                else:
                    LOG.debug('Adding Hue configuration property to {0} from '
                              '{1}: {2}'.format(dst_config_name,
                                                src_config_name,
                                                property_name))

                dst_config[property_name] = property_value
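
Note: the merge policy above reduces to "copy every source property into the destination, logging unchanged, overwritten, and new keys". A plain-dict sketch, for illustration:

def merge_properties(src, dst):
    for key, value in src.items():
        if key in dst and dst[key] != value:
            print('overwriting %s: %r -> %r' % (key, dst[key], value))
        dst[key] = value
    return dst

merge_properties({'a': 1, 'b': 2}, {'b': 3})  # overwrites b, adds a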
Example #39
    def _log_operation_exception(self, message, cluster, ex):
        # we want to log the initial exception even if cluster was deleted
        cluster_name = cluster.name if cluster is not None else '_unknown_'
        LOG.warn(message, {'cluster': cluster_name, 'reason': ex})
        if cluster is None:
            LOG.warn(
                _LW("Presumably the operation failed because the cluster was "
                    "deleted by a user during the process."))
Example #40
File: api.py Project: shamim8888/sahara
def drop_db():
    try:
        engine = get_engine()
        m.Cluster.metadata.drop_all(engine)
    except Exception as e:
        LOG.warning(_LW("Database shutdown exception: {exc}").format(exc=e))
        return False
    return True
Example #41
File: context.py Project: uladz/sahara
def get_auth_token():
    cur = current()
    if cur.auth_plugin:
        try:
            cur.auth_token = sessions.cache().token_for_auth(cur.auth_plugin)
        except Exception as e:
            LOG.warning(_LW("Cannot update token, reason: {reason}"), e)
    return cur.auth_token
Example #42
def drop_db():
    try:
        engine = get_engine()
        m.Cluster.metadata.drop_all(engine)
    except Exception as e:
        LOG.warning(_LW("Database shutdown exception: {exc}").format(exc=e))
        return False
    return True
Example #43
def delete_node_group_template(ctx, template, rollback=False):
    rollback_msg = " on rollback" if rollback else ""

    # If we are not deleting something that we just created,
    # do usage checks to ensure that the template is not in
    # use by a cluster or a cluster template
    if not rollback:
        clusters = conductor.API.cluster_get_all(ctx)
        cluster_templates = conductor.API.cluster_template_get_all(ctx)
        cluster_users, template_users = u.check_node_group_template_usage(
            template["id"], clusters, cluster_templates)

        if cluster_users:
            LOG.warning(
                _LW("Node group template {info} "
                    "in use by clusters {clusters}").format(
                        info=u.name_and_id(template), clusters=cluster_users))
        if template_users:
            LOG.warning(
                _LW("Node group template {info} "
                    "in use by cluster templates {templates}").format(
                        info=u.name_and_id(template),
                        templates=template_users))

        if cluster_users or template_users:
            LOG.warning(
                _LW("Deletion of node group template "
                    "{info} failed").format(info=u.name_and_id(template)))
            return

    try:
        conductor.API.node_group_template_destroy(ctx,
                                                  template["id"],
                                                  ignore_prot_on_def=True)
    except Exception as e:
        LOG.warning(
            _LW("Deletion of node group template {info} "
                "failed{rollback}, {reason}").format(
                    info=u.name_and_id(template),
                    reason=e,
                    rollback=rollback_msg))
    else:
        LOG.info(
            _LI("Deleted node group template {info}{rollback}").format(
                info=u.name_and_id(template), rollback=rollback_msg))
Example #44
File: cinder.py Project: msionkin/sahara
def validate_config():
    if CONF.cinder.api_version != 2:
        LOG.warning(_LW('Unsupported Cinder API version: {bad}.  Please set a '
                        'correct value for cinder.api_version in your '
                        'sahara.conf file (currently supported versions are: '
                        '{supported}). Falling back to Cinder API version 2.')
                    .format(bad=CONF.cinder.api_version,
                            supported=[2]))
        CONF.set_override('api_version', 2, group='cinder', enforce_type=True)
Example #45
File: api.py Project: shamim8888/sahara
def setup_db():
    try:
        engine = get_engine()
        m.Cluster.metadata.create_all(engine)
    except sa.exc.OperationalError as e:
        LOG.warning(_LW("Database registration exception: {exc}")
                    .format(exc=e))
        return False
    return True
Example #46
def setup_db():
    try:
        engine = get_engine()
        m.Cluster.metadata.create_all(engine)
    except sa.exc.OperationalError as e:
        LOG.warning(
            _LW("Database registration exception: {exc}").format(exc=e))
        return False
    return True
Example #47
def _get_infrastructure_engine():
    """Import and return one of sahara.service.*_engine.py modules."""
    if CONF.infrastructure_engine != "heat":
        LOG.warning(_LW("Engine {engine} is not supported. Loading Heat "
                        "infrastructure engine instead.").format(
            engine=CONF.infrastructure_engine))
    LOG.debug("Infrastructure engine {engine} is loading".format(
        engine=INFRASTRUCTURE_ENGINE))
    return _load_driver('sahara.infrastructure.engine', INFRASTRUCTURE_ENGINE)
Example #48
    def set_user_password(instance):
        LOG.debug('Setting password for user "mapr"')
        if self.mapr_user_exists(instance):
            with instance.remote() as r:
                r.execute_command('echo "%s:%s"|chpasswd' %
                                  ('mapr', 'mapr'),
                                  run_as_root=True)
        else:
            LOG.warning(_LW('User "mapr" does not exist'))
Example #49
File: deploy.py Project: jlozadad/sahara
def _get_topology_data(cluster):
    if not t_helper.is_data_locality_enabled():
        return {}

    LOG.warning(
        _LW("Node group awareness is not implemented in YARN yet "
            "so enable_hypervisor_awareness set to False "
            "explicitly"))
    return t_helper.generate_topology_map(cluster, is_node_awareness=False)
Example #50
    def create_home_mapr(instance):
        target_path = '/home/mapr'
        LOG.debug("Creating home directory for user 'mapr'")
        args = {'path': target_path}
        cmd = 'mkdir -p %(path)s && chown mapr:mapr %(path)s' % args
        if self.mapr_user_exists(instance):
            with instance.remote() as r:
                r.execute_command(cmd, run_as_root=True)
        else:
            LOG.warning(_LW('User "mapr" does not exist'))
Example #51
File: api.py Project: thefuyang/sahara
def do_cluster_template_delete():
    ctx = Context(tenant_id=CONF.command.tenant_id)

    template_name = CONF.command.template_name
    t = u.find_cluster_template_by_name(ctx, template_name)
    if t:
        delete_cluster_template(ctx, t)
    else:
        LOG.warning(_LW("Deletion of cluster template {name} failed, "
                    "no such template").format(name=template_name))
Example #52
    def scale_cluster(self, cluster, target_count):
        ctx = context.ctx()

        rollback_count = self._get_ng_counts(cluster)

        launcher = _ScaleLauncher()

        try:
            launcher.launch_instances(ctx, cluster, target_count)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                if not g.check_cluster_exists(cluster):
                    LOG.info(g.format_cluster_deleted_message(cluster))
                    return
                self._log_operation_exception(
                    _LW("Can't scale cluster '%(cluster)s' "
                        "(reason: %(reason)s)"), cluster, ex)

                cluster = conductor.cluster_get(ctx, cluster)

                try:
                    self._rollback_cluster_scaling(
                        ctx, cluster, rollback_count, target_count)
                except Exception:
                    if not g.check_cluster_exists(cluster):
                        LOG.info(g.format_cluster_deleted_message(cluster))
                        return
                    # if something fails during the rollback, we stop
                    # doing anything further
                    cluster = g.change_cluster_status(cluster, "Error")
                    LOG.error(_LE("Unable to complete rollback, aborting"))
                    raise

                cluster = g.change_cluster_status(cluster, "Active")
                LOG.warn(
                    _LW("Rollback successful. "
                        "Throwing off an initial exception."))
        finally:
            cluster = conductor.cluster_get(ctx, cluster)
            g.clean_cluster_from_empty_ng(cluster)

        return launcher.inst_ids
Example #53
    def rollback_cluster(self, cluster, reason):
        rollback_info = cluster.rollback_info or {}
        self._update_rollback_strategy(cluster)

        if rollback_info.get('shutdown', False):
            self._rollback_cluster_creation(cluster, reason)
            LOG.warning(_LW("Cluster creation rollback "
                            "(reason: {reason})").format(reason=reason))
            return False

        instance_ids = rollback_info.get('instance_ids', [])
        if instance_ids:
            self._rollback_cluster_scaling(
                cluster, g.get_instances(cluster, instance_ids), reason)
            LOG.warning(_LW("Cluster scaling rollback "
                            "(reason: {reason})").format(reason=reason))

            return True

        return False
Example #54
File: shares.py Project: crobby/sahara
    def setup_instance(cls, remote):
        """Prepares an instance to mount this type of share."""
        response = remote.execute_command('lsb_release -is')
        distro = response[1].strip().lower()
        if distro in cls._NFS_CHECKS:
            command = cls._NFS_CHECKS[distro]
            remote.execute_command(command, run_as_root=True)
        else:
            LOG.warning(
                _LW("Cannot verify installation of NFS mount tools for "
                    "unknown distro {distro}.").format(distro=distro))