Example No. 1
def domain_for_proxy():
    '''Return the proxy domain or None

    If configured to use the proxy domain, this function will return that
    domain. If not configured to use the proxy domain, this function will
    return None. If the proxy domain can't be found, this will raise an
    exception.

    :returns: A Keystone Domain object or None.
    :raises ConfigurationError: If the domain is requested but not specified.
    :raises NotFoundException: If the domain name is specified but cannot be
                               found.

    '''
    if CONF.use_domain_for_proxy_users is False:
        return None
    if CONF.proxy_user_domain_name is None:
        raise ex.ConfigurationError(_('Proxy domain requested but not '
                                      'specified.'))
    admin = k.client_for_admin()

    global PROXY_DOMAIN
    if not PROXY_DOMAIN:
        domain_list = admin.domains.list(name=CONF.proxy_user_domain_name)
        if len(domain_list) == 0:
            raise ex.NotFoundException(value=CONF.proxy_user_domain_name,
                                       message=_('Failed to find domain %s'))
        # the domain name should be globally unique in Keystone
        if len(domain_list) > 1:
            raise ex.NotFoundException(value=CONF.proxy_user_domain_name,
                                       message=_('Unexpected results found '
                                                 'when searching for domain '
                                                 '%s'))
        PROXY_DOMAIN = domain_list[0]
    return PROXY_DOMAIN
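Note that the `global PROXY_DOMAIN` statement relies on a module-level cache existing before the first call. A minimal sketch of the assumed module-level initialization (not shown in the snippet above):

# Assumed module-level cache for the resolved Keystone proxy domain; it is
# filled on the first successful lookup so later calls skip domains.list().
PROXY_DOMAIN = None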
Example No. 2
def proxy_user_delete(username=None, user_id=None):
    '''Delete the user from the proxy domain.

    :param username: The name of the user to delete.
    :param user_id: The id of the user to delete; if provided, this overrides
                    the username.
    :raises NotFoundException: If there is an error locating the user in the
                               proxy domain.

    '''
    admin = k.client_for_admin()
    if not user_id:
        domain = domain_for_proxy()
        user_list = b.execute_with_retries(admin.users.list,
                                           domain=domain.id,
                                           name=username)
        if len(user_list) == 0:
            raise ex.NotFoundException(
                value=username, message_template=_('Failed to find user %s'))
        if len(user_list) > 1:
            raise ex.NotFoundException(
                value=username,
                message_template=_('Unexpected results found when searching '
                                   'for user %s'))
        user_id = user_list[0].id
    b.execute_with_retries(admin.users.delete, user_id)
    LOG.debug('Deleted proxy user id {user_id}'.format(user_id=user_id))
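Per the docstring, either `username` or `user_id` can be supplied, and `user_id` takes precedence because it skips the lookup in the proxy domain. A hedged usage sketch (the values are illustrative):

# Delete by name: the user is first located in the proxy domain.
proxy_user_delete(username='job-wf-proxy-user')

# Delete by id: no lookup is needed ('user' is an illustrative user object).
proxy_user_delete(user_id=user.id)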
Example No. 3
def check_cluster_create(data, **kwargs):
    b.check_cluster_unique_name(data['name'])
    b.check_plugin_name_exists(data['plugin_name'])
    b.check_plugin_supports_version(data['plugin_name'],
                                    data['hadoop_version'])
    if data.get('cluster_template_id'):
        ct_id = data['cluster_template_id']
        b.check_cluster_template_exists(ct_id)
        if not data.get('node_groups'):
            b.check_node_groups_in_cluster_templates(data['name'],
                                                     data['plugin_name'],
                                                     data['hadoop_version'],
                                                     ct_id)

    if data.get('user_keypair_id'):
        b.check_keypair_exists(data['user_keypair_id'])

    default_image_id = _get_cluster_field(data, 'default_image_id')
    if default_image_id:
        b.check_image_registered(default_image_id)
        b.check_required_image_tags(data['plugin_name'],
                                    data['hadoop_version'], default_image_id)
    else:
        raise ex.NotFoundException('default_image_id',
                                   _("'%s' field is not found"))

    b.check_all_configurations(data)

    if data.get('anti_affinity'):
        b.check_node_processes(data['plugin_name'], data['hadoop_version'],
                               data['anti_affinity'])

    if data.get('node_groups'):
        proxy_gateway_used = any(
            ng.get('is_proxy_gateway', False) for ng in data['node_groups'])
        b.check_network_config(data['node_groups'], proxy_gateway_used)
        b.check_cluster_hostnames_lengths(data['name'], data['node_groups'])

    neutron_net_id = _get_cluster_field(data, 'neutron_management_network')
    if neutron_net_id:
        if not CONF.use_neutron:
            raise ex.InvalidReferenceException(
                _("'neutron_management_network' field can't be used "
                  "with 'use_neutron=False'"))
        b.check_network_exists(neutron_net_id)
    else:
        if CONF.use_neutron:
            raise ex.NotFoundException('neutron_management_network',
                                       message=_("'%s' field is not found"))
Example No. 4
def data_source_update(context, values):
    session = get_session()
    try:
        with session.begin():
            ds_id = values['id']
            data_source = _data_source_get(context, session, ds_id)
            if not data_source:
                raise ex.NotFoundException(ds_id,
                                           _("DataSource id '%s' not found"))
            else:
                jobs = job_execution_get_all(context)
                pending_jobs = [
                    job for job in jobs if job.info["status"] == "PENDING"
                ]
                for job in pending_jobs:
                    if job.data_source_urls:
                        if ds_id in job.data_source_urls:
                            raise ex.UpdateFailedException(
                                _("DataSource is used in a "
                                  "PENDING Job and can not be updated."))
            data_source.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for DataSource: %s") % e.columns)

    return data_source
Example No. 5
def node_group_template_update(context, values, ignore_default=False):
    session = get_session()
    try:
        with session.begin():
            ngt_id = values['id']
            ngt = _node_group_template_get(context, session, ngt_id)
            if not ngt:
                raise ex.NotFoundException(
                    ngt_id, _("NodeGroupTemplate id '%s' not found"))
            elif not ignore_default and ngt.is_default:
                raise ex.UpdateFailedException(
                    ngt_id,
                    _("NodeGroupTemplate id '%s' can not be updated. "
                      "It is a default template."))

            # Check to see that the node group template to be updated is not in
            # use by an existing cluster.
            for template_relationship in ngt.templates_relations:
                if len(template_relationship.cluster_template.clusters) > 0:
                    raise ex.UpdateFailedException(
                        ngt_id,
                        _("NodeGroupTemplate id '%s' can not be updated. "
                          "It is referenced by an existing cluster."))

            ngt.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for NodeGroupTemplate: %s") % e.columns)

    return ngt
Example No. 6
        def handler(*args, **kwargs):
            if id_prop and not get_args:
                get_args['id'] = id_prop[0]

            if 'marker' in id_prop:
                if 'marker' not in u.get_request_args():
                    return func(*args, **kwargs)
                kwargs['marker'] = u.get_request_args()['marker']

            get_kwargs = {}
            for get_arg in get_args:
                get_kwargs[get_arg] = kwargs[get_args[get_arg]]

            obj = None
            try:
                obj = get_func(**get_kwargs)
            except Exception as e:
                cls_name = reflection.get_class_name(e, fully_qualified=False)
                if 'notfound' not in cls_name.lower():
                    raise e
            if obj is None:
                e = ex.NotFoundException(get_kwargs,
                                         _('Object with %s not found'))
                return u.not_found(e)
            if 'marker' in kwargs:
                del kwargs['marker']
            return func(*args, **kwargs)
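The `handler` above is an inner function: `get_func`, `id_prop`, `get_args`, and `func` are free variables supplied by an enclosing decorator. A minimal, hypothetical sketch of that enclosing structure (the name `check_exists` and the exact signature are assumptions, not the real code):

# Hypothetical decorator skeleton inferred from the names the inner handler
# uses; only the wrapping structure is sketched here.
import functools


def check_exists(get_func, *id_prop, **get_args):
    def decorator(func):
        @functools.wraps(func)
        def handler(*args, **kwargs):
            # ... body as shown in Example No. 6 ...
            return func(*args, **kwargs)
        return handler
    return decorator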
Example No. 7
def to_wrapped_dict(func, id, *args, **kwargs):
    obj = func(id, *args, **kwargs)
    if obj is None:
        e = ex.NotFoundException({'id': id}, _('Object with %s not found'))

        return not_found(e)
    return render(obj.to_wrapped_dict())
Example No. 8
def get_stack(stack_name):
    heat = client()
    for stack in heat.stacks.list(filters={'stack_name': stack_name}):
        return stack

    raise ex.NotFoundException(
        _('Failed to find stack %(stack)s') % {'stack': stack_name})
Example No. 9
def _check_cluster_create(data):
    plugin_version = 'hadoop_version'
    if data.get('plugin_version'):
        plugin_version = 'plugin_version'
    b.check_plugin_name_exists(data['plugin_name'])
    b.check_plugin_supports_version(data['plugin_name'],
                                    data[plugin_version])
    b.check_plugin_labels(
        data['plugin_name'], data[plugin_version])

    if data.get('cluster_template_id'):
        ct_id = data['cluster_template_id']
        b.check_cluster_template_exists(ct_id)
        if not data.get('node_groups'):
            b.check_node_groups_in_cluster_templates(data['name'],
                                                     data['plugin_name'],
                                                     data[plugin_version],
                                                     ct_id)

    if data.get('user_keypair_id'):
        b.check_keypair_exists(data['user_keypair_id'])

    default_image_id = _get_cluster_field(data, 'default_image_id')
    if default_image_id:
        b.check_image_registered(default_image_id)
        b.check_required_image_tags(data['plugin_name'],
                                    data[plugin_version],
                                    default_image_id)
    else:
        raise ex.NotFoundException('default_image_id',
                                   _("'%s' field is not found"))

    b.check_all_configurations(data)

    if data.get('anti_affinity'):
        b.check_node_processes(data['plugin_name'], data[plugin_version],
                               data['anti_affinity'])

    if data.get('node_groups'):
        b.check_cluster_hostnames_lengths(data['name'], data['node_groups'])

    neutron_net_id = _get_cluster_field(data, 'neutron_management_network')
    if neutron_net_id:
        b.check_network_exists(neutron_net_id)
    else:
        raise ex.NotFoundException('neutron_management_network',
                                   _("'%s' field is not found"))
Example No. 10
def node_group_template_update(context, values, ignore_prot_on_def=False):
    session = get_session()
    try:
        with session.begin():
            ngt_id = values['id']
            ngt = _node_group_template_get(context, session, ngt_id)
            if not ngt:
                raise ex.NotFoundException(
                    ngt_id, _("NodeGroupTemplate id '%s' not found"))

            validate.check_tenant_for_update(context, ngt)
            if not (ngt.is_default and ignore_prot_on_def):
                validate.check_protected_from_update(ngt, values)

            # Check to see that the node group template to be updated is not in
            # use by an existing cluster.
            for template_relationship in ngt.templates_relations:
                if len(template_relationship.cluster_template.clusters) > 0:
                    raise ex.UpdateFailedException(
                        ngt_id,
                        _("NodeGroupTemplate id '%s' can not be updated. "
                          "It is referenced by an existing cluster."))

            ngt.update(values)

            # Here we update any cluster templates that reference the
            # updated node group template
            for template_relationship in ngt.templates_relations:
                ct_id = template_relationship.cluster_template_id
                ct = cluster_template_get(
                    context, template_relationship.cluster_template_id)
                node_groups = ct.node_groups
                ct_node_groups = []
                for ng in node_groups:
                    # Need to fill in all node groups, not just
                    # the modified group
                    ng_to_add = ng
                    if ng.node_group_template_id == ngt_id:
                        # use the updated node group template
                        ng_to_add = ngt
                    ng_to_add = ng_to_add.to_dict()
                    ng_to_add.update(
                        {"count": ng["count"],
                         "node_group_template_id": ng.node_group_template_id})
                    ng_to_add.pop("updated_at", None)
                    ng_to_add.pop("created_at", None)
                    ng_to_add.pop("id", None)
                    ct_node_groups.append(ng_to_add)
                ct_update = {"id": ct_id, "node_groups": ct_node_groups}
                cluster_template_update(context, ct_update, ignore_prot_on_def)

    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for NodeGroupTemplate: %s") % e.columns)

    return ngt
Example No. 11
def check_cluster_scaling(data, cluster_id, **kwargs):
    ctx = context.current()
    cluster = api.get_cluster(id=cluster_id)

    if cluster is None:
        raise ex.NotFoundException({'id': cluster_id},
                                   _('Object with %s not found'))

    b.check_plugin_labels(cluster.plugin_name, cluster.hadoop_version)

    acl.check_tenant_for_update(ctx, cluster)
    acl.check_protected_from_update(cluster, data)

    cluster_engine = cluster.sahara_info.get(
        'infrastructure_engine') if cluster.sahara_info else None

    engine_type_and_version = service_api.OPS.get_engine_type_and_version()
    if (not cluster_engine
            and not engine_type_and_version.startswith('direct')):
        raise ex.InvalidReferenceException(
            _("Cluster created before Juno release "
              "can't be scaled with %(engine)s engine") %
            {"engine": engine_type_and_version})

    if (cluster.sahara_info and cluster_engine != engine_type_and_version):
        raise ex.InvalidReferenceException(
            _("Cluster created with %(old_engine)s infrastructure engine "
              "can't be scaled with %(new_engine)s engine") %
            {
                "old_engine": cluster.sahara_info.get('infrastructure_engine'),
                "new_engine": engine_type_and_version
            })

    if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'scale_cluster') and
            (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                      'decommission_nodes'))):
        raise ex.InvalidReferenceException(
            _("Requested plugin '%s' doesn't support cluster scaling feature")
            % cluster.plugin_name)

    if cluster.status != c_u.CLUSTER_STATUS_ACTIVE:
        raise ex.InvalidReferenceException(
            _("Cluster cannot be scaled not in 'Active' status. "
              "Cluster status: %s") % cluster.status)

    if cluster.user_keypair_id:
        b.check_keypair_exists(cluster.user_keypair_id)

    if cluster.default_image_id:
        b.check_image_registered(cluster.default_image_id)

    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])
        b.check_cluster_hostnames_lengths(cluster.name,
                                          data['add_node_groups'])
Example No. 12
File: api.py Project: uladz/sahara
def cluster_template_update(context, values, ignore_default=False):
    explicit_node_groups = "node_groups" in values
    if explicit_node_groups:
        node_groups = values.pop("node_groups")
        if node_groups is None:
            node_groups = []

    session = get_session()
    cluster_template_id = values['id']
    try:
        with session.begin():
            cluster_template = (_cluster_template_get(
                context, session, cluster_template_id))
            if not cluster_template:
                raise ex.NotFoundException(
                    cluster_template_id,
                    _("Cluster Template id '%s' not found!"))

            elif not ignore_default and cluster_template.is_default:
                raise ex.UpdateFailedException(
                    cluster_template_id,
                    _("ClusterTemplate id '%s' can not be updated. "
                      "It is a default template.")
                )

            validate.check_tenant_for_update(context, cluster_template)
            validate.check_protected_from_update(cluster_template, values)

            if len(cluster_template.clusters) > 0:
                raise ex.UpdateFailedException(
                    cluster_template_id,
                    _("Cluster Template id '%s' can not be updated. "
                      "It is referenced by at least one cluster.")
                )
            cluster_template.update(values)
            # The flush here will cause a duplicate entry exception if
            # unique constraints are violated, before we go ahead and delete
            # the node group templates
            session.flush(objects=[cluster_template])

            # If node_groups has not been specified, then we are
            # keeping the old ones so don't delete!
            if explicit_node_groups:
                model_query(m.TemplatesRelation,
                            context, session=session).filter_by(
                    cluster_template_id=cluster_template_id).delete()

                for ng in node_groups:
                    node_group = m.TemplatesRelation()
                    node_group.update(ng)
                    node_group.update({"cluster_template_id":
                                       cluster_template_id})
                    session.add(node_group)

    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for ClusterTemplate: %s") % e.columns)

    return cluster_template_get(context, cluster_template_id)
Example No. 13
def check_security_groups_exist(security_groups):
    security_group_list = nova.client().security_groups.list()
    allowed_groups = set(
        reduce(operator.add, [[six.text_type(sg.id), sg.name]
                              for sg in security_group_list], []))
    for sg in security_groups:
        if sg not in allowed_groups:
            raise ex.NotFoundException(sg, _("Security group '%s' not found"))
Example No. 14
def _get_instance_obj(instances, instance_ref):
    for instance in instances:
        if (instance.instance_id == instance_ref
                or instance.instance_name == instance_ref):
            return instance

    raise exceptions.NotFoundException(str(instance_ref),
                                       _("Instance %s not found"))
Example No. 15
    def find(self, **kwargs):
        images = self.client.list(**kwargs)
        num_matches = len(images)
        if num_matches == 0:
            raise exc.NotFoundException(kwargs, "No images matching %s.")
        elif num_matches > 1:
            raise exc.NoUniqueMatchException(response=images, query=kwargs)
        else:
            return images[0]
Example No. 16
    def _add_to_streaming_element(self, element, path):
        if element not in ['mapper', 'reducer']:
            raise ex.NotFoundException(
                element, _('"%s" child cannot be added '
                           'to streaming element'))

        x.get_and_create_if_not_exist(self.doc, self.tag_name, 'streaming')

        x.add_text_element_to_tag(self.doc, 'streaming', element, path)
Example No. 17
def get_share(client_instance, share_id, raise_on_error=False):
    try:
        return client_instance.shares.get(share_id)
    except manila_ex.NotFound:
        if raise_on_error:
            raise ex.NotFoundException(share_id,
                                       _("Share with id %s was not found."))
        else:
            return None
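A short usage sketch based on the signature above (the client and id variables are illustrative):

# Tolerate a missing share: None is returned instead of raising.
share = get_share(manila_client, share_id)

# Treat a missing share as fatal: NotFoundException is raised.
share = get_share(manila_client, share_id, raise_on_error=True)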
Example No. 18
def remove_volume(context, instance_id, volume_id):
    session = get_session()
    with session.begin():
        instance = _instance_get(context, session, instance_id)
        if not instance:
            raise ex.NotFoundException(instance_id,
                                       _("Instance id '%s' not found!"))

        instance.volumes.remove(volume_id)
Example No. 19
def job_execution_destroy(context, job_execution_id):
    session = get_session()
    with session.begin():
        job_ex = _job_execution_get(context, session, job_execution_id)
        if not job_ex:
            raise ex.NotFoundException(job_execution_id,
                                       _("JobExecution id '%s' not found!"))

        session.delete(job_ex)
Example No. 20
def node_group_update(context, node_group_id, values):
    session = get_session()
    with session.begin():
        node_group = _node_group_get(context, session, node_group_id)
        if not node_group:
            raise ex.NotFoundException(node_group_id,
                                       _("Node Group id '%s' not found!"))

        node_group.update(values)
Example No. 21
def instance_update(context, instance_id, values):
    session = get_session()
    with session.begin():
        instance = _instance_get(context, session, instance_id)
        if not instance:
            raise ex.NotFoundException(instance_id,
                                       _("Instance id '%s' not found!"))

        instance.update(values)
Example No. 22
def cluster_destroy(context, cluster_id):
    session = get_session()
    with session.begin():
        cluster = _cluster_get(context, session, cluster_id)
        if not cluster:
            raise ex.NotFoundException(cluster_id,
                                       _("Cluster id '%s' not found!"))

        session.delete(cluster)
Example No. 23
def check_security_groups_exist(security_groups):
    security_group_list = neutron.client().list_security_groups()
    allowed_groups = set()
    for sg in security_group_list['security_groups']:
        allowed_groups.add(sg['name'])
        allowed_groups.add(sg['id'])

    for sg in security_groups:
        if sg not in allowed_groups:
            raise ex.NotFoundException(sg, _("Security group '%s' not found"))
Example No. 24
def get_stack(stack_name, raise_on_missing=True):
    for stack in base.execute_with_retries(client().stacks.list,
                                           filters={'name': stack_name}):
        return stack

    if not raise_on_missing:
        return None

    raise ex.NotFoundException({'stack': stack_name},
                               _('Failed to find stack %(stack)s'))
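Unlike Example No. 8, this variant can report a missing stack by returning None. A hedged usage sketch (the stack name is illustrative):

# Probe for an existing Heat stack without treating absence as an error.
stack = get_stack('cluster-1', raise_on_missing=False)
if stack is None:
    pass  # no stack yet; the caller may choose to create one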
Example No. 25
def check_security_groups_exist(security_groups):
    security_group_list = nova.client().security_groups.list()
    allowed_groups = set()
    for sg in security_group_list:
        allowed_groups.add(six.text_type(sg.id))
        allowed_groups.add(sg.name)

    for sg in security_groups:
        if sg not in allowed_groups:
            raise ex.NotFoundException(sg, _("Security group '%s' not found"))
Example No. 26
def job_update(context, job_id, values):
    session = get_session()

    with session.begin():
        job = _job_get(context, session, job_id)
        if not job:
            raise ex.NotFoundException(job_id, _("Job id '%s' not found!"))
        job.update(values)

    return job
Example No. 27
def node_group_remove(context, node_group_id):
    session = get_session()

    with session.begin():
        node_group = _node_group_get(context, session, node_group_id)
        if not node_group:
            raise ex.NotFoundException(node_group_id,
                                       _("Node Group id '%s' not found!"))

        session.delete(node_group)
Example No. 28
def job_binary_internal_destroy(context, job_binary_internal_id):
    session = get_session()
    with session.begin():
        job_binary_internal = _job_binary_internal_get(context, session,
                                                       job_binary_internal_id)
        if not job_binary_internal:
            raise ex.NotFoundException(job_binary_internal_id,
                                       "JobBinaryInternal id '%s' not found!")

        session.delete(job_binary_internal)
Example No. 29
def cluster_template_destroy(context, cluster_template_id):
    session = get_session()
    with session.begin():
        cluster_template = _cluster_template_get(context, session,
                                                 cluster_template_id)
        if not cluster_template:
            raise ex.NotFoundException(cluster_template_id,
                                       "Cluster Template id '%s' not found!")

        session.delete(cluster_template)
Example No. 30
    def _add_to_prepare_element(self, element, paths):
        if element not in ['delete', 'mkdir']:
            raise ex.NotFoundException(
                element, _('"%s" child cannot be '
                           'added to prepare element'))
        prop = x.get_and_create_if_not_exist(self.doc, self.tag_name,
                                             'prepare')
        for path in paths:
            elem = xml.parseString('<%s path="%s"/>' % (element, path))
            prop.appendChild(elem.firstChild)