Example #1
def delete_proxy_user_for_job_execution(job_execution):
    '''Delete a proxy user based on a JobExecution

    :param job_execution: The job execution with proxy user information
    :returns: An updated job_configs dictionary or None

    '''
    proxy_configs = job_execution.job_configs.get('proxy_configs')
    if proxy_configs is not None:
        proxy_username = proxy_configs.get('proxy_username')
        key = key_manager.API().get(
            context.current(), proxy_configs.get('proxy_password'))
        proxy_password = key.get_encoded()
        proxy_trust_id = proxy_configs.get('proxy_trust_id')
        proxy_user = k.auth_for_proxy(proxy_username,
                                      proxy_password,
                                      proxy_trust_id)
        t.delete_trust(proxy_user, proxy_trust_id)
        proxy_user_delete(proxy_username)
        key_manager.API().delete(context.current(),
                                 proxy_configs.get('proxy_password'))
        update = job_execution.job_configs.to_dict()
        del update['proxy_configs']
        return update
    return None
Example #2
def url_for(service_catalog=None, service_type="identity", endpoint_type="publicURL"):
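    # Try parsing the catalog in the v2 format first; on EndpointNotFound, retry with the v3 format.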
    if not service_catalog:
        service_catalog = context.current().service_catalog
    try:
        return keystone_service_catalog.ServiceCatalogV2({"serviceCatalog": json.loads(service_catalog)}).url_for(
            service_type=service_type, endpoint_type=endpoint_type, region_name=CONF.os_region_name
        )
    except keystone_ex.EndpointNotFound:
        ctx = context.current()
        return keystone_service_catalog.ServiceCatalogV3(
            ctx.auth_token, {"catalog": json.loads(service_catalog)}
        ).url_for(service_type=service_type, endpoint_type=endpoint_type, region_name=CONF.os_region_name)
Example #3
def retrieve_auth_url():
    """This function return auth url v2.0 api. Hadoop Swift library doesn't
    support keystone v3 api.
    """
    info = urlparse.urlparse(context.current().auth_uri)

    return "%s://%s:%s/%s/" % (info.scheme, info.hostname, info.port, 'v2.0')
Example #4
def check_cluster_update(cluster_id, data, **kwargs):
    cluster = api.get_cluster(cluster_id)

    verification = verification_base.validate_verification_ops(cluster, data)
    acl.check_tenant_for_update(context.current(), cluster)
    if not verification:
        acl.check_protected_from_update(cluster, data)
Example #5
def execute_job(job_id, data):

    # Elements common to all job types
    cluster_id = data['cluster_id']
    configs = data.get('job_configs', {})

    ctx = context.current()
    cluster = conductor.cluster_get(ctx, cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    instance = plugin.get_oozie_server(cluster)

    extra = {}
    info = None
    if CONF.use_namespaces and not CONF.use_floating_ips:
        info = instance.remote().get_neutron_info()
        extra['neutron'] = info

    # Not in Java job types but present for all others
    input_id = data.get('input_id', None)
    output_id = data.get('output_id', None)

    # Since we will use a unified class in the database, we pass
    # a superset for all job types
    job_ex_dict = {'input_id': input_id, 'output_id': output_id,
                   'job_id': job_id, 'cluster_id': cluster_id,
                   'info': {'status': 'Pending'}, 'job_configs': configs,
                   'extra': extra}
    job_execution = conductor.job_execution_create(context.ctx(), job_ex_dict)

    context.spawn("Starting Job Execution %s" % job_execution.id,
                  manager.run_job, job_execution)
    return job_execution
Example #6
    def _build_proxy_command(self, command, instance=None, port=None,
                             info=None, rootwrap_command=None):
        # Accepted keywords in the proxy command template:
        # {host}, {port}, {tenant_id}, {network_id}, {router_id}
        keywords = {}

        if not info:
            info = self.get_neutron_info(instance)
        keywords['tenant_id'] = context.current().tenant_id
        keywords['network_id'] = info['network']

        # Query Neutron only if needed
        if '{router_id}' in command:
            client = neutron.NeutronClient(info['network'], info['token'],
                                           info['tenant'])
            keywords['router_id'] = client.get_router()

        keywords['host'] = instance.management_ip
        keywords['port'] = port

        try:
            command = command.format(**keywords)
        except KeyError as e:
            LOG.error(_LE('Invalid keyword in proxy_command: {result}').format(
                result=e))
            # Do not give more details to the end-user
            raise ex.SystemError('Misconfiguration')
        if rootwrap_command:
            command = '{0} {1}'.format(rootwrap_command, command)
        return command
Example #7
def mount_to_instances(instances):
    if len(instances) == 0:
        return

    use_xfs = _can_use_xfs(instances)

    for instance in instances:
        with context.set_current_instance_id(instance.instance_id):
            devices = _find_instance_devices(instance)

            if devices:
                cpo.add_provisioning_step(
                    instance.cluster_id,
                    _("Mount volumes to {inst_name} instance").format(
                        inst_name=instance.instance_name), len(devices))

                formatted_devices = []
                lock = threading.Lock()
                with context.ThreadGroup() as tg:
                    # Since formatting can take several minutes (for large
                    # disks) and can be done in parallel, launch one thread
                    # per disk.
                    for device in devices:
                        tg.spawn('format-device-%s' % device, _format_device,
                                 instance, device, use_xfs, formatted_devices,
                                 lock)

                conductor.instance_update(
                    context.current(), instance,
                    {"storage_devices_number": len(formatted_devices)})
                for idx, dev in enumerate(formatted_devices):
                    _mount_volume_to_node(instance, idx+1, dev, use_xfs)
Example #8
def check_cluster_scaling(data, cluster_id, **kwargs):
    ctx = context.current()
    cluster = api.get_cluster(id=cluster_id)

    if cluster is None:
        raise ex.NotFoundException(
            {'id': cluster_id}, _('Object with %s not found'))

    b.check_plugin_labels(
        cluster.plugin_name, cluster.hadoop_version)

    acl.check_tenant_for_update(ctx, cluster)
    acl.check_protected_from_update(cluster, data)

    cluster_engine = cluster.sahara_info.get(
        'infrastructure_engine') if cluster.sahara_info else None

    engine_type_and_version = service_api.OPS.get_engine_type_and_version()
    if (not cluster_engine and
            not engine_type_and_version.startswith('direct')):
        raise ex.InvalidReferenceException(
            _("Cluster created before Juno release "
              "can't be scaled with %(engine)s engine") %
            {"engine": engine_type_and_version})

    if (cluster.sahara_info and
            cluster_engine != engine_type_and_version):
        raise ex.InvalidReferenceException(
            _("Cluster created with %(old_engine)s infrastructure engine "
              "can't be scaled with %(new_engine)s engine") %
            {"old_engine": cluster.sahara_info.get('infrastructure_engine'),
             "new_engine": engine_type_and_version})

    if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'scale_cluster') and (
            plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'decommission_nodes'))):
        raise ex.InvalidReferenceException(
            _("Requested plugin '%s' doesn't support cluster scaling feature")
            % cluster.plugin_name)

    if cluster.status != c_u.CLUSTER_STATUS_ACTIVE:
        raise ex.InvalidReferenceException(
            _("Cluster cannot be scaled not in 'Active' status. "
              "Cluster status: %s") % cluster.status)

    if cluster.user_keypair_id:
        b.check_keypair_exists(cluster.user_keypair_id)

    if cluster.default_image_id:
        b.check_image_registered(cluster.default_image_id)

    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])
        b.check_cluster_hostnames_lengths(cluster.name,
                                          data['add_node_groups'])
Example #9
def _retrieve_tenant():
    try:
        return context.current().tenant_name
    except RuntimeError:
        LOG.exception("Cannot retrieve tenant for swift integration. "
                      "Stopping cluster creation")
        # TODO(slukjanov?): raise a special error here
        raise RuntimeError("Cannot retrieve tenant for swift integration")
Example #10
def use_os_admin_auth_token(cluster):
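    # Switch the current context to the admin user's identity, with a trust-scoped token for the cluster's tenant.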
    if cluster.trust_id:
        ctx = context.current()
        ctx.username = CONF.keystone_authtoken.admin_user
        ctx.tenant_id = cluster.tenant_id
        client = keystone.client_for_trusts(cluster.trust_id)
        ctx.token = client.auth_token
        ctx.service_catalog = json.dumps(client.service_catalog.catalog["catalog"])
Example #11
def retrieve_auth_url():
    version = "v3" if CONF.use_identity_api_v3 else "v2.0"
    ctx = context.current()
    if ctx.service_catalog:
        info = urlparse.urlparse(url_for(ctx.service_catalog, "identity"))
    else:
        info = urlparse.urlparse(CONF.keystone_authtoken.auth_uri)
    return "%s://%s/%s" % (info[:2] + (version,))
Example #12
def check_cinder_exists():
    service_type = 'volumev2'
    try:
        base.url_for(context.current().service_catalog, service_type,
                     endpoint_type=CONF.cinder.endpoint_type)
        return True
    except ex.SystemError:
        return False
Example #13
def retrieve_auth_url(endpoint_type="internalURL", version=None):
    if not version:
        version = 'v3' if CONF.use_identity_api_v3 else 'v2.0'
    ctx = context.current()
    if ctx.service_catalog:
        auth_url = url_for(ctx.service_catalog, 'identity', endpoint_type)
    else:
        auth_url = CONF.keystone_authtoken.auth_uri
    return prepare_auth_url(auth_url, version)
Example #14
def get_raw_data(job_binary, proxy_configs=None):
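    # Choose credentials: proxy-user configs when provided, otherwise the user/password
    # stored on the job binary; passwords are resolved through the key manager.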
    conn_kwargs = {}
    if proxy_configs:
        key = key_manager.API().get(context.current(),
                                    proxy_configs.get('proxy_password'))
        password = key.get_encoded()
        conn_kwargs.update(username=proxy_configs.get('proxy_username'),
                           password=password,
                           trust_id=proxy_configs.get('proxy_trust_id'))
    else:
        key = key_manager.API().get(context.current(),
                                    job_binary.extra.get('password'))
        password = key.get_encoded()
        conn_kwargs.update(username=job_binary.extra.get('user'),
                           password=password)

    conn = sw.client(**conn_kwargs)
    return _get_raw_data(job_binary, conn)
Example #15
def client():
    ctx = context.current()
    heat_url = base.url_for(ctx.service_catalog, 'orchestration',
                            endpoint_type=CONF.heat.endpoint_type)
    return heat_client.Client('1', heat_url, token=ctx.auth_token,
                              cert_file=CONF.heat.ca_file,
                              insecure=CONF.heat.api_insecure,
                              username=ctx.username,
                              include_pass=True)
Example #16
def retrieve_auth_url(endpoint_type="internalURL"):
    version = 'v3' if CONF.use_identity_api_v3 else 'v2.0'
    ctx = context.current()
    if ctx.service_catalog:
        info = urlparse.urlparse(url_for(ctx.service_catalog, 'identity',
                                         endpoint_type))
    else:
        info = urlparse.urlparse(CONF.keystone_authtoken.auth_uri)
    return "%s://%s/%s" % (info[:2] + (version,))
Example #17
def check_cinder_exists():
    if CONF.cinder.api_version == 1:
        service_type = 'volume'
    else:
        service_type = 'volumev2'
    try:
        base.url_for(context.current().service_catalog, service_type)
        return True
    except ex.SystemError:
        return False
Example #18
def delete_secret(id, ctx=None):
    """delete a secret from the external key manager

    :param id: The identifier of the secret to delete
    :param ctx: The context, and associated authentication, to use with
                this operation (defaults to the current context)
    """
    if ctx is None:
        ctx = context.current()
    key_manager.API().delete(ctx, id)
Example #19
def store_secret(secret, ctx=None):
    """store a secret and return its identifier

    :param secret: The secret to store, this should be a string
    :param ctx: The context, and associated authentication, to use with
                this operation (defaults to the current context)
    """
    if ctx is None:
        ctx = context.current()
    key = passphrase.Passphrase(secret)
    return key_manager.API().store(ctx, key)
Example #20
def check_cinder_exists():
    if CONF.cinder.api_version == 2:
        service_type = 'volumev2'
    else:
        service_type = 'volumev3'
    try:
        base.url_for(context.current().service_catalog, service_type,
                     endpoint_type=CONF.cinder.endpoint_type)
        return True
    except keystone_exceptions.EndpointNotFound:
        return False
Example #21
def get_secret(id, ctx=None):
    """get a secret associated with an id

    :param id: The identifier of the secret to retrieve
    :param ctx: The context, and associated authentication, to use with
                this operation (defaults to the current context)
    """
    if ctx is None:
        ctx = context.current()
    key = key_manager.API().get(ctx, id)
    return key.get_encoded()
Example #22
    def get_neutron_info(self):
        neutron_info = h.HashableDict()
        neutron_info['network'] = (
            self.instance.node_group.cluster.neutron_management_network)
        ctx = context.current()
        neutron_info['uri'] = base.url_for(ctx.service_catalog, 'network')
        neutron_info['token'] = ctx.token
        neutron_info['tenant'] = ctx.tenant_name
        neutron_info['host'] = self.instance.management_ip

        LOG.debug('Returning neutron info: {0}'.format(neutron_info))
        return neutron_info
Example #23
def client():
    ctx = context.current()
    volume_url = base.url_for(ctx.service_catalog, 'volume')

    cinder = cinder_client.Client(ctx.username,
                                  ctx.token,
                                  ctx.tenant_id, volume_url)

    cinder.client.auth_token = ctx.token
    cinder.client.management_url = volume_url

    return cinder
Example #24
    def get_neutron_info(self, instance=None):
        if not instance:
            instance = self.instance
        neutron_info = h.HashableDict()
        neutron_info['network'] = instance.cluster.neutron_management_network
        ctx = context.current()
        neutron_info['uri'] = base.url_for(ctx.service_catalog, 'network')
        neutron_info['token'] = ctx.auth_token
        neutron_info['tenant'] = ctx.tenant_name
        neutron_info['host'] = instance.management_ip

        LOG.debug('Returning neutron info: {info}'.format(info=neutron_info))
        return neutron_info
Example #25
def client():
    ctx = context.current()
    auth_url = base.retrieve_auth_url()
    compute_url = base.url_for(ctx.service_catalog, 'compute')

    nova = nova_client.Client(username=ctx.username,
                              api_key=None,
                              project_id=ctx.tenant_id,
                              auth_url=auth_url)

    nova.client.auth_token = ctx.token
    nova.client.management_url = compute_url
    nova.images = images.SaharaImageManager(nova)
    return nova
Example #26
def check_job_execution_cancel(job_id, **kwargs):
    ctx = context.current()
    je = conductor.job_execution_get(ctx, job_id)

    if je.tenant_id != ctx.tenant_id:
        raise ex.CancelingFailed(
            _("Job execution with id '%s' cannot be canceled "
              "because it wasn't created in this tenant")
            % job_id)

    if je.is_protected:
        raise ex.CancelingFailed(
            _("Job Execution with id '%s' cannot be canceled "
              "because it's marked as protected") % job_id)
Example #27
    def get_neutron_info(self, instance=None):
        if not instance:
            instance = self.instance
        neutron_info = dict()
        neutron_info['network'] = instance.cluster.neutron_management_network
        ctx = context.current()
        neutron_info['token'] = ctx.auth_token
        neutron_info['tenant'] = ctx.tenant_name
        neutron_info['host'] = instance.management_ip

        log_info = copy.deepcopy(neutron_info)
        del log_info['token']
        LOG.debug('Returning neutron info: {info}'.format(info=log_info))
        return neutron_info
Example #28
    def get_neutron_info(self, instance=None):
        if not instance:
            instance = self.instance
        neutron_info = h.HashableDict()
        neutron_info['network'] = (
            instance.node_group.cluster.neutron_management_network)
        ctx = context.current()
        neutron_info['uri'] = base.url_for(ctx.service_catalog, 'network')
        neutron_info['token'] = ctx.auth_token
        neutron_info['tenant'] = ctx.tenant_name
        neutron_info['host'] = instance.management_ip

        LOG.debug('Returning neutron info: {0}'.format(neutron_info))
        return neutron_info
Example #29
def check_job_execution_cancel(job_execution_id, **kwargs):
    ctx = context.current()
    je = conductor.job_execution_get(ctx, job_execution_id)

    if je.tenant_id != ctx.tenant_id:
        raise ex.CancelingFailed(
            _("Job execution with id '%s' cannot be canceled "
              "because it wasn't created in this tenant")
            % job_execution_id)

    if je.is_protected:
        raise ex.CancelingFailed(
            _("Job Execution with id '%s' cannot be canceled "
              "because it's marked as protected") % job_execution_id)
Example #30
def url_for(service_catalog=None, service_type='identity',
            endpoint_type="internalURL"):
    if not service_catalog:
        service_catalog = context.current().service_catalog
    try:
        return keystone_service_catalog.ServiceCatalogV2(
            json.loads(service_catalog)).url_for(
                service_type=service_type, interface=endpoint_type,
                region_name=CONF.os_region_name)
    except keystone_ex.EndpointNotFound:
        return keystone_service_catalog.ServiceCatalogV3(
            json.loads(service_catalog)).url_for(
                service_type=service_type, interface=endpoint_type,
                region_name=CONF.os_region_name)
Example #31
def client():
    ctx = context.current()
    auth_url = base.retrieve_auth_url()
    compute_url = base.url_for(ctx.service_catalog, 'compute')

    nova = nova_client.Client(username=ctx.username,
                              api_key=None,
                              project_id=ctx.tenant_id,
                              auth_url=auth_url)

    nova.client.auth_token = ctx.auth_token
    nova.client.management_url = compute_url
    nova.images = images.SaharaImageManager(nova)
    return nova
Example #32
def delete_trust_from_cluster(cluster):
    """Delete a trust from a cluster

    If the cluster has a trust delegated to it, then delete it and set
    the trust id to None.

    :param cluster: The cluster to delete the trust from.

    """
    if cluster.trust_id:
        keystone_client = keystone.client_for_admin_from_trust(cluster.trust_id)
        delete_trust(keystone_client, cluster.trust_id)
        ctx = context.current()
        conductor.cluster_update(ctx, cluster, {"trust_id": None})
Example #33
    def get_neutron_info(self, instance=None):
        if not instance:
            instance = self.instance
        neutron_info = dict()
        neutron_info['network'] = instance.cluster.neutron_management_network
        ctx = context.current()
        neutron_info['token'] = context.get_auth_token()
        neutron_info['tenant'] = ctx.tenant_name
        neutron_info['host'] = _get_access_ip(instance)

        log_info = copy.deepcopy(neutron_info)
        del log_info['token']
        LOG.debug('Returning neutron info: {info}'.format(info=log_info))
        return neutron_info
Example #34
def delete_proxy_user_for_cluster(cluster):
    '''Delete a proxy user based on a Cluster

    :param cluster: The cluster model with proxy user information

    '''
    proxy_configs = cluster.cluster_configs.get('proxy_configs')
    if proxy_configs is not None:
        proxy_username = proxy_configs.get('proxy_username')
        key = key_manager.API().get(
            context.current(), proxy_configs.get('proxy_password'))
        proxy_password = key.get_encoded()
        proxy_trust_id = proxy_configs.get('proxy_trust_id')
        proxy_user = k.auth_for_proxy(proxy_username,
                                      proxy_password,
                                      proxy_trust_id)
        t.delete_trust(proxy_user, proxy_trust_id)
        proxy_user_delete(proxy_username)
        key_manager.API().delete(context.current(),
                                 proxy_configs.get('proxy_password'))
        update = {'cluster_configs': cluster.cluster_configs.to_dict()}
        del update['cluster_configs']['proxy_configs']
        conductor.cluster_update(context.ctx(), cluster, update)
Example #35
def client():
    ctx = context.current()
    if CONF.cinder_api_version == 1:
        volume_url = base.url_for(ctx.service_catalog, 'volume')
        cinder = cinder_client_v1.Client(ctx.username, ctx.auth_token,
                                         ctx.tenant_id, volume_url)
    else:
        volume_url = base.url_for(ctx.service_catalog, 'volumev2')
        cinder = cinder_client_v2.Client(ctx.username, ctx.auth_token,
                                         ctx.tenant_id, volume_url)

    cinder.client.auth_token = ctx.auth_token
    cinder.client.management_url = volume_url

    return cinder
Example #36
def create_trust(cluster):
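    # Delegate a trust from the current user to the admin user and store the trust id on the cluster.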
    client = keystone.client()

    ctx = context.current()

    trustee_id = keystone.client_for_admin(ctx.tenant_id).user_id

    trust = client.trusts.create(trustor_user=client.user_id,
                                 trustee_user=trustee_id,
                                 impersonation=True,
                                 role_names=ctx.roles,
                                 project=client.tenant_id)
    conductor.cluster_update(ctx,
                             cluster,
                             {'trust_id': trust.id})
Example #37
    def get_configs(self, input_data, output_data, proxy_configs=None):
        configs = {}
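        # Prefer proxy-user credentials when present; otherwise fall back to
        # credentials attached to the swift data sources.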

        if proxy_configs:
            key = key_manager.API().get(context.current(),
                                        proxy_configs.get('proxy_password'))
            configs[sw.HADOOP_SWIFT_USERNAME] = proxy_configs.get(
                'proxy_username')
            configs[sw.HADOOP_SWIFT_PASSWORD] = key.get_encoded()
            configs[sw.HADOOP_SWIFT_TRUST_ID] = proxy_configs.get(
                'proxy_trust_id')
            configs[sw.HADOOP_SWIFT_DOMAIN_NAME] = CONF.proxy_user_domain_name
            return configs

        for src in (input_data, output_data):
            if src.type == "swift" and hasattr(src, "credentials"):
                if "user" in src.credentials:
                    configs[sw.HADOOP_SWIFT_USERNAME] = src.credentials['user']
                if "password" in src.credentials:
                    key = key_manager.API().get(context.current(),
                                                src.credentials['password'])
                    configs[sw.HADOOP_SWIFT_PASSWORD] = key.get_encoded()
                break
        return configs
Example #38
def delete_trust_from_cluster(cluster):
    '''Delete a trust from a cluster

    If the cluster has a trust delegated to it, then delete it and set
    the trust id to None.

    :param cluster: The cluster to delete the trust from.

    '''
    ctx = context.current()
    cluster = conductor.cluster_get(ctx, cluster)
    if CONF.use_identity_api_v3 and cluster.trust_id:
        keystone_auth = keystone.auth_for_admin(trust_id=cluster.trust_id)
        delete_trust(keystone_auth, cluster.trust_id)
        conductor.cluster_update(ctx, cluster, {'trust_id': None})
Example #39
def delete_trust_from_cluster(cluster):
    '''Delete a trust from a cluster

    If the cluster has a trust delegated to it, then delete it and set
    the trust id to None.

    :param cluster: The cluster to delete the trust from.

    '''
    if cluster.trust_id:
        keystone_client = keystone.client_for_admin_from_trust(
            cluster.trust_id)
        delete_trust(keystone_client, cluster.trust_id)
        ctx = context.current()
        conductor.cluster_update(ctx, cluster, {'trust_id': None})
Example #40
    def get_configs(self, proxy_configs=None):
        configs = {}

        if proxy_configs:
            key = key_manager.API().get(context.current(),
                                        proxy_configs.get('proxy_password'))
            password = key.get_encoded()
            configs[sw.HADOOP_SWIFT_USERNAME] = proxy_configs.get(
                'proxy_username')
            configs[sw.HADOOP_SWIFT_PASSWORD] = password
            configs[sw.HADOOP_SWIFT_TRUST_ID] = proxy_configs.get(
                'proxy_trust_id')
            configs[sw.HADOOP_SWIFT_DOMAIN_NAME] = CONF.proxy_user_domain_name
            return configs

        return configs
Example #41
def get_os_admin_auth_plugin(cluster):
    '''Return an admin auth plugin based on the cluster trust id or project

    If a trust id is available for the cluster, then it is used
    to create an auth plugin scoped to the trust. If not, the
    project name from the current context is used to scope the
    auth plugin.

    :param cluster: The id of the cluster to use for trust identification.

    '''
    ctx = context.current()
    cluster = conductor.cluster_get(ctx, cluster)
    if CONF.use_identity_api_v3 and cluster.trust_id:
        return keystone.auth_for_admin(trust_id=cluster.trust_id)
    return keystone.auth_for_admin(project_name=ctx.tenant_name)
Example #42
def retrieve_auth_url():
    """This function returns auth url v2.0 api.

    Hadoop Swift library doesn't support keystone v3 api.
    """
    info = urlparse.urlparse(context.current().auth_uri)

    if CONF.use_domain_for_proxy_users:
        url = 'v3/auth'
    else:
        url = 'v2.0'

    return '{scheme}://{hostname}:{port}/{url}/'.format(scheme=info.scheme,
                                                        hostname=info.hostname,
                                                        port=info.port,
                                                        url=url)
Example #43
def url_for(service_catalog=None,
            service_type='identity',
            endpoint_type="internalURL"):
    if not service_catalog:
        service_catalog = context.current().service_catalog
    try:
        return keystone_service_catalog.ServiceCatalogV2(
            json.loads(service_catalog)).url_for(
                service_type=service_type,
                interface=endpoint_type,
                region_name=CONF.os_region_name)
    except keystone_ex.EndpointNotFound:
        return keystone_service_catalog.ServiceCatalogV3(
            json.loads(service_catalog)).url_for(
                service_type=service_type,
                interface=endpoint_type,
                region_name=CONF.os_region_name)
Example #44
def create_trust_for_cluster(cluster):
    '''Create a trust for a cluster

    This delegates a trust from the current user to the Sahara admin user
    based on the current context roles, and then adds the trust identifier
    to the cluster object.

    '''
    trustor = keystone.client()
    ctx = context.current()
    trustee = keystone.client_for_admin()

    trust_id = create_trust(trustor=trustor,
                            trustee=trustee,
                            role_names=ctx.roles)

    conductor.cluster_update(ctx, cluster, {'trust_id': trust_id})
Example #45
def client():
    ctx = context.current()
    auth_url = base.retrieve_auth_url()

    if CONF.use_identity_api_v3:
        keystone = keystone_client_v3.Client(username=ctx.username,
                                             token=ctx.token,
                                             tenant_id=ctx.tenant_id,
                                             auth_url=auth_url)
        keystone.management_url = auth_url
    else:
        keystone = keystone_client.Client(username=ctx.username,
                                          token=ctx.token,
                                          tenant_id=ctx.tenant_id,
                                          auth_url=auth_url)

    return keystone
Example #46
def add_provisioning_step(cluster_id, step_name, total):
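    # Register a new provisioning step for the cluster; the previous step, if any,
    # is updated first and the step type is taken from the current instance info.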
    if CONF.disable_event_log or not g.check_cluster_exists(cluster_id):
        return

    prev_step = get_current_provisioning_step(cluster_id)
    if prev_step:
        conductor.cluster_provision_step_update(context.ctx(), prev_step)

    step_type = context.ctx().current_instance_info.step_type
    new_step = conductor.cluster_provision_step_add(
        context.ctx(), cluster_id, {
            'step_name': step_name,
            'step_type': step_type,
            'total': total,
            'started_at': timeutils.utcnow(),
        })
    context.current().current_instance_info.step_id = new_step
    return new_step
Example #47
def use_os_admin_auth_token(cluster):
    '''Set the current context to the admin user's trust scoped token

    This will configure the current context to the admin user's identity
    with the cluster's tenant. It will also generate an authentication token
    based on the admin user and a delegated trust associated with the
    cluster.

    :param cluster: The cluster to use for tenant and trust identification.

    '''
    if cluster.trust_id:
        ctx = context.current()
        ctx.username = CONF.keystone_authtoken.admin_user
        ctx.tenant_id = cluster.tenant_id
        client = keystone.client_for_admin_from_trust(cluster.trust_id)
        ctx.auth_token = client.auth_token
        ctx.service_catalog = json.dumps(client.service_catalog.get_data())
Example #48
def client():
    ctx = context.current()
    args = {
        'insecure': CONF.cinder.api_insecure,
        'cacert': CONF.cinder.ca_file
    }
    if CONF.cinder.api_version == 1:
        volume_url = base.url_for(ctx.service_catalog, 'volume')
        cinder = cinder_client_v1.Client(ctx.username, ctx.auth_token,
                                         ctx.tenant_id, volume_url, **args)
    else:
        volume_url = base.url_for(ctx.service_catalog, 'volumev2')
        cinder = cinder_client_v2.Client(ctx.username, ctx.auth_token,
                                         ctx.tenant_id, volume_url, **args)

    cinder.client.auth_token = ctx.auth_token
    cinder.client.management_url = volume_url

    return cinder
Example #49
def use_os_admin_auth_token(cluster):
    '''Set the current context to the admin user's trust scoped token

    This will configure the current context to the admin user's identity
    with the cluster's tenant. It will also generate an authentication token
    based on the admin user and a delegated trust associated with the
    cluster.

    :param cluster: The cluster to use for tenant and trust identification.

    '''
    ctx = context.current()
    cluster = conductor.cluster_get(ctx, cluster)
    if CONF.use_identity_api_v3 and cluster.trust_id:
        ctx.username = CONF.keystone_authtoken.admin_user
        ctx.tenant_id = cluster.tenant_id
        ctx.auth_plugin = keystone.auth_for_admin(trust_id=cluster.trust_id)
        ctx.auth_token = context.get_auth_token()
        ctx.service_catalog = json.dumps(
            keystone.service_catalog_from_auth(ctx.auth_plugin))
Example #50
def create_proxy_user_for_job_execution(job_execution):
    '''Creates a proxy user and adds the credentials to the job execution

    :param job_execution: The job execution model to update

    '''
    username = '******'.format(job_execution.id)
    key = passphrase.Passphrase(proxy_user_create(username))
    password = key_manager.API().store(context.current(), key)
    current_user = k.auth()
    proxy_user = k.auth_for_proxy(username, password)
    trust_id = t.create_trust(trustor=current_user,
                              trustee=proxy_user,
                              role_names=CONF.proxy_user_role_names)
    update = {'job_configs': job_execution.job_configs.to_dict()}
    update['job_configs']['proxy_configs'] = {
        'proxy_username': username,
        'proxy_password': password,
        'proxy_trust_id': trust_id
        }
    conductor.job_execution_update(context.ctx(), job_execution, update)
Example #51
def create_trust_for_cluster(cluster, expires=True):
    '''Create a trust for a cluster

    This delegates a trust from the current user to the Sahara admin user
    based on the current context roles, and then adds the trust identifier
    to the cluster object.

    :param expires: The trust will expire if this is set to True.
    '''
    ctx = context.current()
    cluster = conductor.cluster_get(ctx, cluster)
    if CONF.use_identity_api_v3 and not cluster.trust_id:
        trustor = keystone.auth()
        trustee = keystone.auth_for_admin(
            project_name=CONF.keystone_authtoken.admin_tenant_name)

        trust_id = create_trust(trustor=trustor,
                                trustee=trustee,
                                role_names=ctx.roles,
                                allow_redelegation=True)

        conductor.cluster_update(ctx, cluster, {'trust_id': trust_id})
Example #52
def execute_job(job_id, data):

    # Elements common to all job types
    cluster_id = data['cluster_id']
    configs = data.get('job_configs', {})

    ctx = context.current()
    cluster = conductor.cluster_get(ctx, cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    instance = plugin.get_oozie_server(cluster)

    extra = {}
    info = None
    if CONF.use_namespaces and not CONF.use_floating_ips:
        info = instance.remote().get_neutron_info()
        extra['neutron'] = info

    # Not in Java job types but present for all others
    input_id = data.get('input_id', None)
    output_id = data.get('output_id', None)

    # Since we will use a unified class in the database, we pass
    # a superset for all job types
    job_ex_dict = {
        'input_id': input_id,
        'output_id': output_id,
        'job_id': job_id,
        'cluster_id': cluster_id,
        'info': {
            'status': 'Pending'
        },
        'job_configs': configs,
        'extra': extra
    }
    job_execution = conductor.job_execution_create(context.ctx(), job_ex_dict)

    context.spawn("Starting Job Execution %s" % job_execution.id,
                  manager.run_job, job_execution)
    return job_execution
Example #53
    def _build_proxy_command(self,
                             command,
                             instance=None,
                             port=None,
                             info=None,
                             rootwrap_command=None):
        # Accepted keywords in the proxy command template:
        # {host}, {port}, {tenant_id}, {network_id}, {router_id}
        keywords = {}

        if not info:
            info = self.get_neutron_info(instance)
        keywords['tenant_id'] = context.current().tenant_id
        keywords['network_id'] = info['network']

        # Query Neutron only if needed
        if '{router_id}' in command:
            auth = trusts.get_os_admin_auth_plugin(instance.cluster)
            client = neutron.NeutronClient(info['network'],
                                           info['token'],
                                           info['tenant'],
                                           auth=auth)
            keywords['router_id'] = client.get_router()

        keywords['host'] = instance.management_ip
        keywords['port'] = port

        try:
            command = command.format(**keywords)
        except KeyError as e:
            LOG.error(
                _LE('Invalid keyword in proxy_command: {result}').format(
                    result=e))
            # Do not give more details to the end-user
            raise ex.SystemError('Misconfiguration')
        if rootwrap_command:
            command = '{0} {1}'.format(rootwrap_command, command)
        return command
Example #54
def create_proxy_user_for_cluster(cluster):
    '''Creates a proxy user and adds the credentials to the cluster

    :param cluster: The cluster model to update

    '''
    if cluster.cluster_configs.get('proxy_configs'):
        return cluster
    username = '******'.format(cluster.id)
    key = passphrase.Passphrase(proxy_user_create(username))
    password = key_manager.API().store(context.current(), key)
    current_user = k.auth()
    proxy_user = k.auth_for_proxy(username, password)
    trust_id = t.create_trust(trustor=current_user,
                              trustee=proxy_user,
                              role_names=CONF.proxy_user_role_names)
    update = {'cluster_configs': cluster.cluster_configs.to_dict()}
    update['cluster_configs']['proxy_configs'] = {
        'proxy_username': username,
        'proxy_password': password,
        'proxy_trust_id': trust_id
        }
    return conductor.cluster_update(context.ctx(), cluster, update)
Example #55
def get_swift_configs():
    configs = x.load_hadoop_xml_defaults('swift/resources/conf-template.xml')
    for conf in configs:
        if conf['name'] == HADOOP_SWIFT_AUTH_URL:
            conf['value'] = su.retrieve_auth_url() + "auth/tokens/"
        if conf['name'] == HADOOP_SWIFT_TENANT:
            conf['value'] = retrieve_tenant()
        if CONF.os_region_name and conf['name'] == HADOOP_SWIFT_REGION:
            conf['value'] = CONF.os_region_name
        if conf['name'] == HADOOP_SWIFT_DOMAIN_NAME:
            # NOTE(jfreud): Don't be deceived here... Even though there is an
            # attribute provided by context called domain_name, it is used for
            # domain scope, and hadoop-swiftfs always authenticates using
            # project scope. The purpose of the setting below is to override
            # the default value for project domain and user domain, domain id
            # as 'default', which may not always be correct.
            # TODO(jfreud): When hadoop-swiftfs allows it, stop hoping that
            # project_domain_name is always equal to user_domain_name.
            conf['value'] = context.current().project_domain_name

    result = [cfg for cfg in configs if cfg['value']]
    LOG.info("Swift would be integrated with the following "
             "params: {result}".format(result=result))
    return result
Example #56
    def _upload_wrapper_xml(self, where, job_dir, job_configs):
        xml_name = 'spark.xml'
        proxy_configs = job_configs.get('proxy_configs')
        configs = {}
        if proxy_configs:
            key = key_manager.API().get(context.current(),
                                        proxy_configs.get('proxy_password'))
            password = key.get_encoded()
            configs[sw.HADOOP_SWIFT_USERNAME] = proxy_configs.get(
                'proxy_username')
            configs[sw.HADOOP_SWIFT_PASSWORD] = password
            configs[sw.HADOOP_SWIFT_TRUST_ID] = proxy_configs.get(
                'proxy_trust_id')
            configs[sw.HADOOP_SWIFT_DOMAIN_NAME] = CONF.proxy_user_domain_name
        else:
            cfgs = job_configs.get('configs', {})
            targets = [sw.HADOOP_SWIFT_USERNAME, sw.HADOOP_SWIFT_PASSWORD]
            configs = {k: cfgs[k] for k in targets if k in cfgs}

        content = xmlutils.create_hadoop_xml(configs)
        with remote.get_remote(where) as r:
            dst = os.path.join(job_dir, xml_name)
            r.write_file_to(dst, content)
        return xml_name
Example #57
def _release_remote_semaphore():
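    # Release in reverse order of acquisition: the global semaphore first, then the per-context one.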
    _global_remote_semaphore.release()
    context.current().remote_semaphore.release()
Example #58
def _acquire_remote_semaphore():
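    # Acquire the per-context semaphore before the global one; _release_remote_semaphore releases them in the opposite order.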
    context.current().remote_semaphore.acquire()
    _global_remote_semaphore.acquire()
Example #59
    def call(self, name, **kwargs):
        # Pass the current context, serialized to a dict, through to the wrapped client call.
        ctx = context.current()
        return self.__client.call(ctx.to_dict(), name, **kwargs)