Example No. 1
    def instantiate(self, update_existing, disable_rollback=True):
        main_tmpl = self._get_main_template()

        heat = h.client()

        kwargs = {
            'stack_name': self.cluster.stack_name,
            'timeout_mins': 180,
            'disable_rollback': disable_rollback,
            'parameters': {},
            'template': main_tmpl,
            'files': self.files}

        if CONF.heat_stack_tags:
            kwargs['tags'] = ",".join(CONF.heat_stack_tags)

        if not update_existing:
            LOG.debug("Creating Heat stack with args: {args}"
                      .format(args=kwargs))
            b.execute_with_retries(heat.stacks.create, **kwargs)
        else:
            stack = h.get_stack(self.cluster.stack_name)
            self.last_updated_time = stack.updated_time
            LOG.debug("Updating Heat stack {stack} with args: "
                      "{args}".format(stack=stack, args=kwargs))
            b.execute_with_retries(stack.update, **kwargs)

        self.heat_stack = h.get_stack(self.cluster.stack_name)
Example No. 2
def get_image(**kwargs):
    if len(kwargs) == 1 and 'id' in kwargs:
        return b.execute_with_retries(
            sahara_images.image_manager().get, kwargs['id'])
    else:
        return b.execute_with_retries(
            sahara_images.image_manager().find, **kwargs)
Example No. 3
def init_instances_ips(instance):
    """Extracts internal and management ips.

    The first IP from the nova networks CIDRs is used as the internal IP.
    If the use_floating_ips flag is set, the management IP will be the first
    non-internal IP.
    """

    server = nova.get_instance_info(instance)

    management_ip = None
    internal_ip = None

    for network_label, addresses in six.iteritems(server.addresses):
        for address in addresses:
            if address['OS-EXT-IPS:type'] == 'fixed':
                internal_ip = internal_ip or address['addr']
            else:
                management_ip = management_ip or address['addr']

    cluster = instance.cluster
    if (not CONF.use_floating_ips or
            (cluster.has_proxy_gateway() and
             not instance.node_group.is_proxy_gateway)):
        management_ip = internal_ip

    # NOTE(aignatov): Once bug #1262529 is fixed this 'if' block should be
    # reviewed and reformatted again, probably removed completely.
    if CONF.use_neutron and not (management_ip and internal_ip):
        LOG.debug("Instance doesn't yet contain Floating IP or Internal IP. "
                  "Floating IP={mgmt_ip}, Internal IP={internal_ip}. "
                  "Trying to get via Neutron.".format(
                      mgmt_ip=management_ip, internal_ip=internal_ip))
        neutron_client = neutron.client()
        ports = b.execute_with_retries(
            neutron_client.list_ports, device_id=server.id)["ports"]
        if ports:
            target_port_id = ports[0]['id']
            fl_ips = b.execute_with_retries(
                neutron_client.list_floatingips,
                port_id=target_port_id)['floatingips']
            if fl_ips:
                fl_ip = fl_ips[0]
                if not internal_ip:
                    internal_ip = fl_ip['fixed_ip_address']
                    LOG.debug('Found fixed IP {internal_ip}'
                              .format(internal_ip=internal_ip))
                # Zeroing management_ip if Sahara in private network
                if not CONF.use_floating_ips:
                    management_ip = internal_ip
                elif not management_ip:
                    management_ip = fl_ip['floating_ip_address']
                    LOG.debug('Found floating IP {mgmt_ip}'
                              .format(mgmt_ip=management_ip))

    conductor.instance_update(context.ctx(), instance,
                              {"management_ip": management_ip,
                               "internal_ip": internal_ip})

    return internal_ip and management_ip
Example No. 4
    def instantiate(self, update_existing, disable_rollback=True):
        main_tmpl = self._get_main_template()
        kwargs = {
            'stack_name': self.cluster.stack_name,
            'timeout_mins': 180,
            'disable_rollback': disable_rollback,
            'parameters': {},
            'template': main_tmpl,
            'files': self.files}

        if CONF.heat_stack_tags:
            kwargs['tags'] = ",".join(CONF.heat_stack_tags)

        log_kwargs = copy.deepcopy(kwargs)
        log_kwargs['template'] = yaml.safe_load(log_kwargs['template'])
        for filename in log_kwargs['files'].keys():
            log_kwargs['files'][filename] = yaml.safe_load(
                log_kwargs['files'][filename])
        log_kwargs = json.dumps(log_kwargs)

        if not update_existing:
            LOG.debug("Creating Heat stack with args: \n{args}"
                      .format(args=log_kwargs))
            b.execute_with_retries(h.client().stacks.create, **kwargs)
        else:
            stack = h.get_stack(self.cluster.stack_name)
            self.last_updated_time = stack.updated_time
            LOG.debug("Updating Heat stack {stack} with args: \n"
                      "{args}".format(stack=stack, args=log_kwargs))
            b.execute_with_retries(stack.update, **kwargs)
Example No. 5
def _get_neutron_limits():
    limits = {}
    neutron = neutron_client.client()
    tenant_id = context.ctx().tenant_id
    total_lim = b.execute_with_retries(neutron.show_quota, tenant_id)['quota']

    # tmckay-fp here we would just get the limits all the time
    usage_fip = b.execute_with_retries(
        neutron.list_floatingips, tenant_id=tenant_id)['floatingips']
    limits['floatingips'] = _sub_limit(total_lim['floatingip'],
                                       len(usage_fip))

    usage_sg = b.execute_with_retries(
        neutron.list_security_groups, tenant_id=tenant_id).get(
        'security_groups', [])
    limits['security_groups'] = _sub_limit(total_lim['security_group'],
                                           len(usage_sg))

    usage_sg_rules = b.execute_with_retries(
        neutron.list_security_group_rules, tenant_id=tenant_id).get(
        'security_group_rules', [])
    limits['security_group_rules'] = _sub_limit(
        total_lim['security_group_rule'], len(usage_sg_rules))

    usage_ports = b.execute_with_retries(
        neutron.list_ports, tenant_id=tenant_id)['ports']
    limits['ports'] = _sub_limit(total_lim['port'], len(usage_ports))

    return limits
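
Note: the quota examples in this listing call a helper _sub_limit that is not shown here. A minimal sketch of what such a helper might look like, assuming a -1 sentinel for unlimited quotas (the sentinel value is an assumption for illustration):

UNLIMITED = -1  # assumed sentinel meaning "no quota limit"


def _sub_limit(total, used):
    # an unlimited quota stays unlimited; otherwise report what is left
    if total == UNLIMITED:
        return UNLIMITED
    return total - used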
Example No. 6
def _delete_volume(volume_id):
    LOG.debug("Deleting volume {volume}".format(volume=volume_id))
    volume = cinder.get_volume(volume_id)
    try:
        b.execute_with_retries(volume.delete)
    except Exception:
        LOG.error("Can't delete volume {volume}".format(volume=volume.id))
Example No. 7
def _get_neutron_limits():
    limits = {}
    if not CONF.use_neutron:
        return limits
    neutron = neutron_client.client()
    tenant_id = context.ctx().tenant_id
    total_lim = b.execute_with_retries(neutron.show_quota, tenant_id)['quota']

    if CONF.use_floating_ips:
        usage_fip = b.execute_with_retries(
            neutron.list_floatingips, tenant_id=tenant_id)['floatingips']
        limits['floatingips'] = _sub_limit(total_lim['floatingip'],
                                           len(usage_fip))

    usage_sg = b.execute_with_retries(
        neutron.list_security_groups, tenant_id=tenant_id).get(
        'security_groups', [])
    limits['security_groups'] = _sub_limit(total_lim['security_group'],
                                           len(usage_sg))

    usage_sg_rules = b.execute_with_retries(
        neutron.list_security_group_rules, tenant_id=tenant_id).get(
        'security_group_rules', [])
    limits['security_group_rules'] = _sub_limit(
        total_lim['security_group_rule'], len(usage_sg_rules))

    usage_ports = b.execute_with_retries(
        neutron.list_ports, tenant_id=tenant_id)['ports']
    limits['ports'] = _sub_limit(total_lim['port'], len(usage_ports))

    return limits
Example No. 8
def proxy_user_delete(username=None, user_id=None):
    '''Delete the user from the proxy domain.

    :param username: The name of the user to delete.
    :param user_id: The id of the user to delete, if provided this overrides
                    the username.
    :raises NotFoundException: If there is an error locating the user in the
                               proxy domain.

    '''
    admin = k.client_for_admin()
    if not user_id:
        domain = domain_for_proxy()
        user_list = b.execute_with_retries(
            admin.users.list, domain=domain.id, name=username)
        if len(user_list) == 0:
            raise ex.NotFoundException(
                value=username,
                message_template=_('Failed to find user %s'))
        if len(user_list) > 1:
            raise ex.NotFoundException(
                value=username,
                message_template=_('Unexpected results found when searching '
                                   'for user %s'))
        user_id = user_list[0].id
    b.execute_with_retries(admin.users.delete, user_id)
    LOG.debug('Deleted proxy user id {user_id}'.format(user_id=user_id))
Example No. 9
def wait_stack_completion(stack, is_update=False, last_updated_time=None):
    base.execute_with_retries(stack.get)
    while not _verify_completion(stack, is_update, last_updated_time):
        context.sleep(1)
        base.execute_with_retries(stack.get)

    if stack.status != 'COMPLETE':
        raise ex.HeatStackException(stack.stack_status_reason)
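
Note: this example polls a helper _verify_completion that is not shown. A minimal sketch, assuming an empty or IN_PROGRESS status means the stack is still converging and that, for updates, completion is detected by a changed updated_time:

def _verify_completion(stack, is_update=False, last_updated_time=None):
    # an empty status can occur while Heat has not stored the status yet
    if stack.status in ['IN_PROGRESS', '']:
        return False
    # on update, the stack may still report the previous COMPLETE state;
    # wait until Heat reports a newer updated_time
    if is_update and stack.status == 'COMPLETE':
        if stack.updated_time == last_updated_time:
            return False
    return True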
Example No. 10
    def _delete_aa_server_group(self, cluster):
        if cluster.anti_affinity:
            server_group_name = g.generate_aa_group_name(cluster.name)
            client = nova.client().server_groups

            server_groups = b.execute_with_retries(client.findall,
                                                   name=server_group_name)
            if len(server_groups) == 1:
                b.execute_with_retries(client.delete, server_groups[0].id)
Example No. 11
def wait_stack_completion(stack):
    # NOTE: an empty status is also expected because the stack status
    # may not be set yet in the Heat database
    while stack.status in ['IN_PROGRESS', '']:
        context.sleep(1)
        base.execute_with_retries(stack.get)

    if stack.status != 'COMPLETE':
        raise ex.HeatStackException(stack.stack_status)
Example No. 12
def get_private_network_cidrs(cluster):
    neutron_client = client()
    private_net = base.execute_with_retries(
        neutron_client.show_network, cluster.neutron_management_network)

    cidrs = []
    for subnet_id in private_net["network"]["subnets"]:
        subnet = base.execute_with_retries(
            neutron_client.show_subnet, subnet_id)
        cidrs.append(subnet["subnet"]["cidr"])

    return cidrs
Example No. 13
def _detach_volume(instance, volume_id):
    volume = cinder.get_volume(volume_id)
    try:
        LOG.debug("Detaching volume {id} from instance".format(id=volume_id))
        b.execute_with_retries(nova.client().volumes.delete_server_volume,
                               instance.instance_id, volume_id)
    except Exception:
        LOG.error(_LE("Can't detach volume {id}").format(id=volume.id))

    detach_timeout = CONF.timeouts.detach_volume_timeout
    LOG.debug("Waiting {timeout} seconds to detach volume {id}".format(
        timeout=detach_timeout, id=volume_id))
    _await_detach(volume_id)
Example No. 14
def proxy_user_create(username):
    '''Create a new user in the proxy domain

    Creates the username specified with a random password.

    :param username: The name of the new user.
    :returns: The password created for the user.

    '''
    admin = k.client_for_admin()
    domain = domain_for_proxy()
    password = six.text_type(uuid.uuid4())
    b.execute_with_retries(
        admin.users.create, name=username, password=password, domain=domain.id)
    LOG.debug('Created proxy user {username}'.format(username=username))
    return password
Example No. 15
def delete_stack(cluster):
    stack_name = cluster.stack_name
    base.execute_with_retries(client().stacks.delete, stack_name)
    stack = get_stack(stack_name, raise_on_missing=False)
    while stack is not None:
        # Valid states: IN_PROGRESS, empty and COMPLETE
        if stack.status in ['IN_PROGRESS', '', 'COMPLETE']:
            context.sleep(5)
        else:
            raise ex.HeatStackException(
                message=_(
                    "Cannot delete heat stack {name}, reason: "
                    "stack status: {status}, status reason: {reason}").format(
                    name=stack_name, status=stack.status,
                    reason=stack.stack_status_reason))
        stack = get_stack(stack_name, raise_on_missing=False)
Example No. 16
def proxy_domain_users_list():
    '''Return a list of all users in the proxy domain.'''
    admin = k.client_for_admin()
    domain = domain_for_proxy()
    if domain:
        return b.execute_with_retries(admin.users.list, domain=domain.id)
    return []
Example No. 17
    def shutdown_cluster(self, cluster):
        """Shutdown specified cluster and all related resources."""
        try:
            b.execute_with_retries(heat.client().stacks.delete, cluster.name)
            stack = heat.get_stack(cluster.name)
            heat.wait_stack_completion(stack)
        except heat_exc.HTTPNotFound:
            LOG.warning(_LW('Did not find stack for cluster. Trying to delete '
                            'cluster manually.'))

            # Stack not found. Try to delete the cluster the way the
            # direct engine does it
            self._shutdown_instances(cluster)
            self._delete_aa_server_group(cluster)

        self._clean_job_executions(cluster)
        self._remove_db_objects(cluster)
Example No. 18
def get_stack(stack_name):
    heat = client()
    for stack in base.execute_with_retries(heat.stacks.list):
        if stack.stack_name == stack_name:
            return stack

    raise ex.NotFoundException(_('Failed to find stack %(stack)s')
                               % {'stack': stack_name})
Example No. 19
def _get_nova_limits():
    limits = {}
    nova = nova_client.client()
    lim = b.execute_with_retries(nova.limits.get).to_dict()['absolute']
    limits['ram'] = _sub_limit(lim['maxTotalRAMSize'], lim['totalRAMUsed'])
    limits['cpu'] = _sub_limit(lim['maxTotalCores'], lim['totalCoresUsed'])
    limits['instances'] = _sub_limit(lim['maxTotalInstances'],
                                     lim['totalInstancesUsed'])
    return limits
Example No. 20
def get_stack(stack_name, raise_on_missing=True):
    for stack in base.execute_with_retries(
            client().stacks.list, filters={'name': stack_name}):
        return stack

    if not raise_on_missing:
        return None

    raise ex.NotFoundException({'stack': stack_name},
                               _('Failed to find stack %(stack)s'))
Example No. 21
    def instantiate(self, update_existing, disable_rollback=True):
        main_tmpl = self._get_main_template()

        heat = h.client()

        kwargs = {
            'stack_name': self.cluster.name,
            'timeout_mins': 180,
            'disable_rollback': disable_rollback,
            'parameters': {},
            'template': main_tmpl}

        if not update_existing:
            b.execute_with_retries(heat.stacks.create, **kwargs)
        else:
            for stack in b.execute_with_retries(heat.stacks.list):
                if stack.stack_name == self.cluster.name:
                    b.execute_with_retries(stack.update, **kwargs)
                    break

        return ClusterStack(self, h.get_stack(self.cluster.name))
Example No. 22
    def instantiate(self, update_existing, disable_rollback=True):
        files = {}
        main_tmpl = self._get_main_template(files)

        heat = h.client()

        kwargs = {
            'stack_name': self.cluster.name,
            'timeout_mins': 180,
            'disable_rollback': disable_rollback,
            'parameters': {},
            'template': main_tmpl,
            'files': files}

        if not update_existing:
            b.execute_with_retries(heat.stacks.create, **kwargs)
        else:
            stack = h.get_stack(self.cluster.name)
            b.execute_with_retries(stack.update, **kwargs)

        self.heat_stack = h.get_stack(self.cluster.name)
Example No. 23
    def _shutdown_instance(self, instance):
        if instance.node_group.floating_ip_pool:
            try:
                b.execute_with_retries(networks.delete_floating_ip,
                                       instance.instance_id)
            except nova_exceptions.NotFound:
                LOG.warning(_LW("Attempted to delete non-existent floating IP "
                                "in pool {pool} from instance")
                            .format(pool=instance.node_group.floating_ip_pool))

        try:
            volumes.detach_from_instance(instance)
        except Exception:
            LOG.warning(_LW("Detaching volumes from instance failed"))

        try:
            b.execute_with_retries(nova.client().servers.delete,
                                   instance.instance_id)
        except nova_exceptions.NotFound:
            LOG.warning(_LW("Attempted to delete non-existent instance"))

        conductor.instance_remove(context.ctx(), instance)
Example No. 24
def _create_attach_volume(ctx, instance, size, volume_type,
                          volume_local_to_instance, name=None,
                          availability_zone=None):
    if CONF.cinder.api_version == 1:
        kwargs = {'size': size, 'display_name': name}
    else:
        kwargs = {'size': size, 'name': name}

    kwargs['volume_type'] = volume_type
    if availability_zone is not None:
        kwargs['availability_zone'] = availability_zone

    if volume_local_to_instance:
        kwargs['scheduler_hints'] = {'local_to_instance': instance.instance_id}

    volume = b.execute_with_retries(cinder.client().volumes.create, **kwargs)
    conductor.append_volume(ctx, instance, volume.id)
    _await_available(volume)

    resp = b.execute_with_retries(nova.client().volumes.create_server_volume,
                                  instance.instance_id, volume.id, None)
    return resp.device
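
Note: this example waits on _await_available, which is not shown. A minimal polling sketch, reusing cinder.get_volume and context.sleep from the surrounding examples; the timeout value and error handling here are assumptions for illustration, not the actual implementation:

def _await_available(volume):
    # assumed timeout in seconds, polled once per second
    timeout = 300
    while timeout > 0:
        volume = cinder.get_volume(volume.id)
        if volume.status == 'available':
            return
        if volume.status == 'error':
            raise RuntimeError("Volume %s moved to error state" % volume.id)
        context.sleep(1)
        timeout -= 1
    raise RuntimeError("Volume %s did not become available in time"
                       % volume.id)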
Example No. 25
def _get_cinder_limits():
    avail_limits = {}
    cinder = cinder_client.client()
    lim = {}
    for l in b.execute_with_retries(cinder.limits.get).absolute:
        lim[l.name] = l.value

    avail_limits['volumes'] = _sub_limit(lim['maxTotalVolumes'],
                                         lim['totalVolumesUsed'])

    avail_limits['volume_gbs'] = _sub_limit(lim['maxTotalVolumeGigabytes'],
                                            lim['totalGigabytesUsed'])

    return avail_limits
Example No. 26
    def _delete_auto_security_group(self, node_group):
        if not node_group.auto_security_group:
            return

        if not node_group.security_groups:
            # node group has no security groups
            # nothing to delete
            return

        name = node_group.security_groups[-1]

        try:
            client = nova.client().security_groups
            security_group = b.execute_with_retries(client.get, name)
            if (security_group.name !=
                    g.generate_auto_security_group_name(node_group)):
                LOG.warning(_LW("Auto security group for node group {name} is "
                                "not found").format(name=node_group.name))
                return
            b.execute_with_retries(client.delete, name)
        except Exception:
            LOG.warning(_LW("Failed to delete security group {name}").format(
                name=name))
Example No. 27
def retrieve_preauth_url():
    '''This function returns the storage URL for Swift in the current project.

    :returns: The storage URL for the current project's Swift store, or None
              if it can't be found.

    '''
    client = k.client()
    catalog = clients_base.execute_with_retries(
        client.service_catalog.get_endpoints, 'object-store')
    for ep in catalog.get('object-store'):
        if ep.get('interface') == 'public':
            return ep.get('url')
    return None
Example No. 28
def _get_nova_limits():
    limits = {}
    nova = nova_client.client()
    lim = b.execute_with_retries(nova.limits.get).to_dict()['absolute']
    limits['ram'] = _sub_limit(lim['maxTotalRAMSize'], lim['totalRAMUsed'])
    limits['cpu'] = _sub_limit(lim['maxTotalCores'], lim['totalCoresUsed'])
    limits['instances'] = _sub_limit(lim['maxTotalInstances'],
                                     lim['totalInstancesUsed'])
    if CONF.use_neutron:
        return limits
    if CONF.use_floating_ips:
        limits['floatingips'] = _sub_limit(lim['maxTotalFloatingIps'],
                                           lim['totalFloatingIpsUsed'])
    limits['security_groups'] = _sub_limit(lim['maxSecurityGroups'],
                                           lim['totalSecurityGroupsUsed'])
    limits['security_group_rules'] = _sub_limit(lim['maxSecurityGroupRules'],
                                                0)
    return limits
Example No. 29
def _update_limits_for_ng(limits, ng, count):
    sign = lambda x: (1, -1)[x < 0]
    nova = nova_client.client()
    limits['instances'] += count
    flavor = b.execute_with_retries(nova.flavors.get, ng.flavor_id)
    limits['ram'] += flavor.ram * count
    limits['cpu'] += flavor.vcpus * count
    # tmckay-fp this is fine, it will be zero without it
    if ng.floating_ip_pool:
        limits['floatingips'] += count
    if ng.volumes_per_node:
        limits['volumes'] += ng.volumes_per_node * count
        limits['volume_gbs'] += ng.volumes_per_node * ng.volumes_size * count
    if ng.auto_security_group:
        limits['security_groups'] += sign(count)
        # NOTE: +3 - all traffic for private network
        limits['security_group_rules'] += (
            (len(ng.open_ports) + 3) * sign(count))
    limits['ports'] += count
Example No. 30
    def get_router(self):
        matching_router = NeutronClient.routers.get(self.network, None)
        if matching_router:
            LOG.debug("Returning cached qrouter")
            return matching_router["id"]

        routers = self.neutron.list_routers()["routers"]
        for router in routers:
            device_id = router["id"]
            ports = base.execute_with_retries(
                self.neutron.list_ports, device_id=device_id)["ports"]
            port = next((port for port in ports
                         if port["network_id"] == self.network), None)
            if port:
                matching_router = router
                NeutronClient.routers[self.network] = matching_router
                break

        if not matching_router:
            raise ex.SystemError(
                _("Neutron router corresponding to network "
                  "%s is not found") % self.network)

        return matching_router["id"]
Example No. 31
def domain_for_proxy():
    '''Return the proxy domain or None

    If configured to use the proxy domain, this function will return that
    domain. If not configured to use the proxy domain, this function will
    return None. If the proxy domain can't be found this will raise an
    exception.

    :returns: A Keystone Domain object or None.
    :raises ConfigurationError: If the domain is requested but not specified.
    :raises NotFoundException: If the domain name is specified but cannot be
                               found.

    '''
    if CONF.use_domain_for_proxy_users is False:
        return None
    if CONF.proxy_user_domain_name is None:
        raise ex.ConfigurationError(
            _('Proxy domain requested but not '
              'specified.'))
    admin = k.client_for_admin()

    global PROXY_DOMAIN
    if not PROXY_DOMAIN:
        domain_list = b.execute_with_retries(admin.domains.list,
                                             name=CONF.proxy_user_domain_name)
        if len(domain_list) == 0:
            raise ex.NotFoundException(
                value=CONF.proxy_user_domain_name,
                message_template=_('Failed to find domain %s'))
        # the domain name should be globally unique in Keystone
        if len(domain_list) > 1:
            raise ex.NotFoundException(
                value=CONF.proxy_user_domain_name,
                message_template=_('Unexpected results found when searching '
                                   'for domain %s'))
        PROXY_DOMAIN = domain_list[0]
    return PROXY_DOMAIN
Example No. 32
def generate_topology_map(cluster, is_node_awareness):
    mapping = _read_compute_topology()
    nova_client = nova.client()
    topology_mapping = {}
    for ng in cluster.node_groups:
        for i in ng.instances:
            # TODO(alazarev) get all servers info with one request
            ni = b.execute_with_retries(nova_client.servers.get, i.instance_id)
            hostId = ni.hostId
            if hostId not in mapping:
                raise ex.NotFoundException(
                    i.instance_id,
                    _("Was not able to find compute node topology for VM %s"))
            rack = mapping[hostId]
            if is_node_awareness:
                rack += "/" + hostId

            topology_mapping[i.instance_name] = rack
            topology_mapping[i.management_ip] = rack
            topology_mapping[i.internal_ip] = rack

    topology_mapping.update(_read_swift_topology())

    return topology_mapping
Example No. 33
def _update_limits_for_ng(limits, ng, count):
    sign = lambda x: (1, -1)[x < 0]
    nova = nova_client.client()
    limits['instances'] += count
    flavor = b.execute_with_retries(nova.flavors.get, ng.flavor_id)
    limits['ram'] += flavor.ram * count
    limits['cpu'] += flavor.vcpus * count
    # tmckay-fp this is fine, it will be zero without it
    if ng.floating_ip_pool:
        limits['floatingips'] += count
    if ng.volumes_per_node:
        limits['volumes'] += ng.volumes_per_node * count
        limits['volume_gbs'] += ng.volumes_per_node * ng.volumes_size * count
    if ng.auto_security_group:
        limits['security_groups'] += sign(count)
        # NOTE: +3 - all traffic for private network
        if CONF.use_neutron:
            limits['security_group_rules'] += (
                (len(ng.open_ports) + 3) * sign(count))
        else:
            limits['security_group_rules'] = max(
                limits['security_group_rules'], len(ng.open_ports) + 3)
    if CONF.use_neutron:
        limits['ports'] += count
Example No. 34
    def _delete_aa_server_groups(self, cluster):
        if cluster.anti_affinity:
            for i in range(1, cluster.anti_affinity_ratio):
                server_group_name = g.generate_aa_group_name(cluster.name, i)

                client = nova.client().server_groups

                server_groups = b.execute_with_retries(client.findall,
                                                       name=server_group_name)
                if len(server_groups) == 1:
                    b.execute_with_retries(client.delete, server_groups[0].id)
                # in case the server group was created using Mitaka
                # or an older version
                old_server_group_name = server_group_name.rsplit('-', 1)[0]
                server_groups_old = b.execute_with_retries(
                    client.findall, name=old_server_group_name)
                if len(server_groups_old) == 1:
                    b.execute_with_retries(client.delete,
                                           server_groups_old[0].id)
Example No. 35
def remove_image_tags(image_id, tags):
    manager = sahara_images.image_manager()
    b.execute_with_retries(manager.untag, image_id, tags)
    return b.execute_with_retries(manager.get, image_id)
Example No. 36
def register_image(image_id, username, description=None):
    manager = sahara_images.image_manager()
    b.execute_with_retries(
        manager.set_image_info, image_id, username, description)
    return b.execute_with_retries(manager.get, image_id)
Example No. 37
def unregister_image(image_id):
    manager = sahara_images.image_manager()
    b.execute_with_retries(manager.unset_image_info, image_id)
    return b.execute_with_retries(manager.get, image_id)
Example No. 38
    def get_node_group_image_username(self, node_group):
        image_id = node_group.get_image_id()
        return b.execute_with_retries(
            nova.client().images.get, image_id).username
Example No. 39
def assign_floating_ip(instance_id, pool):
    ip = b.execute_with_retries(nova.client().floating_ips.create, pool)
    server = b.execute_with_retries(nova.client().servers.get, instance_id)
    b.execute_with_retries(server.add_floating_ip, ip)
Example No. 40
def unregister_image(image_id):
    client = nova.client()
    b.execute_with_retries(client.images.unset_description, image_id)
    return b.execute_with_retries(client.images.get, image_id)
Example No. 41
def get_keypair(keypair_name):
    return base.execute_with_retries(client().keypairs.get, keypair_name)
Example No. 42
def get_images(name, tags):
    return b.execute_with_retries(
        nova.client().images.list_registered, name, tags)
Example No. 43
def get_registered_image(id):
    return b.execute_with_retries(
        nova.client().images.get_registered_image, id)
Example No. 44
def lazy_delete_stack(cluster):
    '''Attempt to delete stack once, but do not await successful deletion'''
    stack_name = cluster.stack_name
    base.execute_with_retries(client().stacks.delete, stack_name)
Example No. 45
def init_instances_ips(instance):
    """Extracts internal and management ips.

    The first IP from the nova networks CIDRs is used as the internal IP.
    If the use_floating_ips flag is set, the management IP will be the first
    non-internal IP.
    """

    server = nova.get_instance_info(instance)

    management_ip = None
    internal_ip = None

    for addresses in six.itervalues(server.addresses):
        # selects IPv4 preferentially
        for address in sorted(addresses, key=lambda addr: addr['version']):
            if address['OS-EXT-IPS:type'] == 'fixed':
                internal_ip = internal_ip or address['addr']
            else:
                management_ip = management_ip or address['addr']

    cluster = instance.cluster
    if (not CONF.use_floating_ips
            or (cluster.has_proxy_gateway()
                and not instance.node_group.is_proxy_gateway)):
        management_ip = internal_ip

    # NOTE(aignatov): Once bug #1262529 is fixed this 'if' block should be
    # reviewed and reformatted again, probably removed completely.
    if CONF.use_neutron and not (management_ip and internal_ip):
        LOG.debug("Instance doesn't yet contain Floating IP or Internal IP. "
                  "Floating IP={mgmt_ip}, Internal IP={internal_ip}. "
                  "Trying to get via Neutron.".format(mgmt_ip=management_ip,
                                                      internal_ip=internal_ip))
        neutron_client = neutron.client()
        ports = b.execute_with_retries(neutron_client.list_ports,
                                       device_id=server.id)["ports"]
        if ports:
            target_port_id = ports[0]['id']
            fl_ips = b.execute_with_retries(
                neutron_client.list_floatingips,
                port_id=target_port_id)['floatingips']
            if fl_ips:
                fl_ip = fl_ips[0]
                if not internal_ip:
                    internal_ip = fl_ip['fixed_ip_address']
                    LOG.debug('Found fixed IP {internal_ip}'.format(
                        internal_ip=internal_ip))
                # Zeroing management_ip if Sahara in private network
                if not CONF.use_floating_ips:
                    management_ip = internal_ip
                elif not management_ip:
                    management_ip = fl_ip['floating_ip_address']
                    LOG.debug('Found floating IP {mgmt_ip}'.format(
                        mgmt_ip=management_ip))

    conductor.instance_update(context.ctx(), instance, {
        "management_ip": management_ip,
        "internal_ip": internal_ip
    })

    return internal_ip and management_ip
Example No. 46
def _find_instance_volume_devices(instance):
    volumes = b.execute_with_retries(nova.client().volumes.get_server_volumes,
                                     instance.instance_id)
    devices = [volume.device for volume in volumes]
    return devices
Example No. 47
def get_image_tags(image_id):
    return b.execute_with_retries(sahara_images.image_manager().get,
                                  image_id).tags
Example No. 48
def delete_floating_ip(instance_id):
    fl_ips = b.execute_with_retries(nova.client().floating_ips.findall,
                                    instance_id=instance_id)
    for fl_ip in fl_ips:
        b.execute_with_retries(nova.client().floating_ips.delete, fl_ip.id)
Example No. 49
def get_image(**kwargs):
    if len(kwargs) == 1 and 'id' in kwargs:
        return b.execute_with_retries(nova.client().images.get, kwargs['id'])
    else:
        return b.execute_with_retries(nova.client().images.find, **kwargs)
Example No. 50
    def get_node_group_image_username(self, node_group):
        image_id = node_group.get_image_id()
        return b.execute_with_retries(sahara_images.image_manager().get,
                                      image_id).username
Example No. 51
def register_image(image_id, username, description=None):
    client = nova.client()
    b.execute_with_retries(
        client.images.set_description, image_id, username, description)
    return b.execute_with_retries(client.images.get, image_id)
Example No. 52
def get_images(name, tags):
    return b.execute_with_retries(
        sahara_images.image_manager().list_registered, name, tags)
Example No. 53
def remove_image_tags(image_id, tags):
    client = nova.client()
    b.execute_with_retries(client.images.untag, image_id, tags)
    return b.execute_with_retries(client.images.get, image_id)
Example No. 54
def get_resource(stack, resource):
    return base.execute_with_retries(client().resources.get, stack, resource)
Example No. 55
def get_volume(volume_id):
    return base.execute_with_retries(client().volumes.get, volume_id)
Example No. 56
def get_registered_image(id):
    return b.execute_with_retries(
        sahara_images.image_manager().get_registered_image, id)
Example No. 57
def get_flavor(**kwargs):
    return base.execute_with_retries(client().flavors.find, **kwargs)
Example No. 58
def get_instance_info(instance):
    return base.execute_with_retries(client().servers.get,
                                     instance.instance_id)
Example No. 59
def get_network(**kwargs):
    try:
        return base.execute_with_retries(client().networks.find, **kwargs)
    except nova_ex.NotFound:
        return None
Example No. 60
def get_network(id):
    try:
        return base.execute_with_retries(client().find_resource_by_id,
                                         'network', id)
    except n_ex.NotFound:
        return None
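
Note: every example in this listing goes through the same wrapper, imported as b.execute_with_retries or base.execute_with_retries. A minimal sketch of such a retry wrapper, assuming transient HTTP errors are retried a fixed number of times; the retried status codes, attempt count, and wait time below are assumptions for illustration, not the actual Sahara configuration:

import time


# assumed set of transient HTTP status codes worth retrying
_ERRORS_TO_RETRY = frozenset([408, 413, 429, 500, 502, 503, 504])


def execute_with_retries(method, *args, **kwargs):
    # assumed policy: 5 retries after the initial call, 10s between attempts
    attempts = 6
    retry_wait = 10
    while True:
        try:
            return method(*args, **kwargs)
        except Exception as e:
            # OpenStack clients expose the HTTP status under different names
            code = (getattr(e, 'http_status', None) or
                    getattr(e, 'status_code', None) or
                    getattr(e, 'code', None))
            attempts -= 1
            if code not in _ERRORS_TO_RETRY or attempts <= 0:
                raise
            time.sleep(retry_wait)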