Example #1
    def _shutdown_instance(self, instance):
        ctx = context.ctx()

        if instance.node_group.floating_ip_pool:
            try:
                networks.delete_floating_ip(instance.instance_id)
            except nova_exceptions.NotFound:
                LOG.warning(_LW("Attempted to delete non-existent floating IP "
                                "in pool {pool} from instance {instance}")
                            .format(pool=instance.node_group.floating_ip_pool,
                                    instance=instance.instance_id))

        try:
            volumes.detach_from_instance(instance)
        except Exception:
            LOG.warning(_LW("Detaching volumes from instance {id} failed")
                        .format(id=instance.instance_id))

        try:
            nova.client().servers.delete(instance.instance_id)
        except nova_exceptions.NotFound:
            LOG.warning(_LW("Attempted to delete non-existent instance {id}")
                        .format(id=instance.instance_id))

        conductor.instance_remove(ctx, instance)
Example #2
def _detach_volume(instance, volume_id):
    volume = cinder.get_volume(volume_id)
    try:
        LOG.debug("Detaching volume %s from instance %s" % (
            volume_id, instance.instance_name))
        nova.client().volumes.delete_server_volume(instance.instance_id,
                                                   volume_id)
    except Exception:
        LOG.exception(_LE("Can't detach volume %s"), volume.id)

    detach_timeout = CONF.detach_volume_timeout
    LOG.debug("Waiting %d seconds to detach %s volume" % (detach_timeout,
                                                          volume_id))
    s_time = tu.utcnow()
    while tu.delta_seconds(s_time, tu.utcnow()) < detach_timeout:
        volume = cinder.get_volume(volume_id)
        if volume.status not in ['available', 'error']:
            context.sleep(2)
        else:
            LOG.debug("Volume %s has been detached" % volume_id)
            return
    else:
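        # This "else" belongs to the while loop: it runs only when the loop
        # condition becomes false, i.e. the detach timed out (a successful
        # detach returns from inside the loop and skips this branch).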
        LOG.warn(_LW("Can't detach volume %(volume)s. "
                     "Current status of volume: %(status)s"),
                 {'volume': volume_id, 'status': volume.status})
Example #3
    def _shutdown_instance(self, instance):
        ctx = context.ctx()

        if instance.node_group.floating_ip_pool:
            try:
                networks.delete_floating_ip(instance.instance_id)
            except nova_exceptions.NotFound:
                LOG.warn(_LW("Attempted to delete non-existent floating IP in "
                         "pool %(pool)s from instance %(instance)s"),
                         {'pool': instance.node_group.floating_ip_pool,
                          'instance': instance.instance_id})

        try:
            volumes.detach_from_instance(instance)
        except Exception:
            LOG.warn(_LW("Detaching volumes from instance %s failed"),
                     instance.instance_id)

        try:
            nova.client().servers.delete(instance.instance_id)
        except nova_exceptions.NotFound:
            LOG.warn(_LW("Attempted to delete non-existent instance %s"),
                     instance.instance_id)

        conductor.instance_remove(ctx, instance)
Example #4
    def _delete_auto_security_group(self, node_group):
        if not node_group.auto_security_group:
            return

        name = node_group.security_groups[-1]

        try:
            nova.client().security_groups.delete(name)
        except Exception:
            LOG.exception("Failed to delete security group %s", name)
Example #5
def _detach_volume(instance, volume_id):
    volume = cinder.get_volume(volume_id)
    try:
        LOG.debug("Detaching volume {id} from instance".format(id=volume_id))
        nova.client().volumes.delete_server_volume(instance.instance_id,
                                                   volume_id)
    except Exception:
        LOG.error(_LE("Can't detach volume {id}").format(id=volume.id))

    detach_timeout = CONF.timeouts.detach_volume_timeout
    LOG.debug("Waiting {timeout} seconds to detach {id} volume".format(
        timeout=detach_timeout, id=volume_id))
    _await_detach(volume_id)
Example #6
def _detach_volume(instance, volume_id):
    volume = cinder.get_volume(volume_id)
    try:
        LOG.debug("Detaching volume {id}  from instance {instance}".format(
                  id=volume_id, instance=instance.instance_name))
        nova.client().volumes.delete_server_volume(instance.instance_id,
                                                   volume_id)
    except Exception:
        LOG.error(_LE("Can't detach volume {id}").format(id=volume.id))

    detach_timeout = CONF.timeouts.detach_volume_timeout
    LOG.debug("Waiting {timeout} seconds to detach {id} volume".format(
              timeout=detach_timeout, id=volume_id))
    _await_detach(volume_id)
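
Examples #5 and #6 delegate the waiting to an _await_detach helper that is not listed on this page. The sketch below is only an illustration of what such a helper could look like, assuming the same module-level names used in the other examples (CONF, cinder, context, LOG and the timeutils module aliased as tu); it mirrors the inline polling loop from Example #2 and is not the actual Sahara implementation.

def _await_detach(volume_id):
    # Hypothetical sketch: poll Cinder until the volume reports 'available'
    # or 'error', or until the configured detach timeout elapses.
    detach_timeout = CONF.timeouts.detach_volume_timeout
    s_time = tu.utcnow()
    while tu.delta_seconds(s_time, tu.utcnow()) < detach_timeout:
        volume = cinder.get_volume(volume_id)
        if volume.status in ['available', 'error']:
            return
        context.sleep(2)
    # On timeout the real helper would warn or raise, as Example #2 does.
    LOG.warning("Can't detach volume %s within %s seconds",
                volume_id, detach_timeout)
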
Example #7
    def _run_instance(self, cluster, node_group, idx, aa_group=None,
                      old_aa_groups=None):
        """Create instance using nova client and persist them into DB."""
        ctx = context.ctx()
        name = g.generate_instance_name(cluster.name, node_group.name, idx)

        userdata = self._generate_user_data_script(node_group, name)

        if old_aa_groups:
            # aa_groups: node process -> instance ids
            aa_ids = []
            for node_process in node_group.node_processes:
                aa_ids += old_aa_groups.get(node_process) or []

            # create instances only at hosts w/ no instances
            # w/ aa-enabled processes
            hints = {'different_host': sorted(set(aa_ids))} if aa_ids else None
        else:
            hints = {'group': aa_group} if (
                aa_group and self._need_aa_server_group(node_group)) else None

        if CONF.use_neutron:
            net_id = cluster.neutron_management_network
            nics = [{"net-id": net_id, "v4-fixed-ip": ""}]

            nova_instance = nova.client().servers.create(
                name, node_group.get_image_id(), node_group.flavor_id,
                scheduler_hints=hints, userdata=userdata,
                key_name=cluster.user_keypair_id,
                nics=nics, security_groups=node_group.security_groups)
        else:
            nova_instance = nova.client().servers.create(
                name, node_group.get_image_id(), node_group.flavor_id,
                scheduler_hints=hints, userdata=userdata,
                key_name=cluster.user_keypair_id,
                security_groups=node_group.security_groups)

        instance_id = conductor.instance_add(ctx, node_group,
                                             {"instance_id": nova_instance.id,
                                              "instance_name": name})

        if old_aa_groups:
            # save instance id to aa_groups to support aa feature
            for node_process in node_group.node_processes:
                if node_process in cluster.anti_affinity:
                    aa_group_ids = old_aa_groups.get(node_process, [])
                    aa_group_ids.append(nova_instance.id)
                    old_aa_groups[node_process] = aa_group_ids

        return instance_id
Example #8
    def _run_instance(self, cluster, node_group, idx, aa_groups):
        """Create instance using nova client and persist them into DB."""
        ctx = context.ctx()
        name = self._get_inst_name(cluster.name, node_group.name, idx)

        userdata = self._generate_user_data_script(node_group, name)

        # aa_groups: node process -> instance ids
        aa_ids = []
        for node_process in node_group.node_processes:
            aa_ids += aa_groups.get(node_process) or []

        # create instances only at hosts w/ no instances
        # w/ aa-enabled processes
        hints = {'different_host': list(set(aa_ids))} if aa_ids else None

        if CONF.use_neutron:
            net_id = cluster.neutron_management_network
            nics = [{"net-id": net_id, "v4-fixed-ip": ""}]

            nova_instance = nova.client().servers.create(
                name,
                node_group.get_image_id(),
                node_group.flavor_id,
                scheduler_hints=hints,
                userdata=userdata,
                key_name=cluster.user_keypair_id,
                nics=nics)
        else:
            nova_instance = nova.client().servers.create(
                name,
                node_group.get_image_id(),
                node_group.flavor_id,
                scheduler_hints=hints,
                userdata=userdata,
                key_name=cluster.user_keypair_id)

        instance_id = conductor.instance_add(ctx, node_group, {
            "instance_id": nova_instance.id,
            "instance_name": name
        })
        # save instance id to aa_groups to support aa feature
        for node_process in node_group.node_processes:
            if node_process in cluster.anti_affinity:
                aa_group_ids = aa_groups.get(node_process, [])
                aa_group_ids.append(nova_instance.id)
                aa_groups[node_process] = aa_group_ids

        return instance_id
Example #9
def _update_limits_for_ng(limits, ng, count):
    sign = lambda x: (1, -1)[x < 0]
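    # sign(x) is 1 for x >= 0 and -1 for x < 0 (a two-element tuple indexed
    # by the boolean x < 0), so the security group counters below are
    # decremented when count is negative.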
    nova = nova_client.client()
    limits['instances'] += count
    flavor = b.execute_with_retries(nova.flavors.get, ng.flavor_id)
    limits['ram'] += flavor.ram * count
    limits['cpu'] += flavor.vcpus * count
    # tmckay-fp this is fine, it will be zero without it
    if ng.floating_ip_pool:
        limits['floatingips'] += count
    if ng.volumes_per_node:
        limits['volumes'] += ng.volumes_per_node * count
        limits['volume_gbs'] += ng.volumes_per_node * ng.volumes_size * count
    if ng.auto_security_group:
        limits['security_groups'] += sign(count)
        # NOTE: +3 - all traffic for private network
        if CONF.use_neutron:
            limits['security_group_rules'] += ((len(ng.open_ports) + 3) *
                                               sign(count))
        else:
            limits['security_group_rules'] = max(
                limits['security_group_rules'],
                len(ng.open_ports) + 3)
    if CONF.use_neutron:
        limits['ports'] += count
Example #10
def _create_attach_volume(ctx,
                          instance,
                          size,
                          volume_type,
                          name=None,
                          availability_zone=None):
    if CONF.cinder.api_version == 1:
        kwargs = {'size': size, 'display_name': name}
    else:
        kwargs = {'size': size, 'name': name}

    kwargs['volume_type'] = volume_type
    if availability_zone is not None:
        kwargs['availability_zone'] = availability_zone

    volume = cinder.client().volumes.create(**kwargs)
    conductor.append_volume(ctx, instance, volume.id)

    while volume.status != 'available':
        volume = cinder.get_volume(volume.id)
        if volume.status == 'error':
            raise ex.SystemError(_("Volume %s has error status") % volume.id)

        context.sleep(1)

    resp = nova.client().volumes.create_server_volume(instance.instance_id,
                                                      volume.id, None)
    return resp.device
Example #11
    def _create_auto_security_group(self, node_group):
        name = g.generate_auto_security_group_name(node_group)
        nova_client = nova.client()
        security_group = nova_client.security_groups.create(
            name, "Auto security group created by Sahara for Node Group '%s' "
                  "of cluster '%s'." %
                  (node_group.name, node_group.cluster.name))

        # ssh remote needs ssh port, agents are not implemented yet
        nova_client.security_group_rules.create(
            security_group.id, 'tcp', SSH_PORT, SSH_PORT, "0.0.0.0/0")

        # open all traffic for private networks
        if CONF.use_neutron:
            for cidr in neutron.get_private_network_cidrs(node_group.cluster):
                for protocol in ['tcp', 'udp']:
                    nova_client.security_group_rules.create(
                        security_group.id, protocol, 1, 65535, cidr)

                nova_client.security_group_rules.create(
                    security_group.id, 'icmp', -1, -1, cidr)

        # enable ports returned by plugin
        for port in node_group.open_ports:
            nova_client.security_group_rules.create(
                security_group.id, 'tcp', port, port, "0.0.0.0/0")

        security_groups = list(node_group.security_groups or [])
        security_groups.append(security_group.id)
        conductor.node_group_update(context.ctx(), node_group,
                                    {"security_groups": security_groups})
        return security_groups
Example #12
    def test_list_registered_images(self, url_for_mock):
        self.override_config('auth_uri', 'https://127.0.0.1:8080/v3/',
                             'keystone_authtoken')
        some_images = [
            FakeImage('foo', ['bar', 'baz'], 'test'),
            FakeImage('baz', [], 'test'),
            FakeImage('spam', [], "")]

        with mock.patch('novaclient.v2.images.ImageManager.list',
                        return_value=some_images):
            nova = nova_client.client()

            images = nova.images.list_registered()
            self.assertEqual(2, len(images))

            images = nova.images.list_registered(name='foo')
            self.assertEqual(1, len(images))
            self.assertEqual('foo', images[0].name)
            self.assertEqual('test', images[0].username)

            images = nova.images.list_registered(name='eggs')
            self.assertEqual(0, len(images))

            images = nova.images.list_registered(tags=['bar'])
            self.assertEqual(1, len(images))
            self.assertEqual('foo', images[0].name)

            images = nova.images.list_registered(tags=['bar', 'eggs'])
            self.assertEqual(0, len(images))
Example #13
    def test_list_registered_images(self, url_for_mock):
        some_images = [
            FakeImage('foo', ['bar', 'baz'], 'test'),
            FakeImage('baz', [], 'test'),
            FakeImage('spam', [], "")
        ]

        with mock.patch('novaclient.v1_1.images.ImageManager.list',
                        return_value=some_images):
            nova = nova_client.client()

            images = nova.images.list_registered()
            self.assertEqual(2, len(images))

            images = nova.images.list_registered(name='foo')
            self.assertEqual(1, len(images))
            self.assertEqual('foo', images[0].name)
            self.assertEqual('test', images[0].username)

            images = nova.images.list_registered(name='eggs')
            self.assertEqual(0, len(images))

            images = nova.images.list_registered(tags=['bar'])
            self.assertEqual(1, len(images))
            self.assertEqual('foo', images[0].name)

            images = nova.images.list_registered(tags=['bar', 'eggs'])
            self.assertEqual(0, len(images))
Example #14
def _create_attach_volume(ctx,
                          instance,
                          size,
                          volume_type,
                          volume_local_to_instance,
                          name=None,
                          availability_zone=None):
    if CONF.cinder.api_version == 1:
        kwargs = {'size': size, 'display_name': name}
    else:
        kwargs = {'size': size, 'name': name}

    kwargs['volume_type'] = volume_type
    if availability_zone is not None:
        kwargs['availability_zone'] = availability_zone

    if volume_local_to_instance:
        kwargs['scheduler_hints'] = {'local_to_instance': instance.instance_id}

    volume = b.execute_with_retries(cinder.client().volumes.create, **kwargs)
    conductor.append_volume(ctx, instance, volume.id)
    _await_available(volume)

    resp = b.execute_with_retries(nova.client().volumes.create_server_volume,
                                  instance.instance_id, volume.id, None)
    return resp.device
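
Examples #14 and #59 call an _await_available helper whose body is not shown here. Based on the inline loop in Example #10, a plausible sketch (illustrative only, assuming the cinder, context, ex and _ names imported by the surrounding code) is:

def _await_available(volume):
    # Hypothetical sketch mirroring Example #10: poll Cinder until the volume
    # becomes 'available', failing fast if it enters the 'error' state.
    while volume.status != 'available':
        volume = cinder.get_volume(volume.id)
        if volume.status == 'error':
            raise ex.SystemError(_("Volume %s has error status") % volume.id)
        context.sleep(1)
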
Example #15
def check_security_groups_exist(security_groups):
    security_group_list = nova.client().security_groups.list()
    allowed_groups = set(reduce(
        operator.add, [[sg.id, sg.name] for sg in security_group_list], []))
    for sg in security_groups:
        if sg not in allowed_groups:
            raise ex.InvalidException(_("Security group '%s' not found") % sg)
Example #16
    def test_list_registered_images(self, url_for_mock):
        some_images = [
            FakeImage('foo', ['bar', 'baz'], 'test'),
            FakeImage('baz', [], 'test'),
            FakeImage('spam', [], "")]

        with mock.patch('novaclient.v2.images.ImageManager.list',
                        return_value=some_images):
            nova = nova_client.client()

            images = nova.images.list_registered()
            self.assertEqual(2, len(images))

            images = nova.images.list_registered(name='foo')
            self.assertEqual(1, len(images))
            self.assertEqual('foo', images[0].name)
            self.assertEqual('test', images[0].username)

            images = nova.images.list_registered(name='eggs')
            self.assertEqual(0, len(images))

            images = nova.images.list_registered(tags=['bar'])
            self.assertEqual(1, len(images))
            self.assertEqual('foo', images[0].name)

            images = nova.images.list_registered(tags=['bar', 'eggs'])
            self.assertEqual(0, len(images))
Example #17
def _create_attach_volume(ctx, instance, size, display_name=None,
                          volume_type=None):
    volume = cinder.client().volumes.create(size=size,
                                            display_name=display_name,
                                            volume_type=volume_type)
    conductor.append_volume(ctx, instance, volume.id)

    while volume.status != 'available':
        volume = cinder.get_volume(volume.id)
        if volume.status == 'error':
            raise ex.SystemError("Volume %s has error status" % volume.id)

        context.sleep(1)

    nova.client().volumes.create_server_volume(instance.instance_id,
                                               volume.id, None)
Example #18
    def _create_auto_security_group(self, node_group):
        name = g.generate_auto_security_group_name(node_group)
        nova_client = nova.client()
        security_group = nova_client.security_groups.create(
            name, "Auto security group created by Sahara for Node Group '%s' "
            "of cluster '%s'." % (node_group.name, node_group.cluster.name))

        # ssh remote needs ssh port, agents are not implemented yet
        nova_client.security_group_rules.create(security_group.id, 'tcp',
                                                SSH_PORT, SSH_PORT,
                                                "0.0.0.0/0")

        # open all traffic for private networks
        if CONF.use_neutron:
            for cidr in neutron.get_private_network_cidrs(node_group.cluster):
                for protocol in ['tcp', 'udp']:
                    nova_client.security_group_rules.create(
                        security_group.id, protocol, 1, 65535, cidr)

                nova_client.security_group_rules.create(
                    security_group.id, 'icmp', -1, -1, cidr)

        # enable ports returned by plugin
        for port in node_group.open_ports:
            nova_client.security_group_rules.create(security_group.id, 'tcp',
                                                    port, port, "0.0.0.0/0")

        security_groups = list(node_group.security_groups or [])
        security_groups.append(security_group.id)
        conductor.node_group_update(context.ctx(), node_group,
                                    {"security_groups": security_groups})
        return security_groups
Example #19
    def _delete_auto_security_group(self, node_group):
        if not node_group.auto_security_group:
            return

        if not node_group.security_groups:
            # node group has no security groups
            # nothing to delete
            return

        name = node_group.security_groups[-1]

        try:
            client = nova.client().security_groups
            security_group = client.get(name)
            if (security_group.name !=
                    g.generate_auto_security_group_name(node_group)):
                LOG.warning(
                    _LW("Auto security group for node group {name} is "
                        "not found").format(name=node_group.name))
                return
            client.delete(name)
        except Exception:
            LOG.warning(
                _LW("Failed to delete security group {name}").format(
                    name=name))
Example #20
    def test_list_registered_images(self, url_for_mock):
        self.override_config('auth_uri', 'https://127.0.0.1:8080/v3/',
                             'keystone_authtoken')
        some_images = [
            FakeImage('foo', ['bar', 'baz'], 'test'),
            FakeImage('baz', [], 'test'),
            FakeImage('spam', [], "")
        ]

        with mock.patch('novaclient.v2.images.ImageManager.list',
                        return_value=some_images):
            nova = nova_client.client()

            images = nova.images.list_registered()
            self.assertEqual(2, len(images))

            images = nova.images.list_registered(name='foo')
            self.assertEqual(1, len(images))
            self.assertEqual('foo', images[0].name)
            self.assertEqual('test', images[0].username)

            images = nova.images.list_registered(name='eggs')
            self.assertEqual(0, len(images))

            images = nova.images.list_registered(tags=['bar'])
            self.assertEqual(1, len(images))
            self.assertEqual('foo', images[0].name)

            images = nova.images.list_registered(tags=['bar', 'eggs'])
            self.assertEqual(0, len(images))
Example #21
    def _delete_aa_server_group(self, cluster):
        if cluster.anti_affinity:
            server_group_name = g.generate_aa_group_name(cluster.name)
            client = nova.client().server_groups

            server_groups = client.findall(name=server_group_name)
            if len(server_groups) == 1:
                client.delete(server_groups[0].id)
Example #22
def check_auto_security_group(cluster_name, nodegroup):
    if nodegroup.get('auto_security_group'):
        name = g.generate_auto_security_group_name(
            cluster_name, nodegroup['name'])
        if name in [security_group.name for security_group in
                    nova.client().security_groups.list()]:
            raise ex.NameAlreadyExistsException(
                _("Security group with name '%s' already exists") % name)
Example #23
def check_security_groups_exist(security_groups):
    security_group_list = nova.client().security_groups.list()
    allowed_groups = set(
        reduce(operator.add, [[six.text_type(sg.id), sg.name]
                              for sg in security_group_list], []))
    for sg in security_groups:
        if sg not in allowed_groups:
            raise ex.NotFoundException(sg, _("Security group '%s' not found"))
Example #24
    def _delete_aa_server_group(self, cluster):
        if cluster.anti_affinity:
            server_group_name = g.generate_aa_group_name(cluster.name)
            client = nova.client().server_groups

            server_groups = client.findall(name=server_group_name)
            if len(server_groups) == 1:
                client.delete(server_groups[0].id)
Example #25
def _get_nova_limits():
    limits = {}
    nova = nova_client.client()
    lim = b.execute_with_retries(nova.limits.get).to_dict()['absolute']
    limits['ram'] = _sub_limit(lim['maxTotalRAMSize'], lim['totalRAMUsed'])
    limits['cpu'] = _sub_limit(lim['maxTotalCores'], lim['totalCoresUsed'])
    limits['instances'] = _sub_limit(lim['maxTotalInstances'],
                                     lim['totalInstancesUsed'])
    return limits
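
Examples #25, #27 and #56 compute the remaining quota with a _sub_limit helper that is not listed here. A plausible sketch, assuming Nova reports -1 for an unlimited absolute limit (the UNLIMITED sentinel below is introduced only for this illustration, not taken from Sahara):

UNLIMITED = -1  # assumed sentinel meaning "no limit"

def _sub_limit(total, used):
    # Hypothetical sketch: keep unlimited quotas unlimited, otherwise return
    # the remaining headroom.
    if total == UNLIMITED:
        return UNLIMITED
    return total - used
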
Example #26
    def _delete_aa_server_group(self, cluster):
        if cluster.anti_affinity:
            server_group_name = g.generate_aa_group_name(cluster.name)
            client = nova.client().server_groups

            server_groups = b.execute_with_retries(client.findall,
                                                   name=server_group_name)
            if len(server_groups) == 1:
                b.execute_with_retries(client.delete, server_groups[0].id)
Example #27
def _get_nova_limits():
    limits = {}
    nova = nova_client.client()
    lim = b.execute_with_retries(nova.limits.get).to_dict()['absolute']
    limits['ram'] = _sub_limit(lim['maxTotalRAMSize'], lim['totalRAMUsed'])
    limits['cpu'] = _sub_limit(lim['maxTotalCores'], lim['totalCoresUsed'])
    limits['instances'] = _sub_limit(lim['maxTotalInstances'],
                                     lim['totalInstancesUsed'])
    return limits
Example #28
    def _delete_aa_server_group(self, cluster):
        if cluster.anti_affinity:
            server_group_name = g.generate_aa_group_name(cluster.name)
            client = nova.client().server_groups

            server_groups = b.execute_with_retries(client.findall,
                                                   name=server_group_name)
            if len(server_groups) == 1:
                b.execute_with_retries(client.delete, server_groups[0].id)
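
Examples #26 and #28 (and most of the later listings) wrap every client call in b.execute_with_retries. The real sahara.utils.openstack.base helper is more selective about which errors it retries and how long it waits; the sketch below only illustrates the general idea and retries any exception a fixed number of times.

import time

def execute_with_retries(method, *args, **kwargs):
    # Simplified illustration, not the real Sahara helper.
    attempts = 5  # assumed retry budget, not the real default
    for attempt in range(attempts):
        try:
            return method(*args, **kwargs)
        except Exception:
            if attempt == attempts - 1:
                raise
            time.sleep(1)  # assumed fixed back-off between attempts
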
Example #29
def check_security_groups_exist(security_groups):
    security_group_list = nova.client().security_groups.list()
    allowed_groups = set(reduce(
        operator.add, [[six.text_type(sg.id), sg.name]
                       for sg in security_group_list], []))
    for sg in security_groups:
        if sg not in allowed_groups:
            raise ex.NotFoundException(
                sg, _("Security group '%s' not found"))
Example #30
    def _run_instance(self,
                      cluster,
                      node_group,
                      idx,
                      aa_group=None,
                      old_aa_groups=None):
        """Create instance using nova client and persist them into DB."""
        ctx = context.ctx()
        name = g.generate_instance_name(cluster.name, node_group.name, idx)

        userdata = self._generate_user_data_script(node_group, name)

        if old_aa_groups:
            # aa_groups: node process -> instance ids
            aa_ids = []
            for node_process in node_group.node_processes:
                aa_ids += old_aa_groups.get(node_process) or []

            # create instances only at hosts w/ no instances
            # w/ aa-enabled processes
            hints = {'different_host': sorted(set(aa_ids))} if aa_ids else None
        else:
            hints = {
                'group': aa_group
            } if (aa_group
                  and self._need_aa_server_group(node_group)) else None

        security_groups = self._map_security_groups(node_group.security_groups)
        nova_kwargs = {
            'scheduler_hints': hints,
            'userdata': userdata,
            'key_name': cluster.user_keypair_id,
            'security_groups': security_groups
        }

        if CONF.use_neutron:
            net_id = cluster.neutron_management_network
            nova_kwargs['nics'] = [{"net-id": net_id, "v4-fixed-ip": ""}]

        nova_instance = nova.client().servers.create(name,
                                                     node_group.get_image_id(),
                                                     node_group.flavor_id,
                                                     **nova_kwargs)
        instance_id = conductor.instance_add(ctx, node_group, {
            "instance_id": nova_instance.id,
            "instance_name": name
        })

        if old_aa_groups:
            # save instance id to aa_groups to support aa feature
            for node_process in node_group.node_processes:
                if node_process in cluster.anti_affinity:
                    aa_group_ids = old_aa_groups.get(node_process, [])
                    aa_group_ids.append(nova_instance.id)
                    old_aa_groups[node_process] = aa_group_ids

        return instance_id
Example #31
def _create_attach_volume(ctx,
                          instance,
                          size,
                          display_name=None,
                          volume_type=None):
    volume = cinder.client().volumes.create(size=size,
                                            display_name=display_name,
                                            volume_type=volume_type)
    conductor.append_volume(ctx, instance, volume.id)

    while volume.status != 'available':
        volume = cinder.get_volume(volume.id)
        if volume.status == 'error':
            raise RuntimeError("Volume %s has error status" % volume.id)

        context.sleep(1)

    nova.client().volumes.create_server_volume(instance.instance_id, volume.id,
                                               None)
Example #32
def check_security_groups_exist(security_groups):
    security_group_list = nova.client().security_groups.list()
    allowed_groups = set()
    for sg in security_group_list:
        allowed_groups.add(six.text_type(sg.id))
        allowed_groups.add(sg.name)

    for sg in security_groups:
        if sg not in allowed_groups:
            raise ex.NotFoundException(sg, _("Security group '%s' not found"))
Example #33
def check_auto_security_group(cluster_name, nodegroup):
    if nodegroup.get('auto_security_group'):
        name = g.generate_auto_security_group_name(cluster_name,
                                                   nodegroup['name'])
        if name in [
                security_group.name
                for security_group in nova.client().security_groups.list()
        ]:
            raise ex.NameAlreadyExistsException(
                _("Security group with name '%s' already exists") % name)
Example #34
def check_security_groups_exist(security_groups):
    security_group_list = nova.client().security_groups.list()
    allowed_groups = set()
    for sg in security_group_list:
        allowed_groups.add(six.text_type(sg.id))
        allowed_groups.add(sg.name)

    for sg in security_groups:
        if sg not in allowed_groups:
            raise ex.NotFoundException(sg, _("Security group '%s' not found"))
Example #35
def check_floatingip_pool_exists(ng_name, pool_id):
    network = None
    if CONF.use_neutron:
        network = nova.get_network(id=pool_id)
    else:
        for net in nova.client().floating_ip_pools.list():
            if net.name == pool_id:
                network = net.name
                break

    if not network:
        raise ex.NotFoundException(pool_id, _("Floating IP pool %s not found"))
Example #36
    def _create_aa_server_group(self, cluster):
        server_group_name = g.generate_aa_group_name(cluster.name)
        client = nova.client().server_groups

        if client.findall(name=server_group_name):
            raise exc.InvalidDataException(
                _("Server group with name %s is already exists")
                % server_group_name)

        server_group = client.create(name=server_group_name,
                                     policies=['anti-affinity'])
        return server_group.id
Example #37
def check_floatingip_pool_exists(ng_name, pool_id):
    network = None
    if CONF.use_neutron:
        network = nova.get_network(id=pool_id)
    else:
        for net in nova.client().floating_ip_pools.list():
            if net.name == pool_id:
                network = net.name
                break

    if not network:
        raise ex.NotFoundException(pool_id, _("Floating IP pool %s not found"))
Example #38
    def _create_aa_server_group(self, cluster):
        server_group_name = g.generate_aa_group_name(cluster.name)
        client = nova.client().server_groups

        if client.findall(name=server_group_name):
            raise exc.InvalidDataException(
                _("Server group with name %s is already exists") %
                server_group_name)

        server_group = client.create(name=server_group_name,
                                     policies=['anti-affinity'])
        return server_group.id
Example #39
    def test_set_description(self, set_meta):
        with mock.patch('sahara.utils.openstack.base.url_for'):
            nova = nova_client.client()
            nova.images.set_description('id', 'ubuntu')
            self.assertEqual(
                ('id', {'_sahara_username': '******'}), set_meta.call_args[0])

            nova.images.set_description('id', 'ubuntu', 'descr')
            self.assertEqual(
                ('id', {'_sahara_description': 'descr',
                        '_sahara_username': '******'}),
                set_meta.call_args[0])
Example #40
def check_floatingip_pool_exists(ng_name, pool_id):
    network = None
    if CONF.use_neutron:
        network = nova.get_network(id=pool_id)
    else:
        for net in nova.client().floating_ip_pools.list():
            if net.name == pool_id:
                network = net.name
                break

    if not network:
        raise ex.InvalidException("Floating IP pool %s for node group "
                                  "'%s' not found" % (pool_id, ng_name))
Example #41
def check_floatingip_pool_exists(ng_name, pool_id):
    network = None
    if CONF.use_neutron:
        network = nova.get_network(id=pool_id)
    else:
        for net in nova.client().floating_ip_pools.list():
            if net.name == pool_id:
                network = net.name
                break

    if not network:
        raise ex.InvalidException("Floating IP pool %s for node group "
                                  "'%s' not found" % (pool_id, ng_name))
Example #42
def check_floatingip_pool_exists(ng_name, pool_id):
    network = None
    if CONF.use_neutron:
        network = nova.get_network(id=pool_id)
    else:
        # tmckay-fp, whoa, this suggests that we allow floating_ip_pools with
        # nova?  Can that be true? Scour for this
        for net in nova.client().floating_ip_pools.list():
            if net.name == pool_id:
                network = net.name
                break

    if not network:
        raise ex.NotFoundException(pool_id, _("Floating IP pool %s not found"))
Example #43
def check_floatingip_pool_exists(ng_name, pool_id):
    network = None
    if CONF.use_neutron:
        network = neutron.get_network(pool_id)
    else:
        # tmckay-fp, whoa, this suggests that we allow floating_ip_pools with
        # nova?  Can that be true? Scour for this
        for net in nova.client().floating_ip_pools.list():
            if net.name == pool_id:
                network = net.name
                break

    if not network:
        raise ex.NotFoundException(pool_id, _("Floating IP pool %s not found"))
Example #44
def check_floatingip_pool_exists(ng_name, pool_id):
    network = None
    if CONF.use_neutron:
        network = nova.get_network(id=pool_id)
    else:
        for net in nova.client().floating_ip_pools.list():
            if net.name == pool_id:
                network = net.name
                break

    if not network:
        raise ex.InvalidException(
            _("Floating IP pool %(pool)s for node group '%(group)s' "
              "not found") % {'pool': pool_id, 'group': ng_name})
Example #45
    def _shutdown_instance(self, instance):
        ctx = context.ctx()

        if instance.node_group.floating_ip_pool:
            try:
                networks.delete_floating_ip(instance.instance_id)
            except nova_exceptions.NotFound:
                LOG.warning(
                    _LW("Attempted to delete non-existent floating IP "
                        "in pool {pool} from instance").format(
                            pool=instance.node_group.floating_ip_pool))

        try:
            volumes.detach_from_instance(instance)
        except Exception:
            LOG.warning(_LW("Detaching volumes from instance failed"))

        try:
            nova.client().servers.delete(instance.instance_id)
        except nova_exceptions.NotFound:
            LOG.warning(_LW("Attempted to delete non-existent instance"))

        conductor.instance_remove(ctx, instance)
Example #46
    def _find_aa_server_group(self, cluster):
        server_group_name = g.generate_aa_group_name(cluster.name)
        server_groups = nova.client().server_groups.findall(
            name=server_group_name)

        if len(server_groups) > 1:
            raise exc.IncorrectStateError(
                _("Several server groups with name %s found") %
                server_group_name)

        if len(server_groups) == 1:
            return server_groups[0].id

        return None
Example #47
    def _find_aa_server_group(self, cluster):
        server_group_name = g.generate_aa_group_name(cluster.name)
        server_groups = nova.client().server_groups.findall(
            name=server_group_name)

        if len(server_groups) > 1:
            raise exc.IncorrectStateError(
                _("Several server groups with name %s found")
                % server_group_name)

        if len(server_groups) == 1:
            return server_groups[0].id

        return None
Example #48
def _create_attach_volume(ctx, instance, size, display_name=None):
    volume = cinder.client().volumes.create(size=size,
                                            display_name=display_name)
    conductor.append_volume(ctx, instance, volume.id)

    while volume.status != 'available':
        volume = cinder.get_volume(volume.id)
        if volume.status == 'error':
            raise ex.SystemError(_("Volume %s has error status") % volume.id)

        context.sleep(1)

    resp = nova.client().volumes.create_server_volume(instance.instance_id,
                                                      volume.id, None)
    return resp.device
Example #49
def _get_nova_limits():
    limits = {}
    nova = nova_client.client()
    lim = nova.limits.get().to_dict()['absolute']
    limits['ram'] = lim['maxTotalRAMSize'] - lim['totalRAMUsed']
    limits['cpu'] = lim['maxTotalCores'] - lim['totalCoresUsed']
    limits['instances'] = lim['maxTotalInstances'] - lim['totalInstancesUsed']
    if CONF.use_neutron:
        return limits
    if CONF.use_floating_ips:
        limits['floatingips'] = (
            lim['maxTotalFloatingIps'] - lim['totalFloatingIpsUsed'])
    limits['security_groups'] = (
        lim['maxSecurityGroups'] - lim['totalSecurityGroupsUsed'])
    limits['security_group_rules'] = lim['maxSecurityGroupRules']
    return limits
Example #50
def _get_nova_limits():
    limits = {}
    nova = nova_client.client()
    lim = nova.limits.get().to_dict()['absolute']
    limits['ram'] = lim['maxTotalRAMSize'] - lim['totalRAMUsed']
    limits['cpu'] = lim['maxTotalCores'] - lim['totalCoresUsed']
    limits['instances'] = lim['maxTotalInstances'] - lim['totalInstancesUsed']
    if CONF.use_neutron:
        return limits
    if CONF.use_floating_ips:
        limits['floatingips'] = (lim['maxTotalFloatingIps'] -
                                 lim['totalFloatingIpsUsed'])
    limits['security_groups'] = (lim['maxSecurityGroups'] -
                                 lim['totalSecurityGroupsUsed'])
    limits['security_group_rules'] = lim['maxSecurityGroupRules']
    return limits
Example #51
def check_floatingip_pool_exists(ng_name, pool_id):
    network = None
    if CONF.use_neutron:
        network = nova.get_network(id=pool_id)
    else:
        for net in nova.client().floating_ip_pools.list():
            if net.name == pool_id:
                network = net.name
                break

    if not network:
        raise ex.InvalidException(
            _("Floating IP pool %(pool)s for node group '%(group)s' "
              "not found") % {
                  'pool': pool_id,
                  'group': ng_name
              })
Example #52
    def _map_security_groups(self, security_groups):
        if not security_groups:
            # Nothing to do here
            return None

        if CONF.use_neutron:
            # When using Neutron, ids work fine.
            return security_groups
        else:
            # Nova Network requires that security groups are passed by names.
            # security_groups.get method accepts both ID and names, so in case
            # IDs are provided they will be converted, otherwise the names will
            # just map to themselves.
            names = []
            for group_id_or_name in security_groups:
                group = nova.client().security_groups.get(group_id_or_name)
                names.append(group.name)
            return names
Example #53
    def _shutdown_instance(self, instance):
        # Heat dissociates and deletes upon deletion of resources
        # See OS::Neutron::FloatingIP and OS::Neutron::FloatingIPAssociation
        if instance.node_group.floating_ip_pool:
            pass

        try:
            volumes.detach_from_instance(instance)
        except Exception:
            LOG.warning("Detaching volumes from instance failed")

        try:
            b.execute_with_retries(nova.client().servers.delete,
                                   instance.instance_id)
        except nova_exceptions.NotFound:
            LOG.warning("Attempted to delete non-existent instance")

        conductor.instance_remove(context.ctx(), instance)
Example #54
    def _map_security_groups(self, security_groups):
        if not security_groups:
            # Nothing to do here
            return None

        if CONF.use_neutron:
            # When using Neutron, ids work fine.
            return security_groups
        else:
            # Nova Network requires that security groups are passed by names.
            # security_groups.get method accepts both ID and names, so in case
            # IDs are provided they will be converted, otherwise the names will
            # just map to themselves.
            names = []
            for group_id_or_name in security_groups:
                group = nova.client().security_groups.get(group_id_or_name)
                names.append(group.name)
            return names
Пример #55
0
def _update_limits_for_ng(limits, ng, count):
    sign = lambda x: (1, -1)[x < 0]
    nova = nova_client.client()
    limits['instances'] += count
    flavor = b.execute_with_retries(nova.flavors.get, ng.flavor_id)
    limits['ram'] += flavor.ram * count
    limits['cpu'] += flavor.vcpus * count
    # tmckay-fp this is fine, it will be zero without it
    if ng.floating_ip_pool:
        limits['floatingips'] += count
    if ng.volumes_per_node:
        limits['volumes'] += ng.volumes_per_node * count
        limits['volume_gbs'] += ng.volumes_per_node * ng.volumes_size * count
    if ng.auto_security_group:
        limits['security_groups'] += sign(count)
        # NOTE: +3 - all traffic for private network
        limits['security_group_rules'] += (
            (len(ng.open_ports) + 3) * sign(count))
    limits['ports'] += count
Example #56
def _get_nova_limits():
    limits = {}
    nova = nova_client.client()
    lim = b.execute_with_retries(nova.limits.get).to_dict()['absolute']
    limits['ram'] = _sub_limit(lim['maxTotalRAMSize'], lim['totalRAMUsed'])
    limits['cpu'] = _sub_limit(lim['maxTotalCores'], lim['totalCoresUsed'])
    limits['instances'] = _sub_limit(lim['maxTotalInstances'],
                                     lim['totalInstancesUsed'])
    if CONF.use_neutron:
        return limits

    # tmckay-fp here we would just get the limits all the time
    limits['floatingips'] = _sub_limit(lim['maxTotalFloatingIps'],
                                       lim['totalFloatingIpsUsed'])
    limits['security_groups'] = _sub_limit(lim['maxSecurityGroups'],
                                           lim['totalSecurityGroupsUsed'])
    limits['security_group_rules'] = _sub_limit(lim['maxSecurityGroupRules'],
                                                0)
    return limits
Example #57
    def _delete_aa_server_groups(self, cluster):
        if cluster.anti_affinity:
            for i in range(1, cluster.anti_affinity_ratio):
                server_group_name = g.generate_aa_group_name(cluster.name, i)

                client = nova.client().server_groups

                server_groups = b.execute_with_retries(client.findall,
                                                       name=server_group_name)
                if len(server_groups) == 1:
                    b.execute_with_retries(client.delete, server_groups[0].id)
                # In case the server group is created
                # using mitaka or older version
                old_server_group_name = server_group_name.rsplit('-', 1)[0]
                server_groups_old = b.execute_with_retries(
                    client.findall, name=old_server_group_name)
                if len(server_groups_old) == 1:
                    b.execute_with_retries(client.delete,
                                           server_groups_old[0].id)
Example #58
    def _shutdown_instance(self, instance):
        if instance.node_group.floating_ip_pool:
            try:
                b.execute_with_retries(networks.delete_floating_ip,
                                       instance.instance_id)
            except nova_exceptions.NotFound:
                LOG.warning(_LW("Attempted to delete non-existent floating IP "
                                "in pool {pool} from instance")
                            .format(pool=instance.node_group.floating_ip_pool))

        try:
            volumes.detach_from_instance(instance)
        except Exception:
            LOG.warning(_LW("Detaching volumes from instance failed"))

        try:
            b.execute_with_retries(nova.client().servers.delete,
                                   instance.instance_id)
        except nova_exceptions.NotFound:
            LOG.warning(_LW("Attempted to delete non-existent instance"))

        conductor.instance_remove(context.ctx(), instance)
Example #59
def _create_attach_volume(ctx, instance, size, volume_type,
                          volume_local_to_instance, name=None,
                          availability_zone=None):
    if CONF.cinder.api_version == 1:
        kwargs = {'size': size, 'display_name': name}
    else:
        kwargs = {'size': size, 'name': name}

    kwargs['volume_type'] = volume_type
    if availability_zone is not None:
        kwargs['availability_zone'] = availability_zone

    if volume_local_to_instance:
        kwargs['scheduler_hints'] = {'local_to_instance': instance.instance_id}

    volume = b.execute_with_retries(cinder.client().volumes.create, **kwargs)
    conductor.append_volume(ctx, instance, volume.id)
    _await_available(volume)

    resp = b.execute_with_retries(nova.client().volumes.create_server_volume,
                                  instance.instance_id, volume.id, None)
    return resp.device