Example #1
    def _launch_instances(self, cluster, target_count, stages,
                          update_stack=False, disable_rollback=True,
                          instances_to_delete=None):
        # create all instances
        cluster = c_u.change_cluster_status(cluster, stages[0])

        inst_ids = self._create_instances(
            cluster, target_count, update_stack, disable_rollback,
            instances_to_delete)

        # wait until all instances are up and networks are ready
        cluster = c_u.change_cluster_status(cluster, stages[1])

        instances = c_u.get_instances(cluster, inst_ids)

        self._await_networks(cluster, instances)

        # prepare all instances
        cluster = c_u.change_cluster_status(cluster, stages[2])

        instances = c_u.get_instances(cluster, inst_ids)

        volumes.mount_to_instances(instances)

        self._configure_instances(cluster)

        return inst_ids
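The three entries of stages drive the status transitions above: create, wait, prepare. A plausible call site for initial cluster creation, assuming the same status progression shown in Example #6 (hypothetical sketch, not taken from the source):

        # Hypothetical call site inside the engine, reusing the
        # SPAWNING -> WAITING -> PREPARING progression of Example #6.
        stages = (c_u.CLUSTER_STATUS_SPAWNING,
                  c_u.CLUSTER_STATUS_WAITING,
                  c_u.CLUSTER_STATUS_PREPARING)
        inst_ids = self._launch_instances(cluster, target_count, stages)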
Example #2
def _provision_scaled_cluster(cluster_id, node_group_id_map):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    # Decommissioning surplus nodes with the plugin
    cluster = c_u.change_cluster_status(cluster,
                                        c_u.CLUSTER_STATUS_DECOMMISSIONING)

    instances_to_delete = []

    for node_group in cluster.node_groups:
        new_count = node_group_id_map[node_group.id]
        if new_count < node_group.count:
            instances_to_delete += node_group.instances[new_count:
                                                        node_group.count]

    if instances_to_delete:
        context.set_step_type(_("Plugin: decommission cluster"))
        plugin.decommission_nodes(cluster, instances_to_delete)

    # Scaling infrastructure
    cluster = c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_SCALING)
    context.set_step_type(_("Engine: scale cluster"))
    instance_ids = INFRA.scale_cluster(cluster, node_group_id_map)

    # Setting up new nodes with the plugin
    if instance_ids:
        ntp_service.configure_ntp(cluster_id)
        cluster = c_u.change_cluster_status(cluster,
                                            c_u.CLUSTER_STATUS_CONFIGURING)
        instances = c_u.get_instances(cluster, instance_ids)
        context.set_step_type(_("Plugin: scale cluster"))
        plugin.scale_cluster(cluster, instances)

    c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE)
    _refresh_health_for_cluster(cluster_id)
Example #3
def _provision_scaled_cluster(cluster_id, node_group_id_map):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    # Decommissioning surplus nodes with the plugin
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_DECOMMISSIONING)

    instances_to_delete = []

    for node_group in cluster.node_groups:
        new_count = node_group_id_map[node_group.id]
        if new_count < node_group.count:
            instances_to_delete += node_group.instances[new_count:
                                                        node_group.count]

    if instances_to_delete:
        context.set_step_type(_("Plugin: decommission cluster"))
        plugin.decommission_nodes(cluster, instances_to_delete)

    # Scaling infrastructure
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_SCALING)
    context.set_step_type(_("Engine: scale cluster"))
    instance_ids = INFRA.scale_cluster(cluster, node_group_id_map)

    # Setting up new nodes with the plugin
    if instance_ids:
        ntp_service.configure_ntp(cluster_id)
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_CONFIGURING)
        instances = c_u.get_instances(cluster, instance_ids)
        context.set_step_type(_("Plugin: scale cluster"))
        plugin.scale_cluster(cluster, instances)

    c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE)
Example #4
    def _launch_instances(self,
                          cluster,
                          target_count,
                          stages,
                          update_stack=False,
                          disable_rollback=True):
        # create all instances
        cluster = c_u.change_cluster_status(cluster, stages[0])

        inst_ids = self._create_instances(cluster, target_count, update_stack,
                                          disable_rollback)

        # wait until all instances are up and networks are ready
        cluster = c_u.change_cluster_status(cluster, stages[1])

        instances = c_u.get_instances(cluster, inst_ids)

        self._await_networks(cluster, instances)

        # prepare all instances
        cluster = c_u.change_cluster_status(cluster, stages[2])

        instances = c_u.get_instances(cluster, inst_ids)

        volumes.mount_to_instances(instances)

        self._configure_instances(cluster)

        return inst_ids
Example #5
    def _scale_cluster_instances(self, cluster, node_group_id_map):
        ctx = context.ctx()

        aa_group = None
        old_aa_groups = None
        if cluster.anti_affinity:
            aa_group = self._find_aa_server_group(cluster)
            if not aa_group:
                old_aa_groups = self._generate_anti_affinity_groups(cluster)

        instances_to_delete = []
        node_groups_to_enlarge = set()
        node_groups_to_delete = set()

        for node_group in cluster.node_groups:
            new_count = node_group_id_map[node_group.id]

            if new_count < node_group.count:
                instances_to_delete += node_group.instances[new_count:
                                                            node_group.count]
                if new_count == 0:
                    node_groups_to_delete.add(node_group.id)
            elif new_count > node_group.count:
                node_groups_to_enlarge.add(node_group.id)
                if node_group.count == 0 and node_group.auto_security_group:
                    self._create_auto_security_group(node_group)

        if instances_to_delete:
            cluster = c_u.change_cluster_status(
                cluster, c_u.CLUSTER_STATUS_DELETING_INSTANCES)

            for instance in instances_to_delete:
                with context.set_current_instance_id(instance.instance_id):
                    self._shutdown_instance(instance)

        self._await_deleted(cluster, instances_to_delete)
        for ng in cluster.node_groups:
            if ng.id in node_groups_to_delete:
                self._delete_auto_security_group(ng)

        cluster = conductor.cluster_get(ctx, cluster)
        instances_to_add = []
        if node_groups_to_enlarge:

            cpo.add_provisioning_step(
                cluster.id, _("Add instances"),
                self._count_instances_to_scale(
                    node_groups_to_enlarge, node_group_id_map, cluster))

            cluster = c_u.change_cluster_status(
                cluster, c_u.CLUSTER_STATUS_ADDING_INSTANCES)
            for ng in cluster.node_groups:
                if ng.id in node_groups_to_enlarge:
                    count = node_group_id_map[ng.id]
                    for idx in six.moves.xrange(ng.count + 1, count + 1):
                        instance_id = self._start_instance(
                            cluster, ng, idx, aa_group, old_aa_groups)
                        instances_to_add.append(instance_id)

        return instances_to_add
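The _count_instances_to_scale helper is not shown in this listing; from the call site it must return the total number of instances the "Add instances" provisioning step will create. A minimal sketch under that assumption (hypothetical reconstruction, not the project's code):

    def _count_instances_to_scale(self, node_groups_to_enlarge,
                                  node_group_id_map, cluster):
        # Hypothetical sketch: sum the growth of every node group that
        # is being enlarged (target count minus current count).
        return sum(node_group_id_map[ng.id] - ng.count
                   for ng in cluster.node_groups
                   if ng.id in node_groups_to_enlarge)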
Example #6
    def create_cluster(self, cluster):
        _warning_logger()
        ctx = context.ctx()
        self._update_rollback_strategy(cluster, shutdown=True)

        # create all instances
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_SPAWNING)
        self._create_instances(cluster)

        # wait until all instances are up and networks are ready
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_WAITING)
        instances = c_u.get_instances(cluster)

        self._await_active(cluster, instances)

        self._assign_floating_ips(instances)

        self._await_networks(cluster, instances)

        cluster = conductor.cluster_get(ctx, cluster)

        # attach volumes
        volumes.attach_to_instances(c_u.get_instances(cluster))

        # prepare all instances
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_PREPARING)

        self._configure_instances(cluster)

        self._update_rollback_strategy(cluster)
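_update_rollback_strategy is called with shutdown=True before provisioning starts and with no extra arguments once the cluster is ready, so it presumably persists the hints that _rollback_cluster later consumes in the error handlers of Examples #21 and #22. A hedged sketch, assuming the conductor update API used elsewhere in these examples:

    def _update_rollback_strategy(self, cluster, **rollback_info):
        # Hypothetical sketch: store the rollback hints on the cluster,
        # or clear them when called with no keyword arguments.
        ctx = context.ctx()
        conductor.cluster_update(
            ctx, cluster, {'rollback_info': rollback_info or None})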
Example #7
 def test_change_cluster_status(self):
     cluster = self._make_sample()
     cluster = cluster_utils.change_cluster_status(
         cluster, "Deleting", "desc")
     self.assertEqual("Deleting", cluster.status)
     self.assertEqual("desc", cluster.status_description)
     cluster_utils.change_cluster_status(cluster, "Spawning")
     self.assertEqual("Deleting", cluster.status)
Example #8
 def test_change_cluster_status(self):
     cluster = self._make_sample()
     cluster = cluster_utils.change_cluster_status(
         cluster, cluster_utils.CLUSTER_STATUS_DELETING, "desc")
     self.assertEqual(cluster_utils.CLUSTER_STATUS_DELETING, cluster.status)
     self.assertEqual("desc", cluster.status_description)
     cluster_utils.change_cluster_status(
         cluster, cluster_utils.CLUSTER_STATUS_SPAWNING)
     self.assertEqual(cluster_utils.CLUSTER_STATUS_DELETING, cluster.status)
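The test pins down the contract of change_cluster_status: it returns the updated cluster, accepts an optional status description, and ignores any transition requested once the cluster is in the Deleting state. A minimal sketch consistent with that behavior and with the call sites below that pass an id and check for None (hypothetical, not the real implementation; conductor/context helpers as used throughout these examples):

def change_cluster_status(cluster, status, status_description=None):
    ctx = context.ctx()
    # Accept either a cluster object or an id; this may return None if
    # the cluster has already been removed.
    cluster = conductor.cluster_get(ctx, cluster)
    if cluster is None or cluster.status == CLUSTER_STATUS_DELETING:
        return cluster  # Deleting is terminal: refuse the transition
    update = {'status': status}
    if status_description is not None:
        update['status_description'] = status_description
    return conductor.cluster_update(ctx, cluster, update)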
Example #9
def scale_cluster(id, data):
    context.set_current_cluster_id(id)
    ctx = context.ctx()

    cluster = conductor.cluster_get(ctx, id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    existing_node_groups = data.get('resize_node_groups', [])
    additional_node_groups = data.get('add_node_groups', [])

    # the following map is the main object we will work with:
    # to_be_enlarged: {node_group_id: desired number of instances}
    to_be_enlarged = {}
    node_group_instance_map = {}
    for ng in existing_node_groups:
        ng_id = g.find(cluster.node_groups, name=ng['name'])['id']
        to_be_enlarged.update({ng_id: ng['count']})
        if 'instances' in ng:
            node_group_instance_map.update({ng_id: ng['instances']})

    additional = construct_ngs_for_scaling(cluster, additional_node_groups)
    cluster = conductor.cluster_get(ctx, cluster)
    _add_ports_for_auto_sg(ctx, cluster, plugin)

    try:
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_VALIDATING)
        quotas.check_scaling(cluster, to_be_enlarged, additional)
        plugin.recommend_configs(cluster, scaling=True)
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            c_u.clean_cluster_from_empty_ng(cluster)
            c_u.change_cluster_status(
                cluster, c_u.CLUSTER_STATUS_ACTIVE, six.text_type(e))

    # If we got here, validation succeeded,
    # so update the to_be_enlarged map:
    to_be_enlarged.update(additional)

    for node_group in cluster.node_groups:
        if node_group.id not in to_be_enlarged:
            to_be_enlarged[node_group.id] = node_group.count

    api.OPS.provision_scaled_cluster(id, to_be_enlarged,
                                     node_group_instance_map)
    return cluster
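The shape of the scale-request body can be read off the keys accessed above; 'instances' pins specific instances for removal (see Example #24). A hypothetical request, with node-group fields beyond 'name' and 'count' omitted since they are not visible in this listing:

    # Hypothetical payload: shrink 'worker' to 2 instances, preferably
    # dropping the named instance, and add one brand-new node group.
    data = {
        'resize_node_groups': [
            {'name': 'worker',
             'count': 2,
             'instances': ['cluster-worker-003']},  # optional pin-down
        ],
        'add_node_groups': [
            {'name': 'extra-worker', 'count': 1},  # template fields omitted
        ],
    }
    cluster = scale_cluster(cluster_id, data)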
Example #10
def scale_cluster(id, data):
    context.set_current_cluster_id(id)
    ctx = context.ctx()

    cluster = conductor.cluster_get(ctx, id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    existing_node_groups = data.get('resize_node_groups', [])
    additional_node_groups = data.get('add_node_groups', [])

    # the following map is the main object we will work with:
    # to_be_enlarged: {node_group_id: desired number of instances}
    to_be_enlarged = {}
    node_group_instance_map = {}
    for ng in existing_node_groups:
        ng_id = g.find(cluster.node_groups, name=ng['name'])['id']
        to_be_enlarged.update({ng_id: ng['count']})
        if 'instances' in ng:
            node_group_instance_map.update({ng_id: ng['instances']})

    additional = construct_ngs_for_scaling(cluster, additional_node_groups)
    cluster = conductor.cluster_get(ctx, cluster)
    _add_ports_for_auto_sg(ctx, cluster, plugin)

    try:
        cluster = c_u.change_cluster_status(cluster,
                                            c_u.CLUSTER_STATUS_VALIDATING)
        quotas.check_scaling(cluster, to_be_enlarged, additional)
        plugin.recommend_configs(cluster, scaling=True)
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            c_u.clean_cluster_from_empty_ng(cluster)
            c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE,
                                      six.text_type(e))

    # If we got here, validation succeeded,
    # so update the to_be_enlarged map:
    to_be_enlarged.update(additional)

    for node_group in cluster.node_groups:
        if node_group.id not in to_be_enlarged:
            to_be_enlarged[node_group.id] = node_group.count

    api.OPS.provision_scaled_cluster(id, to_be_enlarged,
                                     node_group_instance_map)
    return cluster
Example #11
def terminate_cluster(id, force=False):
    context.set_current_cluster_id(id)
    cluster = c_u.change_cluster_status(id, c_u.CLUSTER_STATUS_DELETING)

    if cluster is None:
        return

    api.OPS.terminate_cluster(id, force)
    sender.status_notify(cluster.id, cluster.name, cluster.status, "delete")
Example #12
def terminate_cluster(id, force=False):
    context.set_current_cluster_id(id)
    cluster = c_u.change_cluster_status(id, c_u.CLUSTER_STATUS_DELETING)

    if cluster is None:
        return

    api.OPS.terminate_cluster(id, force)
    sender.status_notify(cluster.id, cluster.name, cluster.status,
                         "delete")
Example #13
File: ops.py Project: crobby/sahara
def _provision_cluster(cluster_id):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    try:
        cluster = _update_sahara_info(ctx, cluster)

        # updating cluster infra
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_INFRAUPDATING)
        plugin.update_infra(cluster)

        # creating instances and configuring them
        cluster = conductor.cluster_get(ctx, cluster_id)
        context.set_step_type(_("Engine: create cluster"))
        INFRA.create_cluster(cluster)

        # configure cluster
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_CONFIGURING)
        shares.mount_shares(cluster)

        context.set_step_type(_("Plugin: configure cluster"))
        plugin.configure_cluster(cluster)

        # starting prepared and configured cluster
        ntp_service.configure_ntp(cluster_id)
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_STARTING)

        context.set_step_type(_("Plugin: start cluster"))
        plugin.start_cluster(cluster)

        # cluster is now up and ready
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_ACTIVE)

        # schedule any pending job executions for the cluster
        for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
            job_manager.run_job(je.id)

    finally:
        if CONF.use_identity_api_v3 and not cluster.is_transient:
            trusts.delete_trust_from_cluster(cluster)
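_prepare_provisioning is not shown in this listing; Example #9 fetches the same three objects by hand, which suggests the following shape (hypothetical reconstruction):

def _prepare_provisioning(cluster_id):
    # Hypothetical sketch: resolve the context, the cluster, and the
    # plugin that will drive the provisioning steps.
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    return ctx, cluster, plugin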
Example #14
File: api.py Project: crobby/sahara
def terminate_cluster(id):
    context.set_current_cluster_id(id)
    cluster = c_u.change_cluster_status(id, c_u.CLUSTER_STATUS_DELETING)

    if cluster is None:
        return

    OPS.terminate_cluster(id)
    sender.notify(context.ctx(), cluster.id, cluster.name, cluster.status,
                  "delete")
Example #15
File: ops.py Project: uladz/sahara
def _provision_cluster(cluster_id):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    try:
        cluster = _update_sahara_info(ctx, cluster)

        # updating cluster infra
        cluster = c_u.change_cluster_status(cluster,
                                            c_u.CLUSTER_STATUS_INFRAUPDATING)
        plugin.update_infra(cluster)

        # creating instances and configuring them
        cluster = conductor.cluster_get(ctx, cluster_id)
        context.set_step_type(_("Engine: create cluster"))
        INFRA.create_cluster(cluster)

        # configure cluster
        cluster = c_u.change_cluster_status(cluster,
                                            c_u.CLUSTER_STATUS_CONFIGURING)
        shares.mount_shares(cluster)

        context.set_step_type(_("Plugin: configure cluster"))
        plugin.configure_cluster(cluster)

        # starting prepared and configured cluster
        ntp_service.configure_ntp(cluster_id)
        cluster = c_u.change_cluster_status(cluster,
                                            c_u.CLUSTER_STATUS_STARTING)

        context.set_step_type(_("Plugin: start cluster"))
        plugin.start_cluster(cluster)

        # cluster is now up and ready
        cluster = c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE)

        # schedule any pending job executions for the cluster
        for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
            job_manager.run_job(je.id)

    finally:
        if CONF.use_identity_api_v3 and not cluster.is_transient:
            trusts.delete_trust_from_cluster(cluster)
Example #16
def _provision_cluster(cluster_id):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    cluster = _update_sahara_info(ctx, cluster)

    # updating cluster infra
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_INFRAUPDATING)
    plugin.update_infra(cluster)

    # creating instances and configuring them
    cluster = conductor.cluster_get(ctx, cluster_id)
    context.set_step_type(_("Engine: create cluster"))
    INFRA.create_cluster(cluster)
    ntp_service.configure_ntp(cluster_id)

    # configure cluster
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_CONFIGURING)
    context.set_step_type(_("Plugin: configure cluster"))
    if hasattr(plugin, 'validate_images'):
        plugin.validate_images(cluster, test_only=False)
    shares.mount_shares(cluster)
    plugin.configure_cluster(cluster)

    # starting prepared and configured cluster
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_STARTING)

    context.set_step_type(_("Plugin: start cluster"))
    plugin.start_cluster(cluster)

    # cluster is now up and ready
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_ACTIVE)

    # schedule any pending job executions for the cluster
    for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
        job_manager.run_job(je.id)

    _refresh_health_for_cluster(cluster_id)
Example #17
def _provision_cluster(cluster_id):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    cluster = _update_sahara_info(ctx, cluster)

    # updating cluster infra
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_INFRAUPDATING)
    plugin.update_infra(cluster)

    # creating instances and configuring them
    cluster = conductor.cluster_get(ctx, cluster_id)
    context.set_step_type(_("Engine: create cluster"))
    INFRA.create_cluster(cluster)

    # configure cluster
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_CONFIGURING)
    context.set_step_type(_("Plugin: configure cluster"))
    if hasattr(plugin, 'validate_images'):
        plugin.validate_images(cluster, test_only=False)
    shares.mount_shares(cluster)
    plugin.configure_cluster(cluster)

    # starting prepared and configured cluster
    ntp_service.configure_ntp(cluster_id)
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_STARTING)

    context.set_step_type(_("Plugin: start cluster"))
    plugin.start_cluster(cluster)

    # cluster is now up and ready
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_ACTIVE)

    # schedule any pending job executions for the cluster
    for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
        job_manager.run_job(je.id)

    _refresh_health_for_cluster(cluster_id)
Example #18
def _cluster_create(values, plugin):
    ctx = context.ctx()
    cluster = conductor.cluster_create(ctx, values)
    context.set_current_cluster_id(cluster.id)
    sender.status_notify(cluster.id, cluster.name, "New", "create")
    _add_ports_for_auto_sg(ctx, cluster, plugin)

    # validating cluster
    try:
        plugin.recommend_configs(cluster)
        cluster = c_u.change_cluster_status(cluster,
                                            c_u.CLUSTER_STATUS_VALIDATING)
        plugin.validate(cluster)
        quotas.check_cluster(cluster)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ERROR,
                                      six.text_type(e))

    api.OPS.provision_cluster(cluster.id)

    return cluster
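The except clause both records the failure on the cluster and re-raises it. excutils.save_and_reraise_exception from oslo.utils is what makes that safe: the with-body runs its cleanup, and the original exception then propagates unchanged. A standalone illustration (function names are placeholders):

from oslo_utils import excutils

try:
    validate()                 # anything raised here is not swallowed
except Exception:
    with excutils.save_and_reraise_exception():
        record_error_status()  # cleanup runs first, then the original
                               # exception is re-raised to the caller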
Example #19
def _cluster_create(values, plugin):
    ctx = context.ctx()
    cluster = conductor.cluster_create(ctx, values)
    context.set_current_cluster_id(cluster.id)
    sender.status_notify(cluster.id, cluster.name, "New",
                         "create")
    _add_ports_for_auto_sg(ctx, cluster, plugin)

    # validating cluster
    try:
        plugin.recommend_configs(cluster)
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_VALIDATING)
        plugin.validate(cluster)
        quotas.check_cluster(cluster)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            c_u.change_cluster_status(
                cluster, c_u.CLUSTER_STATUS_ERROR, six.text_type(e))

    api.OPS.provision_cluster(cluster.id)

    return cluster
Example #20
def _provision_scaled_cluster(cluster_id,
                              node_group_id_map,
                              node_group_instance_map=None):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    # Decommissioning surplus nodes with the plugin
    cluster = c_u.change_cluster_status(cluster,
                                        c_u.CLUSTER_STATUS_DECOMMISSIONING)

    try:
        instances_to_delete = []
        for node_group in cluster.node_groups:
            new_count = node_group_id_map[node_group.id]
            if new_count < node_group.count:
                if (node_group_instance_map
                        and node_group.id in node_group_instance_map):
                    for instance_ref in node_group_instance_map[node_group.id]:
                        instance = _get_instance_obj(node_group.instances,
                                                     instance_ref)
                        instances_to_delete.append(instance)

                while node_group.count - new_count > len(instances_to_delete):
                    instances_to_delete.append(
                        _get_random_instance_from_ng(node_group.instances,
                                                     instances_to_delete))

        if instances_to_delete:
            context.set_step_type(_("Plugin: decommission cluster"))
            plugin.decommission_nodes(cluster, instances_to_delete)

        # Scaling infrastructure
        cluster = c_u.change_cluster_status(cluster,
                                            c_u.CLUSTER_STATUS_SCALING)
        context.set_step_type(_("Engine: scale cluster"))
        instance_ids = INFRA.scale_cluster(cluster, node_group_id_map,
                                           instances_to_delete)
        # Setting up new nodes with the plugin
        if instance_ids:
            ntp_service.configure_ntp(cluster_id, instance_ids)
            cluster = c_u.change_cluster_status(cluster,
                                                c_u.CLUSTER_STATUS_CONFIGURING)
            instances = c_u.get_instances(cluster, instance_ids)
            context.set_step_type(_("Plugin: scale cluster"))
            plugin.scale_cluster(cluster, instances)

        c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE)
        _refresh_health_for_cluster(cluster_id)

    except Exception as e:
        c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE,
                                  six.text_type(e))
Example #21
File: ops.py Project: uladz/sahara
        def wrapper(cluster_id, *args, **kwds):
            ctx = context.ctx()
            try:
                # Clearing status description before executing
                c_u.change_cluster_status_description(cluster_id, "")
                f(cluster_id, *args, **kwds)
            except Exception as ex:
                # something happened during cluster operation
                cluster = conductor.cluster_get(ctx, cluster_id)
                # check if cluster still exists (it might have been removed)
                if (cluster is None
                        or cluster.status == c_u.CLUSTER_STATUS_DELETING):
                    LOG.debug("Cluster was deleted or marked for deletion. "
                              "Canceling current operation.")
                    return

                msg = six.text_type(ex)
                LOG.exception(
                    _LE("Error during operating on cluster (reason: "
                        "{reason})").format(reason=msg))

                try:
                    # trying to rollback
                    desc = description.format(reason=msg)
                    if _rollback_cluster(cluster, ex):
                        c_u.change_cluster_status(cluster,
                                                  c_u.CLUSTER_STATUS_ACTIVE,
                                                  desc)
                    else:
                        c_u.change_cluster_status(cluster,
                                                  c_u.CLUSTER_STATUS_ERROR,
                                                  desc)
                except Exception as rex:
                    cluster = conductor.cluster_get(ctx, cluster_id)
                    # check if cluster still exists (it might have been
                    # removed during rollback)
                    if (cluster is None
                            or cluster.status == c_u.CLUSTER_STATUS_DELETING):
                        LOG.debug("Cluster was deleted or marked for deletion."
                                  " Canceling current operation.")
                        return

                    LOG.exception(
                        _LE("Error during rollback of cluster (reason:"
                            " {reason})").format(reason=six.text_type(rex)))
                    desc = "{0}, {1}".format(msg, six.text_type(rex))
                    c_u.change_cluster_status(cluster,
                                              c_u.CLUSTER_STATUS_ERROR,
                                              description.format(reason=desc))
Example #22
        def wrapper(cluster_id, *args, **kwds):
            ctx = context.ctx()
            try:
                # Clearing status description before executing
                c_u.change_cluster_status_description(cluster_id, "")
                f(cluster_id, *args, **kwds)
            except Exception as ex:
                # something happened during cluster operation
                cluster = conductor.cluster_get(ctx, cluster_id)
                # check if cluster still exists (it might have been removed)
                if (cluster is None or
                        cluster.status == c_u.CLUSTER_STATUS_DELETING):
                    LOG.debug("Cluster was deleted or marked for deletion. "
                              "Canceling current operation.")
                    return

                msg = six.text_type(ex)
                LOG.exception("Error during operating on cluster (reason: "
                              "{reason})".format(reason=msg))

                try:
                    # trying to rollback
                    desc = description.format(reason=msg)
                    if _rollback_cluster(cluster, ex):
                        c_u.change_cluster_status(
                            cluster, c_u.CLUSTER_STATUS_ACTIVE, desc)
                    else:
                        c_u.change_cluster_status(
                            cluster, c_u.CLUSTER_STATUS_ERROR, desc)
                except Exception as rex:
                    cluster = conductor.cluster_get(ctx, cluster_id)
                    # check if cluster still exists (it might have been
                    # removed during rollback)
                    if (cluster is None or
                            cluster.status == c_u.CLUSTER_STATUS_DELETING):
                        LOG.debug("Cluster was deleted or marked for deletion."
                                  " Canceling current operation.")
                        return

                    LOG.exception(
                        "Error during rollback of cluster (reason:"
                        " {reason})".format(reason=six.text_type(rex)))
                    desc = "{0}, {1}".format(msg, six.text_type(rex))
                    c_u.change_cluster_status(
                        cluster, c_u.CLUSTER_STATUS_ERROR,
                        description.format(reason=desc))
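Both wrapper snippets close over f and description, so they live inside a decorator factory applied to cluster operations such as _provision_cluster. The enclosing shape would look roughly like this (hypothetical sketch, including the ops_error_handler name; only the inner body is shown in Examples #21 and #22):

import functools

def ops_error_handler(description):
    # Hypothetical factory: 'description' becomes the status-message
    # template, 'f' the wrapped cluster operation.
    def decorator(f):
        @functools.wraps(f)
        def wrapper(cluster_id, *args, **kwds):
            pass  # body as shown in Example #22
        return wrapper
    return decorator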
Example #23
    def scale_cluster(self, cluster, node_group_id_map):
        _warning_logger()
        ctx = context.ctx()
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_SCALING_SPAWNING)

        instance_ids = self._scale_cluster_instances(cluster,
                                                     node_group_id_map)

        self._update_rollback_strategy(cluster, instance_ids=instance_ids)

        cluster = conductor.cluster_get(ctx, cluster)
        c_u.clean_cluster_from_empty_ng(cluster)

        cluster = conductor.cluster_get(ctx, cluster)
        instances = c_u.get_instances(cluster, instance_ids)

        self._await_active(cluster, instances)

        self._assign_floating_ips(instances)

        self._await_networks(cluster, instances)

        cluster = conductor.cluster_get(ctx, cluster)

        volumes.attach_to_instances(
            c_u.get_instances(cluster, instance_ids))

        # we should get here with a valid cluster: if instance creation
        # failed, all extra instances have already been removed above
        if instance_ids:
            self._configure_instances(cluster)

        self._update_rollback_strategy(cluster)

        return instance_ids
Example #24
def _provision_scaled_cluster(cluster_id, node_group_id_map,
                              node_group_instance_map=None):
    """Provision scaled cluster.

    :param cluster_id: Id of cluster to be scaled.

    :param node_group_id_map: Dictionary in the format
                   node_group_id: number of instances.

    :param node_group_instance_map: Specifies the instances to be removed in
                   each node group.
    """
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    # Decommissioning surplus nodes with the plugin
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_DECOMMISSIONING)

    try:
        instances_to_delete = []
        for node_group in cluster.node_groups:
            ng_inst_to_delete_count = 0
            # new_count is the target number of instances in this node group
            new_count = node_group_id_map[node_group.id]
            if new_count < node_group.count:
                # Add the explicitly selected instances to the delete list
                if _specific_inst_to_delete(node_group,
                                            node_group_instance_map):
                    for instance_ref in node_group_instance_map[node_group.id]:
                        instances_to_delete.append(_get_instance_obj(
                            node_group.instances, instance_ref))
                        ng_inst_to_delete_count += 1

                # Add random instances to the list when the number of
                # selected instances does not equal the difference between
                # the current count and the new count of instances.
                while node_group.count - new_count > ng_inst_to_delete_count:
                    instances_to_delete.append(_get_random_instance_from_ng(
                        node_group.instances, instances_to_delete))
                    ng_inst_to_delete_count += 1

        if instances_to_delete:
            context.set_step_type(_("Plugin: decommission cluster"))
            plugin.decommission_nodes(cluster, instances_to_delete)

        # Scaling infrastructure
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_SCALING)
        context.set_step_type(_("Engine: scale cluster"))
        instance_ids = INFRA.scale_cluster(cluster, node_group_id_map,
                                           instances_to_delete)
        # Setting up new nodes with the plugin
        if instance_ids:
            ntp_service.configure_ntp(cluster_id, instance_ids)
            cluster = c_u.change_cluster_status(
                cluster, c_u.CLUSTER_STATUS_CONFIGURING)
            instances = c_u.get_instances(cluster, instance_ids)
            context.set_step_type(_("Plugin: scale cluster"))
            plugin.scale_cluster(cluster, instances)

        c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE)
        _refresh_health_for_cluster(cluster_id)

    except Exception as e:
        c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE,
                                  six.text_type(e))
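Two helpers above are referenced but never shown: _get_instance_obj resolves an instance reference from the request to an instance object, and _get_random_instance_from_ng picks a not-yet-selected instance to delete. Minimal sketches under those assumptions (hypothetical reconstructions; the attribute names and error handling are placeholders):

import random

def _get_instance_obj(instances, instance_ref):
    # Hypothetical sketch: match the reference against id or name.
    for instance in instances:
        if instance_ref in (instance.instance_id,
                            instance.instance_name):
            return instance
    raise ValueError("no instance matching %r" % instance_ref)

def _get_random_instance_from_ng(instances, instances_to_delete):
    # Hypothetical sketch: choose any instance not already queued
    # for deletion.
    candidates = [i for i in instances
                  if i not in instances_to_delete]
    return random.choice(candidates) if candidates else None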