Example #1
def _provision_scaled_cluster(cluster_id, node_group_id_map):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    # Decommissioning surplus nodes with the plugin
    cluster = c_u.change_cluster_status(cluster,
                                        c_u.CLUSTER_STATUS_DECOMMISSIONING)

    instances_to_delete = []

    for node_group in cluster.node_groups:
        new_count = node_group_id_map[node_group.id]
        if new_count < node_group.count:
            instances_to_delete += node_group.instances[
                new_count:node_group.count]

    if instances_to_delete:
        context.set_step_type(_("Plugin: decommission cluster"))
        plugin.decommission_nodes(cluster, instances_to_delete)

    # Scaling infrastructure
    cluster = c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_SCALING)
    context.set_step_type(_("Engine: scale cluster"))
    instance_ids = INFRA.scale_cluster(cluster, node_group_id_map)

    # Setting up new nodes with the plugin
    if instance_ids:
        ntp_service.configure_ntp(cluster_id)
        cluster = c_u.change_cluster_status(cluster,
                                            c_u.CLUSTER_STATUS_CONFIGURING)
        instances = c_u.get_instances(cluster, instance_ids)
        context.set_step_type(_("Plugin: scale cluster"))
        plugin.scale_cluster(cluster, instances)

    c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE)
    _refresh_health_for_cluster(cluster_id)
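
The map argument drives both the shrink and grow paths above: each node group id maps to its desired instance count, and only groups whose new count is lower contribute to instances_to_delete. A hypothetical invocation (the ids below are illustrative placeholders, not real UUIDs) might look like:

# Hypothetical usage sketch for the function above; the ids are made up.
node_group_id_map = {
    'worker-ng-id': 3,   # shrink: surplus instances get decommissioned
    'master-ng-id': 1,   # unchanged: this group is left alone
}
_provision_scaled_cluster('cluster-id', node_group_id_map)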
Example #2
def test_configuring_ntp_unable_to_configure(self, cl_get, logger):
    instance = FakeInstance(["ubuntu", RuntimeError()], "1")
    ng = mock.Mock(instances=[instance])
    cl_get.return_value = mock.Mock(node_groups=[ng], cluster_configs={})
    ntp.configure_ntp('1')
    self.assertEqual([mock.call("Unable to configure NTP service")],
                     logger.call_args_list)
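
This test (and the success test in Example #4) relies on a FakeInstance test double and on mock.patch decorators that inject cl_get and logger; patches apply bottom-up, so the decorator nearest the def becomes the first argument after self. Neither the double nor the patch targets appear in the listing, so the sketch below is only one plausible shape, with every module path and behavior assumed:

class FakeInstance(object):
    # Assumed test double: the first canned reply stands in for an OS
    # probe ('ubuntu' here, 'centos' in the success test) and later
    # entries answer the remote commands configure_ntp issues. An
    # exception in the list is raised instead of returned, which is how
    # this test simulates a remote call that fails.
    def __init__(self, replies, instance_id):
        self.replies = list(replies)
        self.instance_id = instance_id

    def next_reply(self):
        reply = self.replies.pop(0)
        if isinstance(reply, Exception):
            raise reply
        return reply

# Hypothetical patch targets (the real module paths may differ):
# @mock.patch('sahara.service.ntp_service.LOG.info')
# @mock.patch('sahara.service.ntp_service.conductor.cluster_get')
# def test_configuring_ntp_unable_to_configure(self, cl_get, logger): ...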
Example #3
def _provision_scaled_cluster(cluster_id, node_group_id_map):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    # Decommissioning surplus nodes with the plugin
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_DECOMMISSIONING)

    instances_to_delete = []

    for node_group in cluster.node_groups:
        new_count = node_group_id_map[node_group.id]
        if new_count < node_group.count:
            instances_to_delete += node_group.instances[new_count:
                                                        node_group.count]

    if instances_to_delete:
        context.set_step_type(_("Plugin: decommission cluster"))
        plugin.decommission_nodes(cluster, instances_to_delete)

    # Scaling infrastructure
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_SCALING)
    context.set_step_type(_("Engine: scale cluster"))
    instance_ids = INFRA.scale_cluster(cluster, node_group_id_map)

    # Setting up new nodes with the plugin
    if instance_ids:
        ntp_service.configure_ntp(cluster_id)
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_CONFIGURING)
        instances = c_u.get_instances(cluster, instance_ids)
        context.set_step_type(_("Plugin: scale cluster"))
        plugin.scale_cluster(cluster, instances)

    c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE)
Example #4
def test_configuring_success(self, cl_get, logger):
    instance = FakeInstance(
        ['centos', "cat", "batman", "vs", "superman", "boom"], "1")
    ng = mock.Mock(instances=[instance])
    cl_get.return_value = mock.Mock(node_groups=[ng], cluster_configs={})
    ntp.configure_ntp('1')
    self.assertEqual([mock.call("NTP successfully configured")],
                     logger.call_args_list)
Example #5
def test_is_ntp_enabled(self, ntp_url, cl_get):
    cl = mock.Mock(
        cluster_configs={'general': {"Enable NTP service": False}})
    cl_get.return_value = cl
    ntp.configure_ntp('1')
    self.assertEqual(0, ntp_url.call_count)
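
Here the ntp_url mock never fires because the cluster config disables the service outright. A plausible guard inside configure_ntp, assuming the flag lives under the 'general' section and defaults to enabled when absent, is:

def _is_ntp_enabled(cluster):
    # Sketch only: read the "Enable NTP service" flag from the cluster's
    # general configs; a missing key is treated as enabled.
    general = cluster.cluster_configs.get('general', {})
    return general.get("Enable NTP service", True)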
Example #6
def _provision_scaled_cluster(cluster_id,
                              node_group_id_map,
                              node_group_instance_map=None):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    # Decommissioning surplus nodes with the plugin
    cluster = c_u.change_cluster_status(cluster,
                                        c_u.CLUSTER_STATUS_DECOMMISSIONING)

    try:
        instances_to_delete = []
        for node_group in cluster.node_groups:
            new_count = node_group_id_map[node_group.id]
            if new_count < node_group.count:
                if (node_group_instance_map
                        and node_group.id in node_group_instance_map):
                    for instance_ref in node_group_instance_map[node_group.id]:
                        instance = _get_instance_obj(node_group.instances,
                                                     instance_ref)
                        instances_to_delete.append(instance)

                while node_group.count - new_count > len(instances_to_delete):
                    instances_to_delete.append(
                        _get_random_instance_from_ng(node_group.instances,
                                                     instances_to_delete))

        if instances_to_delete:
            context.set_step_type(_("Plugin: decommission cluster"))
            plugin.decommission_nodes(cluster, instances_to_delete)

        # Scaling infrastructure
        cluster = c_u.change_cluster_status(cluster,
                                            c_u.CLUSTER_STATUS_SCALING)
        context.set_step_type(_("Engine: scale cluster"))
        instance_ids = INFRA.scale_cluster(cluster, node_group_id_map,
                                           instances_to_delete)
        # Setting up new nodes with the plugin
        if instance_ids:
            ntp_service.configure_ntp(cluster_id, instance_ids)
            cluster = c_u.change_cluster_status(cluster,
                                                c_u.CLUSTER_STATUS_CONFIGURING)
            instances = c_u.get_instances(cluster, instance_ids)
            context.set_step_type(_("Plugin: scale cluster"))
            plugin.scale_cluster(cluster, instances)

        c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE)
        _refresh_health_for_cluster(cluster_id)

    except Exception as e:
        c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE,
                                  six.text_type(e))
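
Example #6 introduces two helpers whose bodies are not part of the listing: _get_instance_obj resolves an instance reference inside a node group, and _get_random_instance_from_ng tops the list up with instances not yet selected. Note also that the while condition compares against the global length of instances_to_delete, which only works for the first shrinking node group; Example #12 tightens this with a per-node-group counter. A minimal sketch of the two helpers, consistent with their call sites but assumed rather than taken from the source, is:

import random


def _get_instance_obj(instances, instance_ref):
    # Sketch: match the reference against either the instance id or the
    # instance name, as either could be what the API caller supplied.
    for instance in instances:
        if instance_ref in (instance.instance_id, instance.instance_name):
            return instance
    raise ValueError("instance %s not found" % instance_ref)  # placeholder


def _get_random_instance_from_ng(instances, instances_to_delete):
    # Sketch: pick any instance not already marked for deletion; the
    # while loop above guarantees at least one candidate remains.
    candidates = [i for i in instances if i not in instances_to_delete]
    return random.choice(candidates)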
Example #7
File: ops.py Project: crobby/sahara
def _provision_cluster(cluster_id):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    try:
        cluster = _update_sahara_info(ctx, cluster)

        # updating cluster infra
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_INFRAUPDATING)
        plugin.update_infra(cluster)

        # creating instances and configuring them
        cluster = conductor.cluster_get(ctx, cluster_id)
        context.set_step_type(_("Engine: create cluster"))
        INFRA.create_cluster(cluster)

        # configure cluster
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_CONFIGURING)
        shares.mount_shares(cluster)

        context.set_step_type(_("Plugin: configure cluster"))
        plugin.configure_cluster(cluster)

        # starting prepared and configured cluster
        ntp_service.configure_ntp(cluster_id)
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_STARTING)

        context.set_step_type(_("Plugin: start cluster"))
        plugin.start_cluster(cluster)

        # cluster is now up and ready
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_ACTIVE)

        # schedule pending job executions for the cluster
        for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
            job_manager.run_job(je.id)

    finally:
        if CONF.use_identity_api_v3 and not cluster.is_transient:
            trusts.delete_trust_from_cluster(cluster)
Example #8
File: ops.py Project: uladz/sahara
def _provision_cluster(cluster_id):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    try:
        cluster = _update_sahara_info(ctx, cluster)

        # updating cluster infra
        cluster = c_u.change_cluster_status(cluster,
                                            c_u.CLUSTER_STATUS_INFRAUPDATING)
        plugin.update_infra(cluster)

        # creating instances and configuring them
        cluster = conductor.cluster_get(ctx, cluster_id)
        context.set_step_type(_("Engine: create cluster"))
        INFRA.create_cluster(cluster)

        # configure cluster
        cluster = c_u.change_cluster_status(cluster,
                                            c_u.CLUSTER_STATUS_CONFIGURING)
        shares.mount_shares(cluster)

        context.set_step_type(_("Plugin: configure cluster"))
        plugin.configure_cluster(cluster)

        # starting prepared and configured cluster
        ntp_service.configure_ntp(cluster_id)
        cluster = c_u.change_cluster_status(cluster,
                                            c_u.CLUSTER_STATUS_STARTING)

        context.set_step_type(_("Plugin: start cluster"))
        plugin.start_cluster(cluster)

        # cluster is now up and ready
        cluster = c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE)

        # schedule pending job executions for the cluster
        for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
            job_manager.run_job(je.id)

    finally:
        if CONF.use_identity_api_v3 and not cluster.is_transient:
            trusts.delete_trust_from_cluster(cluster)
Example #9
def _provision_cluster(cluster_id):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    cluster = _update_sahara_info(ctx, cluster)

    # updating cluster infra
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_INFRAUPDATING)
    plugin.update_infra(cluster)

    # creating instances and configuring them
    cluster = conductor.cluster_get(ctx, cluster_id)
    context.set_step_type(_("Engine: create cluster"))
    INFRA.create_cluster(cluster)
    ntp_service.configure_ntp(cluster_id)

    # configure cluster
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_CONFIGURING)
    context.set_step_type(_("Plugin: configure cluster"))
    if hasattr(plugin, 'validate_images'):
        plugin.validate_images(cluster, test_only=False)
    shares.mount_shares(cluster)
    plugin.configure_cluster(cluster)

    # starting prepared and configured cluster
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_STARTING)

    context.set_step_type(_("Plugin: start cluster"))
    plugin.start_cluster(cluster)

    # cluster is now up and ready
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_ACTIVE)

    # schedule pending job executions for the cluster
    for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
        job_manager.run_job(je.id)

    _refresh_health_for_cluster(cluster_id)
Example #10
def _provision_cluster(cluster_id):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    cluster = _update_sahara_info(ctx, cluster)

    # updating cluster infra
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_INFRAUPDATING)
    plugin.update_infra(cluster)

    # creating instances and configuring them
    cluster = conductor.cluster_get(ctx, cluster_id)
    context.set_step_type(_("Engine: create cluster"))
    INFRA.create_cluster(cluster)

    # configure cluster
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_CONFIGURING)
    context.set_step_type(_("Plugin: configure cluster"))
    if hasattr(plugin, 'validate_images'):
        plugin.validate_images(cluster, test_only=False)
    shares.mount_shares(cluster)
    plugin.configure_cluster(cluster)

    # starting prepared and configured cluster
    ntp_service.configure_ntp(cluster_id)
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_STARTING)

    context.set_step_type(_("Plugin: start cluster"))
    plugin.start_cluster(cluster)

    # cluster is now up and ready
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_ACTIVE)

    # schedule pending job executions for the cluster
    for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
        job_manager.run_job(je.id)

    _refresh_health_for_cluster(cluster_id)
Example #11
def _provision_scaled_cluster(cluster_id, node_group_id_map):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    try:
        # Decommissioning surplus nodes with the plugin
        cluster = g.change_cluster_status(cluster, "Decommissioning")

        instances_to_delete = []

        for node_group in cluster.node_groups:
            new_count = node_group_id_map[node_group.id]
            if new_count < node_group.count:
                instances_to_delete += node_group.instances[
                    new_count:node_group.count]

        if instances_to_delete:
            context.set_step_type(_("Plugin: decommission cluster"))
            plugin.decommission_nodes(cluster, instances_to_delete)

        # Scaling infrastructure
        cluster = g.change_cluster_status(cluster, "Scaling")
        context.set_step_type(_("Engine: scale cluster"))
        instance_ids = INFRA.scale_cluster(cluster, node_group_id_map)

        # Setting up new nodes with the plugin
        if instance_ids:
            ntp_service.configure_ntp(cluster_id)
            cluster = g.change_cluster_status(cluster, "Configuring")
            instances = g.get_instances(cluster, instance_ids)
            context.set_step_type(_("Plugin: scale cluster"))
            plugin.scale_cluster(cluster, instances)

        g.change_cluster_status(cluster, "Active")

    finally:
        if CONF.use_identity_api_v3 and not cluster.is_transient:
            trusts.delete_trust_from_cluster(cluster)
Example #12
def _provision_scaled_cluster(cluster_id, node_group_id_map,
                              node_group_instance_map=None):
    """Provision scaled cluster.

    :param cluster_id: Id of cluster to be scaled.

    :param node_group_id_map: Dictionary in the format
                   node_group_id: number of instances.

    :param node_group_instance_map: Specifies the instances to be removed in
                   each node group.
    """
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    # Decommissioning surplus nodes with the plugin
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_DECOMMISSIONING)

    try:
        instances_to_delete = []
        for node_group in cluster.node_groups:
            ng_inst_to_delete_count = 0
            # new_count is the new number of instances in this node group
            new_count = node_group_id_map[node_group.id]
            if new_count < node_group.count:
                # Add the explicitly selected instances to the deletion list
                if _specific_inst_to_delete(node_group,
                                            node_group_instance_map):
                    for instance_ref in node_group_instance_map[node_group.id]:
                        instances_to_delete.append(_get_instance_obj(
                            node_group.instances, instance_ref))
                        ng_inst_to_delete_count += 1

                # Add random instances to the list when the number of
                # specific instances does not equal the difference between
                # the current count and the new count of instances.
                while node_group.count - new_count > ng_inst_to_delete_count:
                    instances_to_delete.append(_get_random_instance_from_ng(
                        node_group.instances, instances_to_delete))
                    ng_inst_to_delete_count += 1

        if instances_to_delete:
            context.set_step_type(_("Plugin: decommission cluster"))
            plugin.decommission_nodes(cluster, instances_to_delete)

        # Scaling infrastructure
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_SCALING)
        context.set_step_type(_("Engine: scale cluster"))
        instance_ids = INFRA.scale_cluster(cluster, node_group_id_map,
                                           instances_to_delete)
        # Setting up new nodes with the plugin
        if instance_ids:
            ntp_service.configure_ntp(cluster_id, instance_ids)
            cluster = c_u.change_cluster_status(
                cluster, c_u.CLUSTER_STATUS_CONFIGURING)
            instances = c_u.get_instances(cluster, instance_ids)
            context.set_step_type(_("Plugin: scale cluster"))
            plugin.scale_cluster(cluster, instances)

        c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE)
        _refresh_health_for_cluster(cluster_id)

    except Exception as e:
        c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE,
                                  six.text_type(e))
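
This variant factors the membership check into _specific_inst_to_delete and, unlike Example #6, counts deletions per node group (ng_inst_to_delete_count) instead of using the global list length. The helper's body is not shown; judging from the call site it reduces to a guarded lookup, sketched here as an assumption:

def _specific_inst_to_delete(node_group, node_group_instance_map=None):
    # Sketch: True only when the caller supplied an instance map and it
    # names at least one instance for this node group.
    return bool(node_group_instance_map and
                node_group_instance_map.get(node_group.id))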