Example #1
def validate_existing_ng_scaling(cluster, existing):
    scalable_processes = _get_scalable_processes()
    dn_to_delete = 0
    for ng in cluster.node_groups:
        if ng.id in existing:
            # Tally how many datanodes the requested resize would remove.
            if ng.count > existing[ng.id] and "datanode" in ng.node_processes:
                dn_to_delete += ng.count - existing[ng.id]

            # Every process in the group must be scalable for the group
            # to be resized at all.
            if not set(ng.node_processes).issubset(scalable_processes):
                msg = ("Vanilla plugin cannot scale nodegroup "
                       "with processes: %s")
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, msg % ' '.join(ng.node_processes))

    dn_amount = len(vu.get_datanodes(cluster))
    rep_factor = c_helper.get_config_value('HDFS', 'dfs.replication', cluster)

    # Refuse to shrink below the HDFS replication factor: the remaining
    # datanodes could no longer hold a full set of block replicas.
    if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
        msg = ("Vanilla plugin cannot shrink cluster because there would "
               "not be enough nodes for replicas (replication factor is %s)")
        raise ex.ClusterCannotBeScaled(cluster.name, msg % rep_factor)
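
As context, here is a minimal sketch of driving this validator with stand-in objects. FakeNodeGroup, FakeCluster, and the counts below are hypothetical test doubles, not Sahara classes:

from collections import namedtuple

# Hypothetical stand-ins for Sahara's cluster and node-group objects.
FakeNodeGroup = namedtuple('FakeNodeGroup', 'id name count node_processes')
FakeCluster = namedtuple('FakeCluster', 'name node_groups')

workers = FakeNodeGroup(id='ng-1', name='workers', count=5,
                        node_processes=['datanode', 'tasktracker'])
cluster = FakeCluster(name='demo', node_groups=[workers])

# Request shrinking the group from 5 nodes to 3: the validator raises
# ClusterCannotBeScaled if removing 2 datanodes would leave fewer
# datanodes than dfs.replication requires.
validate_existing_ng_scaling(cluster, existing={'ng-1': 3})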
Example #2
    def _validate_existing_ng_scaling(self, cluster, existing):
        scalable_processes = self._get_scalable_processes()
        dn_to_delete = 0
        for ng in cluster.node_groups:
            if ng.id in existing:
                if ng.count > existing[ng.id] and ("datanode"
                                                   in ng.node_processes):
                    dn_to_delete += ng.count - existing[ng.id]
                if not set(ng.node_processes).issubset(scalable_processes):
                    raise ex.NodeGroupCannotBeScaled(
                        ng.name,
                        _("Spark plugin cannot scale nodegroup"
                          " with processes: %s") % ' '.join(ng.node_processes))

        dn_amount = len(utils.get_instances(cluster, "datanode"))
        rep_factor = c_helper.get_config_value('HDFS', "dfs.replication",
                                               cluster)

        if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
            raise ex.ClusterCannotBeScaled(
                cluster.name,
                _("Spark plugin cannot shrink cluster because "
                  "there would not be enough nodes for HDFS "
                  "replicas (replication factor is %s)") % rep_factor)
Example #3
    def get_cluster_spec(self,
                         cluster,
                         user_inputs,
                         scaled_groups=None,
                         cluster_template=None):
        if cluster_template:
            cluster_spec = cs.ClusterSpec(cluster_template)
        else:
            if scaled_groups:
                for ng in cluster.node_groups:
                    ng_id = ng['id']
                    # A requested count lower than the current one would
                    # require decommissioning, which this plugin version
                    # cannot do.
                    if (ng_id in scaled_groups
                            and ng['count'] > scaled_groups[ng_id]):
                        raise ex.ClusterCannotBeScaled(
                            cluster.name,
                            _('The HDP plugin does not support '
                              'the decommissioning of nodes '
                              'for HDP version 1.3.2'))

            cluster_spec = self.get_default_cluster_configuration()
            cluster_spec.create_operational_config(cluster, user_inputs,
                                                   scaled_groups)

        return cluster_spec
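
Note that this check only blocks scale-down; growing a node group passes through and proceeds to build the operational config. A sketch under the assumption that plugin is an instance of the class above and 'ng-1' currently has 3 nodes:

# Hypothetical: requesting more nodes (3 -> 5) is accepted...
spec = plugin.get_cluster_spec(cluster, user_inputs,
                               scaled_groups={'ng-1': 5})

# ...while requesting fewer (3 -> 2) raises ClusterCannotBeScaled,
# since HDP 1.3.2 cannot decommission nodes.
plugin.get_cluster_spec(cluster, user_inputs, scaled_groups={'ng-1': 2})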
Example #4
File: plugin.py Project: savi-dev/sahara
    def validate_scaling(self, cluster, existing, additional):
        raise ex.ClusterCannotBeScaled("Scaling Spark clusters has not been "
                                       "implemented yet")