Example #1
    def _validate_existing_ng_scaling(self, cluster, existing):
        scalable_processes = self._get_scalable_processes()
        dn_to_delete = 0
        for ng in cluster.node_groups:
            if ng.id in existing:
                if ng.count > existing[ng.id] and ("datanode"
                                                   in ng.node_processes):
                    dn_to_delete += ng.count - existing[ng.id]
                if not set(ng.node_processes).issubset(scalable_processes):
                    raise ex.NodeGroupCannotBeScaled(
                        ng.name,
                        _("Spark plugin cannot scale nodegroup"
                          " with processes: %s") % ' '.join(ng.node_processes))

        dn_amount = len(utils.get_instances(cluster, "datanode"))
        rep_factor = utils.get_config_value_or_default('HDFS',
                                                       "dfs.replication",
                                                       cluster)

        if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
            raise ex.ClusterCannotBeScaled(
                cluster.name,
                _("Spark plugin cannot shrink cluster because "
                  "there would be not enough nodes for HDFS "
                  "replicas (replication factor is %s)") % rep_factor)
Example #2
    def get_cluster_spec(self,
                         cluster,
                         user_inputs,
                         scaled_groups=None,
                         cluster_template=None):
        if cluster_template:
            cluster_spec = cs.ClusterSpec(cluster_template)
        else:
            if scaled_groups:
                for ng in cluster.node_groups:
                    ng_id = ng['id']
                    if (ng_id in scaled_groups
                            and ng['count'] > scaled_groups[ng_id]):
                        raise ex.ClusterCannotBeScaled(
                            cluster.name,
                            _('The HDP plugin does not support '
                              'the decommissioning of nodes '
                              'for HDP version 1.3.2'))

            cluster_spec = self.get_default_cluster_configuration()
            cluster_spec.create_operational_config(cluster, user_inputs,
                                                   scaled_groups)

            cs.validate_number_of_datanodes(cluster, scaled_groups,
                                            self.get_config_items())

        return cluster_spec
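Note that the decommissioning guard in this method only fires when a node group's current count exceeds the count requested in scaled_groups; growing a group passes. A small sketch of just that predicate, with hypothetical dict-shaped groups matching the subscripting above:

def shrinks_any_group(node_groups, scaled_groups):
    # True if any group's current count exceeds its requested count.
    return any(ng['count'] > scaled_groups[ng['id']]
               for ng in node_groups if ng['id'] in scaled_groups)

node_groups = [{'id': 'ng-1', 'count': 4}, {'id': 'ng-2', 'count': 1}]
print(shrinks_any_group(node_groups, {'ng-1': 2}))  # True  -> rejected
print(shrinks_any_group(node_groups, {'ng-1': 6}))  # False -> allowed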
Example #3
def validate_zookeeper_node_count(zk_ng, existing, additional):
    zk_amount = 0
    for ng in zk_ng:
        if ng.id in existing:
            zk_amount += existing[ng.id]
        else:
            zk_amount += ng.count

    for ng_id in additional:
        # get_by_id returns None when ng_id is not a zookeeper group
        ng = u.get_by_id(zk_ng, ng_id)
        if ng and "zookeeper" in ng.node_processes:
            zk_amount += ng.count

    if (zk_amount % 2) != 1:
        msg = _("Vanilla plugin cannot scale cluster because it must keep"
                " zookeeper service in odd.")
        raise ex.ClusterCannotBeScaled(zk_ng[0].cluster.name, msg)
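ZooKeeper requires an odd ensemble size to form a quorum, which is all the final modulo check enforces. A quick, self-contained illustration of the accounting (the helper and sample numbers are illustrative, not part of the plugin):

def resulting_zk_count(current, resized, added):
    # current: {ng_id: count} of today's zookeeper groups
    # resized: {ng_id: new_count} for groups being resized
    # added:   counts of brand-new zookeeper groups
    kept = sum(resized.get(ng_id, count) for ng_id, count in current.items())
    return kept + sum(added)

# Three zookeepers today; adding one more breaks the odd-size rule:
print(resulting_zk_count({'ng-zk': 3}, {}, [1]) % 2 == 1)  # False -> rejected
# Adding two keeps the ensemble at five, which passes:
print(resulting_zk_count({'ng-zk': 3}, {}, [2]) % 2 == 1)  # True  -> allowed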
Example #4
def validate_existing_ng_scaling(cluster, existing):
    scalable_processes = _get_scalable_processes()
    dn_to_delete = 0
    for ng in cluster.node_groups:
        if ng.id in existing:
            if (ng.count > existing[ng.id]
                    and 'HDFS_DATANODE' in ng.node_processes):
                dn_to_delete += ng.count - existing[ng.id]

            if not set(ng.node_processes).issubset(scalable_processes):
                msg = _("CDH plugin cannot scale nodegroup with processes: "
                        "%(processes)s")
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, msg % {'processes': ' '.join(ng.node_processes)})

    dn_count = _get_inst_count(cluster, 'HDFS_DATANODE') - dn_to_delete
    replicas = PU.get_config_value('HDFS', 'dfs_replication', cluster)
    if dn_count < replicas:
        raise ex.ClusterCannotBeScaled(
            cluster.name,
            _('Number of datanodes must not be less than dfs_replication.'))
Example #5
def validate_existing_ng_scaling(pctx, cluster, existing):
    scalable_processes = _get_scalable_processes()
    dn_to_delete = 0
    for ng in cluster.node_groups:
        if ng.id in existing:
            if ng.count > existing[ng.id] and "datanode" in ng.node_processes:
                dn_to_delete += ng.count - existing[ng.id]

            if not set(ng.node_processes).issubset(scalable_processes):
                msg = _("Vanilla plugin cannot scale nodegroup "
                        "with processes: %s")
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, msg % ' '.join(ng.node_processes))

    dn_amount = len(vu.get_datanodes(cluster))
    rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster)

    if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
        msg = _("Vanilla plugin cannot shrink cluster because it would be "
                "not enough nodes for replicas (replication factor is %s)")
        raise ex.ClusterCannotBeScaled(cluster.name, msg % rep_factor)