Example #1
def validate_additional_ng_scaling(cluster, additional):
    rm = vu.get_resourcemanager(cluster)
    scalable_processes = _get_scalable_processes()

    for ng_id in additional:
        ng = gu.get_by_id(cluster.node_groups, ng_id)
        if not set(ng.node_processes).issubset(scalable_processes):
            msg = "Vanilla plugin cannot scale nodegroup with processes: %s"
            raise ex.NodeGroupCannotBeScaled(ng.name,
                                             msg % ' '.join(ng.node_processes))

        if not rm and 'nodemanager' in ng.node_processes:
            msg = ("Vanilla plugin cannot scale node group with processes "
                   "which have no master-processes run in cluster")
            raise ex.NodeGroupCannotBeScaled(ng.name, msg)
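The check above rejects any added node group that runs a process the plugin cannot scale, and rejects adding 'nodemanager' workers when the cluster has no ResourceManager. Below is a minimal, self-contained sketch of that logic using hypothetical stub classes (NodeGroup, Cluster, NodeGroupCannotBeScaled) and a hard-coded SCALABLE_PROCESSES set in place of Sahara's cluster model and the vu/gu/ex helpers; it is illustrative only, not the plugin's actual API.

# Hypothetical stand-ins for Sahara's node group / cluster objects.
class NodeGroup:
    def __init__(self, id, name, node_processes, count=1):
        self.id = id
        self.name = name
        self.node_processes = node_processes
        self.count = count


class Cluster:
    def __init__(self, name, node_groups):
        self.name = name
        self.node_groups = node_groups


class NodeGroupCannotBeScaled(Exception):
    def __init__(self, ng_name, reason):
        super().__init__("%s: %s" % (ng_name, reason))


# Assumed set of scalable processes, for illustration only.
SCALABLE_PROCESSES = {'datanode', 'nodemanager'}


def validate_additional_ng_scaling(cluster, additional, has_resourcemanager):
    for ng_id in additional:
        ng = next(g for g in cluster.node_groups if g.id == ng_id)
        # Every process in the added group must be scalable.
        if not set(ng.node_processes).issubset(SCALABLE_PROCESSES):
            raise NodeGroupCannotBeScaled(
                ng.name, "cannot scale node group with processes: "
                + ' '.join(ng.node_processes))
        # Workers need their master process present in the cluster.
        if not has_resourcemanager and 'nodemanager' in ng.node_processes:
            raise NodeGroupCannotBeScaled(
                ng.name, "no master process (resourcemanager) in cluster")


# Usage: a group running only scalable worker processes passes, while one
# that also runs 'namenode' is rejected.
workers = NodeGroup('ng-1', 'workers', ['datanode', 'nodemanager'])
mixed = NodeGroup('ng-2', 'mixed', ['datanode', 'namenode'])
cluster = Cluster('demo', [workers, mixed])
validate_additional_ng_scaling(cluster, ['ng-1'], has_resourcemanager=True)  # ok
try:
    validate_additional_ng_scaling(cluster, ['ng-2'], has_resourcemanager=True)
except NodeGroupCannotBeScaled as e:
    print(e)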
Example #2
    def _validate_additional_ng_scaling(self, cluster, additional):
        jt = u.get_jobtracker(cluster)
        scalable_processes = self._get_scalable_processes()

        for ng_id in additional:
            ng = self._get_by_id(cluster.node_groups, ng_id)
            if not set(ng.node_processes).issubset(scalable_processes):
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, "Intel plugin cannot scale nodegroup"
                    " with processes: " + ' '.join(ng.node_processes))
            if not jt and 'tasktracker' in ng.node_processes:
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, "Intel plugin cannot scale node group with "
                    "processes which have no master-processes run "
                    "in cluster")
Example #3
def validate_additional_ng_scaling(cluster, additional):
    rm = cu.get_resourcemanager(cluster)
    scalable_processes = _get_scalable_processes()

    for ng_id in additional:
        ng = gu.get_by_id(cluster.node_groups, ng_id)
        if not set(ng.node_processes).issubset(scalable_processes):
            msg = _("CDH plugin cannot scale nodegroup with processes: "
                    "%(processes)s")
            raise ex.NodeGroupCannotBeScaled(
                ng.name, msg % {'processes': ' '.join(ng.node_processes)})

        if not rm and 'NODEMANAGER' in ng.node_processes:
            msg = _("CDH plugin cannot scale node group with processes "
                    "which have no master-processes run in cluster")
            raise ex.NodeGroupCannotBeScaled(ng.name, msg)
Example #4
    def _validate_additional_ng_scaling(self, cluster, additional):
        scalable_processes = self._get_scalable_processes()

        for ng_id in additional:
            ng = ug.get_by_id(cluster.node_groups, ng_id)
            if not set(ng.node_processes).issubset(scalable_processes):
                raise ex.NodeGroupCannotBeScaled(
                    ng.name,
                    _("Spark plugin cannot scale nodegroup"
                      " with processes: %s") % ' '.join(ng.node_processes))
Example #5
    def _validate_existing_ng_scaling(self, cluster, existing):
        scalable_processes = self._get_scalable_processes()
        dn_to_delete = 0
        for ng in cluster.node_groups:
            if ng.id in existing:
                if ng.count > existing[ng.id] and "datanode" in \
                        ng.node_processes:
                    dn_to_delete += ng.count - existing[ng.id]
                if not set(ng.node_processes).issubset(scalable_processes):
                    raise ex.NodeGroupCannotBeScaled(
                        ng.name, "Intel plugin cannot scale nodegroup"
                        " with processes: " + ' '.join(ng.node_processes))
Example #6
def validate_existing_ng_scaling(cluster, existing):
    scalable_processes = _get_scalable_processes()
    dn_to_delete = 0
    for ng in cluster.node_groups:
        if ng.id in existing:
            if ng.count > existing[ng.id] and "datanode" in ng.node_processes:
                dn_to_delete += ng.count - existing[ng.id]

            if not set(ng.node_processes).issubset(scalable_processes):
                msg = _("CDH plugin cannot scale nodegroup with processes: "
                        "%(processes)s")
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, msg % {'processes': ' '.join(ng.node_processes)})
Example #7
def validate_existing_ng_scaling(cluster, existing):
    scalable_processes = _get_scalable_processes()
    dn_to_delete = 0
    for ng in cluster.node_groups:
        if ng.id in existing:
            if ng.count > existing[ng.id] and "datanode" in ng.node_processes:
                dn_to_delete += ng.count - existing[ng.id]

            if not set(ng.node_processes).issubset(scalable_processes):
                msg = ("Vanilla plugin cannot scale nodegroup "
                       "with processes: %s")
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, msg % ' '.join(ng.node_processes))

    dn_amount = len(vu.get_datanodes(cluster))
    rep_factor = c_helper.get_config_value('HDFS', 'dfs.replication', cluster)

    if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
        msg = ("Vanilla plugin cannot shrink cluster because it would be not "
               "enough nodes for replicas (replication factor is %s)")
        raise ex.ClusterCannotBeScaled(cluster.name, msg % rep_factor)
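To make the replication guard concrete, here is a small worked illustration with made-up numbers: a cluster with 5 datanodes and dfs.replication set to 3 cannot lose 3 datanodes, because the 2 that would remain could not hold 3 replicas of each block.

# Worked illustration of the guard above (values are made up):
dn_amount, dn_to_delete, rep_factor = 5, 3, 3
would_fail = dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor
print(would_fail)  # True -> ClusterCannotBeScaled would be raised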
Example #8
    def _validate_existing_ng_scaling(self, cluster, existing):
        scalable_processes = self._get_scalable_processes()
        dn_to_delete = 0
        for ng in cluster.node_groups:
            if ng.id in existing:
                if ng.count > existing[ng.id] and ("datanode"
                                                   in ng.node_processes):
                    dn_to_delete += ng.count - existing[ng.id]
                if not set(ng.node_processes).issubset(scalable_processes):
                    raise ex.NodeGroupCannotBeScaled(
                        ng.name,
                        _("Spark plugin cannot scale nodegroup"
                          " with processes: %s") % ' '.join(ng.node_processes))

        dn_amount = len(utils.get_instances(cluster, "datanode"))
        rep_factor = c_helper.get_config_value('HDFS', "dfs.replication",
                                               cluster)

        if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
            raise ex.ClusterCannotBeScaled(
                cluster.name,
                _("Spark plugin cannot shrink cluster because "
                  "there would be not enough nodes for HDFS "
                  "replicas (replication factor is %s)") % rep_factor)