def validate_additional_ng_scaling(cluster, additional):
    """Check every node group being added while scaling a Vanilla cluster.

    :param cluster: cluster object being scaled.
    :param additional: iterable of node-group ids that are being added.
    :raises NodeGroupCannotBeScaled: if a group contains a process outside
        the scalable set, or contains 'nodemanager' while the cluster has
        no resourcemanager instance.
    """
    resourcemanager = vu.get_resourcemanager(cluster)
    allowed = _get_scalable_processes()
    for group_id in additional:
        group = u.get_by_id(cluster.node_groups, group_id)
        # Every process of the group must be in the scalable set.
        if any(proc not in allowed for proc in group.node_processes):
            msg = _("Vanilla plugin cannot scale nodegroup with processes: %s")
            raise ex.NodeGroupCannotBeScaled(
                group.name, msg % ' '.join(group.node_processes))
        # A nodemanager is useless without a resourcemanager to report to.
        if 'nodemanager' in group.node_processes and not resourcemanager:
            msg = _("Vanilla plugin cannot scale node group with processes "
                    "which have no master-processes run in cluster")
            raise ex.NodeGroupCannotBeScaled(group.name, msg)
def validate_additional_ng_scaling(cls, cluster, additional):
    """Check every node group being added while scaling a CDH cluster.

    :param cluster: cluster object being scaled.
    :param additional: iterable of node-group ids that are being added.
    :raises NodeGroupCannotBeScaled: if a group contains a non-scalable
        process, or contains YARN_NODEMANAGER while the cluster runs no
        resourcemanager.
    """
    resourcemanager = cls.PU.get_resourcemanager(cluster)
    allowed = cls._get_scalable_processes()
    for group_id in additional:
        group = u.get_by_id(cluster.node_groups, group_id)
        # Every process of the group must be in the scalable set.
        if any(proc not in allowed for proc in group.node_processes):
            msg = _("CDH plugin cannot scale nodegroup with processes: "
                    "%(processes)s")
            raise ex.NodeGroupCannotBeScaled(
                group.name,
                msg % {'processes': ' '.join(group.node_processes)})
        # A nodemanager is useless without a resourcemanager to report to.
        if 'YARN_NODEMANAGER' in group.node_processes and not resourcemanager:
            msg = _("CDH plugin cannot scale node group with processes "
                    "which have no master-processes run in cluster")
            raise ex.NodeGroupCannotBeScaled(group.name, msg)
def _validate_existing_ng_scaling(self, cluster, existing):
    """Validate the proposed resize of existing Spark node groups.

    :param cluster: cluster object being scaled.
    :param existing: mapping of node-group id to its new instance count.
    :raises NodeGroupCannotBeScaled: if a resized group contains a
        process outside the scalable set.
    :raises ClusterCannotBeScaled: if the shrink would leave fewer
        datanodes than the HDFS replication factor requires.
    """
    allowed = self._get_scalable_processes()
    removed_datanodes = 0
    for group in cluster.node_groups:
        if group.id not in existing:
            continue
        delta = group.count - existing[group.id]
        # Only shrinking datanode groups threatens HDFS replication.
        if delta > 0 and "datanode" in group.node_processes:
            removed_datanodes += delta
        if not set(group.node_processes).issubset(allowed):
            raise ex.NodeGroupCannotBeScaled(
                group.name,
                _("Spark plugin cannot scale nodegroup"
                  " with processes: %s") % ' '.join(group.node_processes))
    datanode_count = len(utils.get_instances(cluster, "datanode"))
    replication = utils.get_config_value_or_default('HDFS',
                                                    "dfs.replication",
                                                    cluster)
    if removed_datanodes > 0 and datanode_count - removed_datanodes < replication:
        raise ex.ClusterCannotBeScaled(
            cluster.name,
            _("Spark plugin cannot shrink cluster because "
              "there would be not enough nodes for HDFS "
              "replicas (replication factor is %s)") % replication)
def _validate_additional_ng_scaling(self, cluster, additional):
    """Check every node group being added while scaling a Vanilla cluster.

    :param cluster: cluster object being scaled.
    :param additional: iterable of node-group ids that are being added.
    :raises NodeGroupCannotBeScaled: if a group has a process outside the
        scalable set, or has 'tasktracker' while no jobtracker runs in
        the cluster.
    """
    jobtracker = vu.get_jobtracker(cluster)
    allowed = self._get_scalable_processes()
    for group_id in additional:
        group = g.get_by_id(cluster.node_groups, group_id)
        # Every process of the group must be in the scalable set.
        if any(proc not in allowed for proc in group.node_processes):
            raise ex.NodeGroupCannotBeScaled(
                group.name,
                _("Vanilla plugin cannot scale nodegroup"
                  " with processes: %s") % ' '.join(group.node_processes))
        # A tasktracker is useless without a jobtracker to report to.
        if 'tasktracker' in group.node_processes and not jobtracker:
            raise ex.NodeGroupCannotBeScaled(
                group.name,
                _("Vanilla plugin cannot scale node group with "
                  "processes which have no master-processes run "
                  "in cluster"))
def _validate_existing_ng_scaling(self, cluster, existing):
    """Reject resizing any Storm node group with non-scalable processes.

    :param cluster: cluster object being scaled.
    :param existing: mapping of node-group id to its new instance count.
    :raises NodeGroupCannotBeScaled: if a resized group contains a
        process outside the scalable set.
    """
    allowed = self._get_scalable_processes()
    for group in cluster.node_groups:
        if group.id not in existing:
            continue
        if set(group.node_processes).issubset(allowed):
            continue
        raise ex.NodeGroupCannotBeScaled(
            group.name,
            _("Storm plugin cannot scale nodegroup"
              " with processes: %s") % ' '.join(group.node_processes))
def _validate_additional_ng_scaling(self, cluster, additional):
    """Reject adding any Spark node group with non-scalable processes.

    :param cluster: cluster object being scaled.
    :param additional: iterable of node-group ids that are being added.
    :raises NodeGroupCannotBeScaled: if a group contains a process
        outside the scalable set.
    """
    allowed = self._get_scalable_processes()
    for group_id in additional:
        group = ug.get_by_id(cluster.node_groups, group_id)
        if set(group.node_processes).issubset(allowed):
            continue
        raise ex.NodeGroupCannotBeScaled(
            group.name,
            _("Spark plugin cannot scale nodegroup"
              " with processes: %s") % ' '.join(group.node_processes))
def validate_existing_ng_scaling(cluster, existing):
    """Validate resizing of existing node groups for the CDH plugin.

    :param cluster: cluster object being scaled.
    :param existing: mapping of node-group id to its new instance count.
    :raises NodeGroupCannotBeScaled: if a resized group contains a
        process outside the scalable set.
    """
    scalable_processes = _get_scalable_processes()
    # NOTE(review): the original body also accumulated the number of
    # datanodes being removed (dn_to_delete) but never used it; sibling
    # versions of this validator compare it against dfs_replication.
    # The dead computation is dropped here -- confirm that no
    # replication-factor check was intended for this version.
    for ng in cluster.node_groups:
        if ng.id in existing:
            if not set(ng.node_processes).issubset(scalable_processes):
                msg = _("CDH plugin cannot scale nodegroup with processes: "
                        "%(processes)s")
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, msg % {'processes': ' '.join(ng.node_processes)})
def validate_existing_ng_scaling(cluster, existing):
    """Validate resizing of existing node groups for the CDH plugin.

    :param cluster: cluster object being scaled.
    :param existing: mapping of node-group id to its new instance count.
    :raises NodeGroupCannotBeScaled: if a resized group contains a
        process outside the scalable set.
    :raises ClusterCannotBeScaled: if the resulting number of datanodes
        would drop below the HDFS dfs_replication setting.
    """
    scalable_processes = _get_scalable_processes()
    dn_to_delete = 0
    for ng in cluster.node_groups:
        if ng.id in existing:
            # Track how many HDFS datanodes this resize removes.
            if (ng.count > existing[ng.id] and
                    'HDFS_DATANODE' in ng.node_processes):
                dn_to_delete += ng.count - existing[ng.id]
            if not set(ng.node_processes).issubset(scalable_processes):
                msg = _("CDH plugin cannot scale nodegroup with processes: "
                        "%(processes)s")
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, msg % {'processes': ' '.join(ng.node_processes)})
    dn_count = _get_inst_count(cluster, 'HDFS_DATANODE') - dn_to_delete
    replicas = PU.get_config_value('HDFS', 'dfs_replication', cluster)
    if dn_count < replicas:
        # Bug fix: pass cluster.name, not the cluster object -- the other
        # validators in this file raise ClusterCannotBeScaled with the
        # cluster's name string.
        raise ex.ClusterCannotBeScaled(
            cluster.name,
            _('Number of datanodes must be not less than dfs_replication.'))
def validate_existing_ng_scaling(pctx, cluster, existing):
    """Validate resizing of existing node groups for the Vanilla plugin.

    :param pctx: plugin context used to resolve HDFS configuration.
    :param cluster: cluster object being scaled.
    :param existing: mapping of node-group id to its new instance count.
    :raises NodeGroupCannotBeScaled: if a resized group contains a
        process outside the scalable set.
    :raises ClusterCannotBeScaled: if the shrink would leave fewer
        datanodes than the HDFS replication factor requires.
    """
    allowed = _get_scalable_processes()
    removed_datanodes = 0
    for group in cluster.node_groups:
        if group.id not in existing:
            continue
        delta = group.count - existing[group.id]
        # Only shrinking datanode groups threatens HDFS replication.
        if delta > 0 and "datanode" in group.node_processes:
            removed_datanodes += delta
        if not set(group.node_processes).issubset(allowed):
            msg = _("Vanilla plugin cannot scale nodegroup "
                    "with processes: %s")
            raise ex.NodeGroupCannotBeScaled(
                group.name, msg % ' '.join(group.node_processes))
    datanode_count = len(vu.get_datanodes(cluster))
    replication = cu.get_config_value(pctx, 'HDFS', 'dfs.replication',
                                      cluster)
    if removed_datanodes > 0 and datanode_count - removed_datanodes < replication:
        msg = _("Vanilla plugin cannot shrink cluster because it would be "
                "not enough nodes for replicas (replication factor is %s)")
        raise ex.ClusterCannotBeScaled(cluster.name, msg % replication)