def _rollback_cluster_scaling(self, cluster, instances, ex):
    """Attempt to rollback cluster scaling."""
    for i in instances:
        with context.set_current_instance_id(i.instance_id):
            self._shutdown_instance(i)

    cluster = conductor.cluster_get(context.ctx(), cluster)
    c_u.clean_cluster_from_empty_ng(cluster)
def test_clean_cluster_from_empty_ng(self):
    ctx = context.ctx()
    cluster = self._make_sample()
    ng = cluster.node_groups[0]
    ng_len = len(cluster.node_groups)
    self.api.node_group_update(ctx, ng, {'count': 0})
    cluster = self.api.cluster_get(ctx, cluster.id)
    cluster_utils.clean_cluster_from_empty_ng(cluster)
    cluster = self.api.cluster_get(ctx, cluster.id)
    self.assertEqual(ng_len - 1, len(cluster.node_groups))
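# Illustrative only: a minimal, self-contained sketch of the behaviour the
# test above asserts -- node groups whose count has dropped to 0 are removed
# from the cluster. This is not the actual sahara.utils.cluster
# implementation; NodeGroup/Cluster below are simple stand-ins for the real
# conductor-backed objects.
from dataclasses import dataclass


@dataclass
class NodeGroup:
    name: str
    count: int


@dataclass
class Cluster:
    node_groups: list


def clean_cluster_from_empty_ng_sketch(cluster):
    # keep only node groups that still hold at least one instance
    cluster.node_groups = [ng for ng in cluster.node_groups if ng.count != 0]


cluster = Cluster(node_groups=[NodeGroup('master', 1), NodeGroup('worker', 0)])
clean_cluster_from_empty_ng_sketch(cluster)
assert len(cluster.node_groups) == 1  # the emptied 'worker' group is gone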
def scale_cluster(id, data):
    context.set_current_cluster_id(id)
    ctx = context.ctx()

    cluster = conductor.cluster_get(ctx, id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    existing_node_groups = data.get('resize_node_groups', [])
    additional_node_groups = data.get('add_node_groups', [])

    # the next map is the main object we will work with
    # to_be_enlarged : {node_group_id: desired_amount_of_instances}
    to_be_enlarged = {}
    node_group_instance_map = {}
    for ng in existing_node_groups:
        ng_id = g.find(cluster.node_groups, name=ng['name'])['id']
        to_be_enlarged.update({ng_id: ng['count']})
        if 'instances' in ng:
            node_group_instance_map.update({ng_id: ng['instances']})

    additional = construct_ngs_for_scaling(cluster, additional_node_groups)
    cluster = conductor.cluster_get(ctx, cluster)
    _add_ports_for_auto_sg(ctx, cluster, plugin)

    try:
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_VALIDATING)
        quotas.check_scaling(cluster, to_be_enlarged, additional)
        plugin.recommend_configs(cluster, scaling=True)
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            c_u.clean_cluster_from_empty_ng(cluster)
            c_u.change_cluster_status(
                cluster, c_u.CLUSTER_STATUS_ACTIVE, six.text_type(e))

    # If we are here validation is successful.
    # So let's update to_be_enlarged map:
    to_be_enlarged.update(additional)

    for node_group in cluster.node_groups:
        if node_group.id not in to_be_enlarged:
            to_be_enlarged[node_group.id] = node_group.count

    api.OPS.provision_scaled_cluster(id, to_be_enlarged,
                                     node_group_instance_map)
    return cluster
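# Illustrative only: the shape of the `data` payload that scale_cluster above
# reads. The 'resize_node_groups' keys ('name', 'count', and the optional
# 'instances' list forwarded into node_group_instance_map) come directly from
# the code; the fields shown for 'add_node_groups' entries are an assumption
# about the request schema, not taken from this snippet.
scale_request = {
    'resize_node_groups': [
        {'name': 'worker',
         'count': 5,
         # optional: specific instance ids, passed through unchanged
         'instances': ['<instance-id>']},
    ],
    'add_node_groups': [
        # assumed fields for a newly added node group
        {'name': 'new-worker',
         'count': 2,
         'node_group_template_id': '<template-id>'},
    ],
}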
def scale_cluster(self, cluster, target_count):
    ctx = context.ctx()

    rollback_count = self._get_ng_counts(cluster)

    self._update_rollback_strategy(cluster, rollback_count=rollback_count,
                                   target_count=target_count)

    inst_ids = self._launch_instances(
        cluster, target_count, SCALE_STAGES,
        update_stack=True, disable_rollback=False)

    cluster = conductor.cluster_get(ctx, cluster)
    c_u.clean_cluster_from_empty_ng(cluster)

    self._update_rollback_strategy(cluster)

    return inst_ids
def scale_cluster(self, cluster, node_group_id_map):
    _warning_logger()
    ctx = context.ctx()
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_SCALING_SPAWNING)

    instance_ids = self._scale_cluster_instances(cluster, node_group_id_map)

    self._update_rollback_strategy(cluster, instance_ids=instance_ids)

    cluster = conductor.cluster_get(ctx, cluster)
    c_u.clean_cluster_from_empty_ng(cluster)

    cluster = conductor.cluster_get(ctx, cluster)
    instances = c_u.get_instances(cluster, instance_ids)

    self._await_active(cluster, instances)

    self._assign_floating_ips(instances)

    self._await_networks(cluster, instances)

    cluster = conductor.cluster_get(ctx, cluster)

    volumes.attach_to_instances(
        c_u.get_instances(cluster, instance_ids))

    # we should be here with valid cluster: if instances creation
    # was not successful all extra-instances will be removed above
    if instance_ids:
        self._configure_instances(cluster)

    self._update_rollback_strategy(cluster)

    return instance_ids