def k2hb_stop(context, timeout=30, **kwargs):
    """Scale every k2hb autoscaling group (main, main dedicated, equality,
    audit) down to zero instances.

    Each group is scaled in turn; ``context.last_scaled_asg`` ends up holding
    the result for the final (audit) group, as before.
    """
    console_printer.print_info("Executing 'k2hb_stop' fixture")
    k2hb_asg_prefixes = (
        context.asg_prefix_k2hb_main_london,
        context.asg_prefix_k2hb_main_dedicated_london,
        context.asg_prefix_k2hb_equality_london,
        context.asg_prefix_k2hb_audit_london,
    )
    for asg_prefix in k2hb_asg_prefixes:
        context.last_scaled_asg = (
            aws_helper.scale_asg_if_desired_count_is_not_already_set(asg_prefix, 0)
        )
def ingest_ecs_cluster_start(context, timeout=30, **kwargs):
    """Scale the ingestion ECS cluster ASG up to its configured maximum."""
    console_printer.print_info("Executing 'ingestion_ecs_cluster_start' fixture")
    max_count = int(context.asg_max_count_ingestion_ecs_cluster)
    context.last_scaled_asg = aws_helper.scale_asg_if_desired_count_is_not_already_set(
        context.asg_prefix_ingestion_ecs_cluster, max_count
    )
def snapshot_sender_start_max(context, timeout=30, **kwargs):
    """Scale the snapshot sender ASG to its maximum count, unless a
    scale-up override value is set, in which case use the override."""
    console_printer.print_info("Executing 'snapshot_sender_start_max' fixture")
    override = context.snapshot_sender_scale_up_override
    # Falsy override (empty/None/0) means "no override": fall back to max.
    desired_count = override if override else context.asg_max_count_snapshot_sender
    context.last_scaled_asg = aws_helper.scale_asg_if_desired_count_is_not_already_set(
        context.asg_prefix_snapshot_sender, int(desired_count)
    )
def step_impl(context):
    """Wait until the most recently scaled ASG reports the requested
    desired count.

    Reads ``context.last_scaled_asg`` as a ``(asg_prefix, desired_count)``
    pair, then polls the ASG every 5 seconds. After each minute without
    success the scaling request is re-issued, and polling continues until
    ``context.timeout`` seconds have elapsed overall.

    Fixes over the previous version: removed the unused ``time_taken``
    local, unpacked the pair in one statement, and dropped the redundant
    ``not asg_finished_scaling`` term from the inner loop condition (the
    ``break`` already exits on success).

    Raises:
        AssertionError: if the desired count is not reached before timeout.
    """
    asg_prefix, desired_count = context.last_scaled_asg
    console_printer.print_info(
        f"Waiting for autoscaling group with prefix of '{asg_prefix}' to set desired count to '{str(desired_count)}'"
    )
    timeout_time = time.time() + context.timeout
    asg_finished_scaling = False
    while not asg_finished_scaling and time.time() < timeout_time:
        # Poll for up to one minute: 12 checks, 5 seconds apart.
        count = 0
        while count < 60:
            if aws_helper.get_asg_desired_count(None, asg_prefix) == desired_count:
                asg_finished_scaling = True
                break
            time.sleep(5)
            count += 5
        if asg_finished_scaling:
            console_printer.print_info(
                f"Autoscaling group with prefix of '{asg_prefix}' correctly set desired count of '{str(desired_count)}'"
            )
            break
        # Not there after a minute -- re-issue the scaling request and retry.
        console_printer.print_info(
            f"Autoscaling group with prefix of '{asg_prefix}' not set desired count of '{str(desired_count)}' after one minute, so trying to scale again"
        )
        aws_helper.scale_asg_if_desired_count_is_not_already_set(
            asg_prefix, desired_count
        )
    assert (
        asg_finished_scaling
    ), f"Autoscaling group with prefix of '{asg_prefix}' not set desired count to '{str(desired_count)}' after '{str(context.timeout)}' seconds"
def historic_data_importer_start_base(context, prefix_list):
    """Scale the HDI ASG to a desired count derived from the import prefixes."""
    prefixes = template_helper.get_historic_data_importer_prefixes(
        prefix_list, context.historic_importer_use_one_message_per_path
    )
    # Pad a single-prefix list so the computed desired count never lands on
    # exactly one HDI instance (the sentinel string explains itself).
    if len(prefixes) == 1:
        prefixes.append(
            "This is added because we never want one HDI if max is above 1"
        )
    target = int(
        manifest_comparison_helper.get_desired_asg_count(
            prefixes, context.asg_max_count_hdi
        )
    )
    context.last_scaled_asg = aws_helper.scale_asg_if_desired_count_is_not_already_set(
        context.asg_prefix_hdi, target
    )
def htme_start_incremental(context, timeout=30, **kwargs):
    """Scale the HTME ASG for an incremental export, sized by the
    consolidated topic list."""
    console_printer.print_info("Executing 'htme_start_incremental' fixture")
    topic_overrides = [
        context.generate_snapshots_topics_override,
        context.send_snapshots_topics_override,
    ]
    topics = message_helper.get_consolidated_topics_list(
        context.topics,
        "incremental",
        context.default_topic_list_full_delimited,
        context.default_topic_list_incremental_delimited,
        topic_overrides,
    )
    target = int(
        manifest_comparison_helper.get_desired_asg_count(
            topics, context.asg_max_count_htme
        )
    )
    context.last_scaled_asg = aws_helper.scale_asg_if_desired_count_is_not_already_set(
        context.asg_prefix_htme, target
    )
def step_impl(context, snapshot_type):
    """Scale up the snapshot sender after a snapshot export, when the
    trigger override flag is enabled (string comparison against "true")."""
    if context.generate_snapshots_trigger_snapshot_sender_override == "true":
        # NOTE(review): this topic list is computed but never used below --
        # possibly dead code; the call is kept for exact behavioural parity.
        updated_topics = message_helper.get_consolidated_topics_list(
            context.topics,
            snapshot_type,
            context.default_topic_list_full_delimited,
            context.default_topic_list_incremental_delimited,
            [context.generate_snapshots_topics_override],
        )
        override = context.snapshot_sender_scale_up_override
        # Falsy override means "no override": fall back to the max count.
        desired_count = (
            override if override else context.asg_max_count_snapshot_sender
        )
        context.last_scaled_asg = (
            aws_helper.scale_asg_if_desired_count_is_not_already_set(
                context.asg_prefix_snapshot_sender, int(desired_count)
            )
        )
def kafka_stub_stop(context, timeout=30, **kwargs):
    """Scale the Kafka stub autoscaling group down to zero instances."""
    console_printer.print_info("Executing 'kafka_stub_stop' fixture")
    scaled = aws_helper.scale_asg_if_desired_count_is_not_already_set(
        context.asg_prefix_kafka_stub, 0
    )
    context.last_scaled_asg = scaled
def historic_data_importer_stop(context, timeout=30, **kwargs):
    """Scale the historic data importer autoscaling group down to zero."""
    console_printer.print_info("Executing 'historic_data_importer_stop' fixture")
    scaled = aws_helper.scale_asg_if_desired_count_is_not_already_set(
        context.asg_prefix_hdi, 0
    )
    context.last_scaled_asg = scaled
def htme_start_max(context, timeout=30, **kwargs):
    """Scale the HTME autoscaling group up to its configured maximum."""
    console_printer.print_info("Executing 'htme_start_max' fixture")
    max_count = int(context.asg_max_count_htme)
    context.last_scaled_asg = aws_helper.scale_asg_if_desired_count_is_not_already_set(
        context.asg_prefix_htme, max_count
    )