def configure_ermlp_training_pipeline(model_name: str):
    """Configure ERMLP from pipeline.

    Interactively queries the user for every ERMLP training hyper-parameter
    and stores each answer in the configuration dictionary.

    :param model_name: name of the model
    :rtype: OrderedDict
    :return: configuration dictionary
    """
    config = get_config_dict(model_name)

    # Step 1: Query embedding dimension
    print_training_embedding_dimension_message()
    print_embedding_dimension_info_message()
    embedding_dimension = select_integer_value(
        print_msg=EMBEDDING_DIMENSION_PRINT_MSG,
        prompt_msg=EMBEDDING_DIMENSION_PROMPT_MSG,
        error_msg=EMBEDDING_DIMENSION_ERROR_MSG,
    )
    config[EMBEDDING_DIM] = embedding_dimension
    print_section_divider()

    # Step 2: Query margin loss
    # (fixed misspelled local "magin_loss" -> "margin_loss")
    print_training_margin_loss_message()
    margin_loss = select_float_value(
        print_msg=MARGIN_LOSS_PRINT_MSG,
        prompt_msg=MARGIN_LOSS_PROMPT_MSG,
        error_msg=MARGIN_LOSS_ERROR_MSG,
    )
    config[MARGIN_LOSS] = margin_loss
    print_section_divider()

    # Step 3: Query learning rate
    print_learning_rate_message()
    learning_rate = select_float_value(
        print_msg=LEARNING_RATE_PRINT_MSG,
        prompt_msg=LEARNING_RATE_PROMPT_MSG,
        error_msg=LEARNING_RATE_ERROR_MSG,
    )
    config[LEARNING_RATE] = learning_rate
    print_section_divider()

    # Step 4: Query batch size
    print_batch_size_message()
    batch_size = select_integer_value(
        print_msg=BATCH_SIZE_PRINT_MSG,
        prompt_msg=BATCH_SIZE_PROMPT_MSG,
        error_msg=BATCH_SIZE_ERROR_MSG,
    )
    config[BATCH_SIZE] = batch_size
    print_section_divider()

    # Step 5: Query number of epochs
    print_number_epochs_message()
    number_epochs = select_integer_value(
        print_msg=EPOCH_PRINT_MSG,
        prompt_msg=EPOCH_PROMPT_MSG,
        error_msg=EPOCH_ERROR_MSG,
    )
    config[NUM_EPOCHS] = number_epochs
    print_section_divider()

    return config
def prompt_evaluation_parameters(config: Dict) -> None:
    """Prompt the user for evaluation parameters based on the execution mode.

    Fixes docstring typo ("absed" -> "based"). Updates ``config`` in place.

    :param config: configuration dictionary; evaluation keys are added in place
    """
    if config[EXECUTION_MODE] == TRAINING_MODE:
        # Step 1: Ask whether to evaluate the model
        print_ask_for_evlauation_message()
        is_evaluation_mode = click.confirm('Do you want to evaluate your model?')
        print_section_divider()
    else:
        # Non-training (HPO) mode always evaluates
        is_evaluation_mode = True

    # Step 2: Specify test set, if is_evaluation_mode==True
    if is_evaluation_mode:
        print_test_set_message()
        provide_test_set = click.confirm('Do you provide a test set yourself?')
        print_section_divider()

        if provide_test_set:
            test_set_path = get_input_path(TEST_FILE_PROMPT_MSG)
            config[TEST_SET_PATH] = test_set_path
        else:
            # No test set given: split one off from the training data by ratio
            print_test_ratio_message()
            test_set_ratio = select_ratio_for_test_set()
            config[TEST_SET_RATIO] = test_set_ratio
            print_section_divider()

        # Ask whether to use filtered negative triples
        print_filter_negative_triples_message()
        filter_negative_triples = ask_for_filtering_of_negatives()
        config[FILTER_NEG_TRIPLES] = filter_negative_triples
        print_section_divider()
def configure_ermlp_hpo_pipeline(model_name: str):
    """Configure ERMLP for hyper-parameter optimization.

    Queries the user for the lists of candidate values that the HPO run
    will sample from, and stores them in the configuration dictionary.

    :param model_name: name of the model
    :rtype: OrderedDict
    :return: configuration dictionary
    """
    config = get_config_dict(model_name)

    # Step 1: Query embedding dimensions
    print_hpo_embedding_dimensions_message()
    embedding_dimensions = select_positive_integer_values(
        EMBEDDING_DIMENSIONS_PRINT_MSG,
        EMBEDDING_DIMENSIONS_PROMPT_MSG,
        EMBEDDING_DIMENSIONS_ERROR_MSG,
    )
    config[EMBEDDING_DIM] = embedding_dimensions
    print_section_divider()

    # Step 2: Query margin loss
    # (fixed misspelled local "magin_loss" -> "margin_loss")
    print_hpo_margin_losses_message()
    margin_loss = select_float_values(
        print_msg=MARGIN_LOSSES_PRINT_MSG,
        prompt_msg=MARGIN_LOSSES_PROMPT_MSG,
        error_msg=MARGIN_LOSSES_ERROR_MSG,
    )
    config[MARGIN_LOSS] = margin_loss
    print_section_divider()

    # Step 3: Query learning rate
    print_hpo_learning_rates_message()
    learning_rate = select_float_values(
        print_msg=LEARNING_RATES_PRINT_MSG,
        prompt_msg=LEARNING_RATES_PROMPT_MSG,
        error_msg=LEARNING_RATES_ERROR_MSG,
    )
    config[LEARNING_RATE] = learning_rate
    print_section_divider()

    # Step 4: Query batch size
    print_hpo_batch_sizes_message()
    batch_sizes = select_positive_integer_values(
        print_msg=BATCH_SIZES_PRINT_MSG,
        prompt_msg=BATCH_SIZES_PROMPT_MSG,
        error_msg=BATCH_SIZES_ERROR_MSG,
    )
    config[BATCH_SIZE] = batch_sizes
    print_section_divider()

    # Step 5: Query number of epochs
    print_hpo_epochs_message()
    number_epochs = select_positive_integer_values(
        print_msg=EPOCHS_PRINT_MSG,
        prompt_msg=EPOCHS_PROMPT_MSG,
        error_msg=EPOCHS_ERROR_MSG,
    )
    config[NUM_EPOCHS] = number_epochs
    print_section_divider()

    return config
def prompt_execution_parameters(config: Dict, model_name: str) -> None:
    """Prompt the user for execution mode parameters.

    Dispatches to the training- or HPO-specific configuration flow based on
    the execution mode already stored in ``config``, updating it in place.

    :param config: configuration dictionary; updated in place
    :param model_name: name of the selected embedding model
    """
    pykeen_exec_mode = config[EXECUTION_MODE]

    if pykeen_exec_mode == TRAINING_MODE:
        config.update(_configure_training_pipeline(model_name))
        return

    if pykeen_exec_mode == HPO_MODE:
        config.update(_configure_hpo_pipeline(model_name))
        # Query number of HPO iterations
        hpo_iter = select_integer_value(
            print_msg=HPO_ITERS_PRINT_MSG,
            prompt_msg=HPO_ITERS_PROMPT_MSG,
            error_msg=HPO_ITERS_ERROR_MSG,
        )
        config[NUM_OF_HPO_ITERS] = hpo_iter
        print_section_divider()
def prompt_biokeen_config(*, connection: str, rebuild: bool, do_prompt_bio2bel: Optional[bool] = None) -> Dict:
    """Configure experiments.

    Optionally installs Bio2Bel repositories as training data, then delegates
    to :func:`prompt_config` for the remaining questions.

    :param connection: database connection string passed to the Bio2Bel installer
    :param rebuild: whether to force a rebuild of the Bio2Bel modules
    :param do_prompt_bio2bel: if ``None``, ask the user interactively whether
        to use a BioKEEN-provided database; otherwise use this value directly
    :return: the completed configuration dictionary
    """
    config = OrderedDict()

    # Step 1: Welcome + Intro
    print_welcome_message()
    print_section_divider()
    print_intro()
    print_section_divider()

    # Step 2: Ask for data source
    if do_prompt_bio2bel is None:
        do_prompt_bio2bel = click.confirm(
            'Do you want to use one of the databases provided by BioKEEN?',
            default=True,
        )
    print_section_divider()

    do_prompt_training = True

    if do_prompt_bio2bel:
        do_prompt_training = False
        config[TRAINING_SET_PATH] = []
        # Best-effort installation: a module that fails to install is
        # reported and skipped rather than aborting the whole prompt.
        for name in select_bio2bel_repository():
            try:
                path = install_bio2bel_module(name=name, connection=connection, rebuild=rebuild)
            except Exception:
                click.secho(f'failed: {name}', fg='red')
            else:
                if os.path.exists(path):
                    config[TRAINING_SET_PATH].append(f'bio2bel:{name}')
                else:
                    click.secho(f'failed: {name}: {path}', fg='red')
        # TODO replace this with less safe code that assumes everything installs no problemo

    print_section_divider()

    return prompt_config(
        config=config,
        show_welcome=False,
        do_prompt_training=do_prompt_training,
    )
def configure_um_training_pipeline(model_name: str):
    """Configure the Unstructured Model (UM) from pipeline.

    Interactively queries every training hyper-parameter for the model
    and stores each answer in the configuration dictionary.

    :param model_name: name of the model
    :rtype: OrderedDict
    :return: configuration dictionary
    """
    config = OrderedDict()
    config[KG_EMBEDDING_MODEL_NAME] = model_name

    # Step 1: Query embedding dimension
    print_training_embedding_dimension_message()
    print_embedding_dimension_info_message()
    embedding_dimension = select_integer_value(
        print_msg=EMBEDDING_DIMENSION_PRINT_MSG,
        prompt_msg=EMBEDDING_DIMENSION_PROMPT_MSG,
        error_msg=EMBEDDING_DIMENSION_ERROR_MSG,
    )
    config[EMBEDDING_DIM] = embedding_dimension
    print_section_divider()

    # Step 2: Query margin loss
    # (fixed misspelled local "magin_loss" -> "margin_loss")
    print_training_margin_loss_message()
    margin_loss = select_float_value(
        print_msg=MARGIN_LOSS_PRINT_MSG,
        prompt_msg=MARGIN_LOSS_PROMPT_MSG,
        error_msg=MARGIN_LOSS_ERROR_MSG,
    )
    config[MARGIN_LOSS] = margin_loss
    print_section_divider()

    # Step 3: Query L_p norm as scoring function
    print_scoring_fct_message()
    scoring_fct_norm = select_integer_value(
        print_msg=NORM_SCORING_FUNCTION_PRINT_MSG,
        prompt_msg=NORM_SCORING_FUNCTION_PROMPT_MSG,
        error_msg=NORM_SCORING_FUNCTION_ERROR_MSG,
    )
    config[SCORING_FUNCTION_NORM] = scoring_fct_norm
    print_section_divider()

    # Step 4: Query L_p norm for normalizing the entities
    print_entity_normalization_message()
    entity_normalization_norm = select_integer_value(
        print_msg=ENTITIES_NORMALIZATION_PRINT_MSG,
        prompt_msg=ENTITIES_NORMALIZATION_PROMPT_MSG,
        error_msg=ENTITIES_NORMALIZATION_ERROR_MSG,
    )
    config[NORM_FOR_NORMALIZATION_OF_ENTITIES] = entity_normalization_norm
    print_section_divider()

    # Step 5: Query learning rate
    print_learning_rate_message()
    learning_rate = select_float_value(
        print_msg=LEARNING_RATE_PRINT_MSG,
        prompt_msg=LEARNING_RATE_PROMPT_MSG,
        error_msg=LEARNING_RATE_ERROR_MSG,
    )
    config[LEARNING_RATE] = learning_rate
    print_section_divider()

    # Step 6: Query batch size
    print_batch_size_message()
    batch_size = select_integer_value(
        print_msg=BATCH_SIZE_PRINT_MSG,
        prompt_msg=BATCH_SIZE_PROMPT_MSG,
        error_msg=BATCH_SIZE_ERROR_MSG,
    )
    config[BATCH_SIZE] = batch_size
    print_section_divider()

    # Step 7: Query number of epochs
    print_number_epochs_message()
    number_epochs = select_integer_value(
        print_msg=EPOCH_PRINT_MSG,
        prompt_msg=EPOCH_PROMPT_MSG,
        error_msg=EPOCH_ERROR_MSG,
    )
    config[NUM_EPOCHS] = number_epochs
    print_section_divider()

    return config
def configure_um_hpo_pipeline(model_name):
    """Configure the Unstructured Model (UM) for hyper-parameter optimization.

    Queries the user for the lists of candidate values that the HPO run
    will sample from, and stores them in the configuration dictionary.

    :param model_name: name of the model
    :rtype: OrderedDict
    :return: configuration dictionary
    """
    config = get_config_dict(model_name)

    # Step 1: Query embedding dimensions
    print_hpo_embedding_dimensions_message()
    embedding_dimensions = select_positive_integer_values(
        EMBEDDING_DIMENSIONS_PRINT_MSG,
        EMBEDDING_DIMENSIONS_PROMPT_MSG,
        EMBEDDING_DIMENSIONS_ERROR_MSG,
    )
    config[EMBEDDING_DIM] = embedding_dimensions
    print_section_divider()

    # Step 2: Query margin loss
    # (fixed misspelled local "magin_loss" -> "margin_loss")
    print_hpo_margin_losses_message()
    margin_loss = select_float_values(
        print_msg=MARGIN_LOSSES_PRINT_MSG,
        prompt_msg=MARGIN_LOSSES_PROMPT_MSG,
        error_msg=MARGIN_LOSSES_ERROR_MSG,
    )
    config[MARGIN_LOSS] = margin_loss
    print_section_divider()

    # Step 3: Query L_p norms to use as scoring function
    print_hpo_scoring_fcts_message()
    scoring_fct_norm = select_positive_integer_values(
        print_msg=NORMS_SCORING_FUNCTION_PRINT_MSG,
        prompt_msg=NORMS_SCORING_FUNCTION_PROMPT_MSG,
        error_msg=NORMS_SCORING_FUNCTION_ERROR_MSG,
    )
    config[SCORING_FUNCTION_NORM] = scoring_fct_norm
    print_section_divider()

    # Step 4: Query L_p norms for normalizing the entities
    print_hpo_entity_normalization_norms_message()
    entity_normalization_norm = select_positive_integer_values(
        print_msg=NORMS_FOR_NORMALIZATION_OF_ENTITIES_PRINT_MSG,
        prompt_msg=NORMS_FOR_NORMALIZATION_OF_ENTITIES_PROMPT_MSG,
        error_msg=NORMS_FOR_NORMALIZATION_OF_ENTITIES_ERROR_MSG,
    )
    config[NORM_FOR_NORMALIZATION_OF_ENTITIES] = entity_normalization_norm
    print_section_divider()

    # Step 5: Query learning rate
    print_hpo_learning_rates_message()
    learning_rate = select_float_values(
        print_msg=LEARNING_RATES_PRINT_MSG,
        prompt_msg=LEARNING_RATES_PROMPT_MSG,
        error_msg=LEARNING_RATES_ERROR_MSG,
    )
    config[LEARNING_RATE] = learning_rate
    print_section_divider()

    # Step 6: Query batch size
    print_hpo_batch_sizes_message()
    batch_size = select_positive_integer_values(
        print_msg=BATCH_SIZES_PRINT_MSG,
        prompt_msg=BATCH_SIZES_PROMPT_MSG,
        error_msg=BATCH_SIZES_ERROR_MSG,
    )
    config[BATCH_SIZE] = batch_size
    print_section_divider()

    # Step 7: Query number of epochs
    print_hpo_epochs_message()
    number_epochs = select_positive_integer_values(
        print_msg=EPOCHS_PRINT_MSG,
        prompt_msg=EPOCHS_PROMPT_MSG,
        error_msg=EPOCHS_ERROR_MSG,
    )
    config[NUM_EPOCHS] = number_epochs
    print_section_divider()

    return config
def configure_trans_d_training_pipeline(model_name: str):
    """Configure Trans D from pipeline.

    Interactively queries every TransD training hyper-parameter and stores
    each answer in the configuration dictionary. Step comments were
    renumbered (original had "Step 2" twice and skipped "Step 4").

    :param model_name: name of the model
    :rtype: OrderedDict
    :return: configuration dictionary
    """
    config = get_config_dict(model_name)

    # Step 1: Query embedding dimension for entities
    print_entities_embedding_dimension_message()
    embedding_dimension = select_integer_value(
        print_msg=ENTITIES_EMBEDDING_DIMENSION_PRINT_MSG,
        prompt_msg=ENTITIES_EMBEDDING_DIMENSION_PROMPT_MSG,
        error_msg=ENTITIES_EMBEDDING_DIMENSION_ERROR_MSG,
    )
    config[EMBEDDING_DIM] = embedding_dimension
    print_section_divider()

    # Step 2: Query embedding dimension for relations
    print_relations_embedding_dimension_message()
    relation_embedding_dimension = select_integer_value(
        print_msg=RELATION_EMBEDDING_DIMENSION_PRINT_MSG,
        prompt_msg=RELATION_EMBEDDING_DIMENSION_PROMPT_MSG,
        error_msg=RELATION_EMBEDDING_DIMENSION_ERROR_MSG,
    )
    config[RELATION_EMBEDDING_DIM] = relation_embedding_dimension
    print_section_divider()

    # Step 3: Query margin loss
    # (fixed misspelled local "magin_loss" -> "margin_loss")
    print_training_margin_loss_message()
    margin_loss = select_float_value(
        print_msg=MARGIN_LOSS_PRINT_MSG,
        prompt_msg=MARGIN_LOSS_PROMPT_MSG,
        error_msg=MARGIN_LOSS_ERROR_MSG,
    )
    config[MARGIN_LOSS] = margin_loss
    print_section_divider()

    # Step 4: Query L_p norm as scoring function
    print_scoring_fct_message()
    scoring_fct_norm = select_integer_value(
        print_msg=NORM_SCORING_FUNCTION_PRINT_MSG,
        prompt_msg=NORM_SCORING_FUNCTION_PROMPT_MSG,
        error_msg=NORM_SCORING_FUNCTION_ERROR_MSG,
    )
    config[SCORING_FUNCTION_NORM] = scoring_fct_norm
    print_section_divider()

    # Step 5: Query learning rate
    print_learning_rate_message()
    learning_rate = select_float_value(
        print_msg=LEARNING_RATE_PRINT_MSG,
        prompt_msg=LEARNING_RATE_PROMPT_MSG,
        error_msg=LEARNING_RATE_ERROR_MSG,
    )
    config[LEARNING_RATE] = learning_rate
    print_section_divider()

    # Step 6: Query batch size
    print_batch_size_message()
    batch_size = select_integer_value(
        print_msg=BATCH_SIZE_PRINT_MSG,
        prompt_msg=BATCH_SIZE_PROMPT_MSG,
        error_msg=BATCH_SIZE_ERROR_MSG,
    )
    config[BATCH_SIZE] = batch_size
    print_section_divider()

    # Step 7: Query number of epochs
    print_number_epochs_message()
    number_epochs = select_integer_value(
        print_msg=EPOCH_PRINT_MSG,
        prompt_msg=EPOCH_PROMPT_MSG,
        error_msg=EPOCH_ERROR_MSG,
    )
    config[NUM_EPOCHS] = number_epochs
    print_section_divider()

    return config
def prompt_config(*, config: Optional[Dict] = None, show_welcome: bool = True, do_prompt_training: bool = True) -> Dict:
    """Prompt the user for the run configuration.

    Walks the user through every question needed for a run, writes the
    resulting configuration to ``configuration.json`` inside the chosen
    output directory, and returns it.

    :param config: an existing configuration to extend; a fresh ``OrderedDict``
        is created when ``None``
    :param show_welcome: whether to print the welcome banner and intro first
    :param do_prompt_training: whether to ask for the training file
    :return: the completed configuration dictionary
    """
    config = OrderedDict() if config is None else config

    # Step 1: Welcome + Intro
    if show_welcome:
        print_welcome_message()
        print_section_divider()
        print_intro()
        print_section_divider()

    # Step 2: Ask for training file
    if do_prompt_training:
        prompt_training_file(config)
        print_section_divider()

    # Step 3: Ask for execution mode
    prompt_execution_mode(config)
    print_section_divider()

    # Step 4: Ask for model
    model_name = prompt_embedding_model()
    print_section_divider()

    # Step 5: Query parameters depending on the selected execution mode
    prompt_execution_parameters(config, model_name=model_name)
    print_section_divider()

    # Step 5.5: Prompt for evaluation parameters depending on the selected execution mode
    prompt_evaluation_parameters(config)

    # Step 6: Please select a random seed
    prompt_random_seed(config)
    print_section_divider()

    # Step 7: Query device to train on
    prompt_device(config)
    print_section_divider()

    # Step 8: Define output directory
    config[OUTPUT_DIREC] = query_output_directory()
    print_section_divider()

    # Persist the finished configuration next to the future run artifacts
    config_path = os.path.join(config[OUTPUT_DIREC], 'configuration.json')
    with open(config_path, 'w') as file:
        json.dump(config, file, indent=2)

    return config
def prompt_embedding_model() -> str:
    """Prompt the user to select an embedding model.

    :return: the name of the chosen embedding model
    """
    print_model_selection_message()
    chosen_model = select_embedding_model()
    print_section_divider()
    return chosen_model
def configure_trans_h_training_pipeline(model_name: str) -> Dict:
    """Prompt the user to configure Trans H from pipeline.

    Interactively queries every TransH training hyper-parameter and stores
    each answer in the configuration dictionary.

    :param model_name: name of the model
    :return: configuration dictionary
    """
    config = get_config_dict(model_name)

    # Step 1: Query embedding dimension
    print_training_embedding_dimension_message()
    print_embedding_dimension_info_message()
    embedding_dimension = select_integer_value(
        print_msg=EMBEDDING_DIMENSION_PRINT_MSG,
        prompt_msg=EMBEDDING_DIMENSION_PROMPT_MSG,
        error_msg=EMBEDDING_DIMENSION_ERROR_MSG,
    )
    config[EMBEDDING_DIM] = embedding_dimension
    print_section_divider()

    # Step 2: Query margin loss
    # (fixed misspelled local "magin_loss" -> "margin_loss")
    print_training_margin_loss_message()
    margin_loss = select_float_value(
        print_msg=MARGIN_LOSS_PRINT_MSG,
        prompt_msg=MARGIN_LOSS_PROMPT_MSG,
        error_msg=MARGIN_LOSS_ERROR_MSG,
    )
    config[MARGIN_LOSS] = margin_loss
    print_section_divider()

    # Step 3: Query L_p norm as scoring function
    print_scoring_fct_message()
    scoring_fct_norm = select_integer_value(
        print_msg=NORM_SCORING_FUNCTION_PRINT_MSG,
        prompt_msg=NORM_SCORING_FUNCTION_PROMPT_MSG,
        error_msg=NORM_SCORING_FUNCTION_ERROR_MSG,
    )
    config[SCORING_FUNCTION_NORM] = scoring_fct_norm
    print_section_divider()

    # Step 4: Query weight for the soft constraints
    print_trans_h_soft_constraints_weight_message()
    soft_constraints_weight = select_float_value(
        print_msg=WEIGHTS_SOFT_CONSTRAINT_TRANS_H_PRINT_MSG,
        prompt_msg=WEIGHTS_SOFT_CONSTRAINT_TRANS_H_PROMPT_MSG,
        error_msg=WEIGHTS_SOFT_CONSTRAINT_TRANS_H_ERROR_MSG,
    )
    config[WEIGHT_SOFT_CONSTRAINT_TRANS_H] = soft_constraints_weight
    print_section_divider()

    # Step 5: Query learning rate
    print_learning_rate_message()
    learning_rate = select_float_value(
        print_msg=LEARNING_RATE_PRINT_MSG,
        prompt_msg=LEARNING_RATE_PROMPT_MSG,
        error_msg=LEARNING_RATE_ERROR_MSG,
    )
    config[LEARNING_RATE] = learning_rate
    print_section_divider()

    # Step 6: Query batch size
    print_batch_size_message()
    batch_size = select_integer_value(
        print_msg=BATCH_SIZE_PRINT_MSG,
        prompt_msg=BATCH_SIZE_PROMPT_MSG,
        error_msg=BATCH_SIZE_ERROR_MSG,
    )
    config[BATCH_SIZE] = batch_size
    print_section_divider()

    # Step 7: Query number of epochs
    print_number_epochs_message()
    number_epochs = select_integer_value(
        print_msg=EPOCH_PRINT_MSG,
        prompt_msg=EPOCH_PROMPT_MSG,
        error_msg=EPOCH_ERROR_MSG,
    )
    config[NUM_EPOCHS] = number_epochs
    print_section_divider()

    return config
def configure_trans_h_hpo_pipeline(model_name):
    """Configure Trans H for hyper-parameter optimization.

    Queries the user for the lists of candidate values that the HPO run
    will sample from, and stores them in the configuration dictionary.

    :param model_name: name of the model
    :rtype: OrderedDict
    :return: configuration dictionary
    """
    config = get_config_dict(model_name)

    # Step 1: Query embedding dimensions
    print_hpo_embedding_dimensions_message()
    embedding_dimensions = select_positive_integer_values(
        EMBEDDING_DIMENSIONS_PRINT_MSG,
        EMBEDDING_DIMENSIONS_PROMPT_MSG,
        EMBEDDING_DIMENSIONS_ERROR_MSG,
    )
    config[EMBEDDING_DIM] = embedding_dimensions
    print_section_divider()

    # Step 2: Query margin loss
    # (fixed misspelled local "magin_loss" -> "margin_loss")
    print_hpo_margin_losses_message()
    margin_loss = select_float_values(
        print_msg=MARGIN_LOSSES_PRINT_MSG,
        prompt_msg=MARGIN_LOSSES_PROMPT_MSG,
        error_msg=MARGIN_LOSSES_ERROR_MSG,
    )
    config[MARGIN_LOSS] = margin_loss
    print_section_divider()

    # Step 3: Query L_p norms to use as scoring function
    print_hpo_scoring_fcts_message()
    scoring_fct_norm = select_positive_integer_values(
        print_msg=NORMS_SCORING_FUNCTION_PRINT_MSG,
        prompt_msg=NORMS_SCORING_FUNCTION_PROMPT_MSG,
        error_msg=NORMS_SCORING_FUNCTION_ERROR_MSG,
    )
    config[SCORING_FUNCTION_NORM] = scoring_fct_norm
    print_section_divider()

    # Step 4: Query weight for the soft constraints
    print_hpo_trans_h_soft_constraints_weights_message()
    soft_constraints_weight = select_float_values(
        print_msg=WEIGHTS_SOFT_CONSTRAINT_TRANS_H_PRINT_MSG,
        prompt_msg=WEIGHTS_SOFT_CONSTRAINT_TRANS_H_PROMPT_MSG,
        error_msg=WEIGHTS_SOFT_CONSTRAINT_TRANS_H_ERROR_MSG,
    )
    config[WEIGHT_SOFT_CONSTRAINT_TRANS_H] = soft_constraints_weight
    print_section_divider()

    # Step 5: Query learning rate
    print_hpo_learning_rates_message()
    learning_rate = select_float_values(
        print_msg=LEARNING_RATES_PRINT_MSG,
        prompt_msg=LEARNING_RATES_PROMPT_MSG,
        error_msg=LEARNING_RATES_ERROR_MSG,
    )
    config[LEARNING_RATE] = learning_rate
    print_section_divider()

    # Step 6: Query batch size
    print_hpo_batch_sizes_message()
    batch_sizes = select_positive_integer_values(
        print_msg=BATCH_SIZES_PRINT_MSG,
        prompt_msg=BATCH_SIZES_PROMPT_MSG,
        error_msg=BATCH_SIZES_ERROR_MSG,
    )
    config[BATCH_SIZE] = batch_sizes
    print_section_divider()

    # Step 7: Query number of epochs
    print_hpo_epochs_message()
    number_epochs = select_positive_integer_values(
        print_msg=EPOCHS_PRINT_MSG,
        prompt_msg=EPOCHS_PROMPT_MSG,
        error_msg=EPOCHS_ERROR_MSG,
    )
    config[NUM_EPOCHS] = number_epochs
    print_section_divider()

    return config
def configure_conv_e_hpo_pipeline(model_name):
    """Configure ConvE for hyper-parameter optimization.

    Queries the user for the lists of candidate values that the HPO run
    will sample from. Step comments were renumbered (original repeated
    "Step 4" and drifted afterwards).

    :param str model_name: name of the model
    :rtype: OrderedDict
    :return: configuration dictionary
    """
    config = get_config_dict(model_name)

    # Step 1: Query embedding dimension
    print_hpo_embedding_dimensions_message()
    embedding_dimensions = select_positive_integer_values(
        EMBEDDING_DIMENSIONS_PRINT_MSG,
        EMBEDDING_DIMENSIONS_PROMPT_MSG,
        EMBEDDING_DIMENSIONS_ERROR_MSG,
    )
    config[EMBEDDING_DIM] = embedding_dimensions
    print_section_divider()

    # Step 2: Query height and width (must factor the embedding dimensions)
    print_hpo_conv_e_width_height_message()
    heights, widths = select_heights_and_widths(embedding_dimensions)
    config[CONV_E_HEIGHT] = heights
    config[CONV_E_WIDTH] = widths
    print_section_divider()

    # Step 3: Query number of input channels
    print_conv_input_channels_message()
    num_input_channels = select_positive_integer_values(
        CONV_E_HPO_INPUT_CHANNELS_PRINT_MSG,
        CONV_E_HPO_INPUT_CHANNELS_PROMPT_MSG,
        CONV_E_HPO_INPUT_CHANNELS_ERROR_MSG,
    )
    config[CONV_E_INPUT_CHANNELS] = num_input_channels
    print_section_divider()

    # Step 4: Query number of output channels
    print_conv_e_output_channels_message()
    num_output_channels = select_positive_integer_values(
        CONV_E_HPO_OUT_CHANNELS_PRINT_MSG,
        CONV_E_HPO_OUT_CHANNELS_PROMPT_MSG,
        CONV_E_HPO_OUT_CHANNELS_ERROR_MSG,
    )
    config[CONV_E_OUTPUT_CHANNELS] = num_output_channels
    print_section_divider()

    # Step 5: Query kernel height (bounded by the selected heights)
    print_conv_e_hpo_kernel_height_message()
    kernel_heights = select_kernel_sizes(
        depending_params=heights,
        print_msg=CONV_E_HPO_KERNEL_HEIGHTS_PRINT_MSG,
        prompt_msg=CONV_E_HPO_KERNEL_HEIGHTS_PROMPT_MSG,
        error_msg=CONV_E_HPO_KERNEL_HEIGHTS_ERROR_MSG,
    )
    config[CONV_E_KERNEL_HEIGHT] = kernel_heights
    print_section_divider()

    # Step 6: Query kernel width (bounded by the selected widths)
    print_conv_e_hpo_kernel_width_message()
    kernel_widths = select_kernel_sizes(
        depending_params=widths,
        print_msg=CONV_E_KERNEL_WIDTH_PRINT_MSG,
        prompt_msg=CONV_E_KERNEL_WIDTH_PROMPT_MSG,
        error_msg=CONV_E_KERNEL_WIDTH_ERROR_MSG,
    )
    config[CONV_E_KERNEL_WIDTH] = kernel_widths
    print_section_divider()

    # Step 7: Query dropout for input layer
    print_hpo_input_dropout_message()
    input_dropout = select_float_values(
        CONV_E_HPO_INPUT_DROPOUTS_PRINT_MSG,
        CONV_E_HPO_INPUT_DROPOUTS_PROMPT_MSG,
        CONV_E_HPO_INPUT_DROPOUTS_ERROR_MSG,
    )
    config[CONV_E_INPUT_DROPOUT] = input_dropout
    print_section_divider()

    # Step 8: Query dropout for output layer
    print_hpo_output_dropout_message()
    output_dropouts = select_zero_one_range_float_values(
        print_msg=CONV_E_HPO_OUTPUT_DROPOUT_PRINT_MSG,
        prompt_msg=CONV_E_HPO_OUTPUT_DROPOUT_PROMPT_MSG,
        error_msg=CONV_E_HPO_OUTPUT_DROPOUT_ERROR_MSG,
    )
    config[CONV_E_OUTPUT_DROPOUT] = output_dropouts
    print_section_divider()

    # Step 9: Query feature map dropout for output layer
    print_hpo_feature_maps_dropouts_message()
    feature_map_dropouts = select_zero_one_range_float_values(
        print_msg=CONV_E_HPO_FEATURE_MAP_DROPOUT_PRINT_MSG,
        prompt_msg=CONV_E_HPO_FEATURE_MAP_DROPOUT_PROMPT_MSG,
        error_msg=CONV_E_HPO_FEATURE_MAP_DROPOUT_ERROR_MSG,
    )
    config[CONV_E_FEATURE_MAP_DROPOUT] = feature_map_dropouts
    print_section_divider()

    # Step 10: Query learning rate
    print_hpo_learning_rates_message()
    learning_rates = select_float_values(
        print_msg=LEARNING_RATES_PRINT_MSG,
        prompt_msg=LEARNING_RATES_PROMPT_MSG,
        error_msg=LEARNING_RATES_ERROR_MSG,
    )
    config[LEARNING_RATE] = learning_rates
    print_section_divider()

    # Step 11: Query batch size
    print_hpo_batch_sizes_message()
    batch_size = select_positive_integer_values(
        print_msg=BATCH_SIZES_PRINT_MSG,
        prompt_msg=BATCH_SIZES_PROMPT_MSG,
        error_msg=BATCH_SIZES_ERROR_MSG,
    )
    config[BATCH_SIZE] = batch_size
    print_section_divider()

    # Step 12: Query number of epochs
    print_hpo_epochs_message()
    number_epochs = select_positive_integer_values(
        print_msg=EPOCHS_PRINT_MSG,
        prompt_msg=EPOCHS_PROMPT_MSG,
        error_msg=EPOCHS_ERROR_MSG,
    )
    config[NUM_EPOCHS] = number_epochs
    print_section_divider()

    return config
def configure_conv_e_training_pipeline(model_name: str):
    """Configure ConvE.

    Interactively queries every ConvE training hyper-parameter and stores
    each answer in the configuration dictionary. Step comments were
    renumbered (original repeated "Step 4" and then fell back to 5/6/7).

    :param str model_name: name of the model
    :rtype: OrderedDict
    :return: configuration dictionary
    """
    config = get_config_dict(model_name)

    # Step 1: Query embedding dimension
    print_training_embedding_dimension_message()
    print_embedding_dimension_info_message()
    embedding_dimension = select_integer_value(
        print_msg=EMBEDDING_DIMENSION_PRINT_MSG,
        prompt_msg=EMBEDDING_DIMENSION_PROMPT_MSG,
        error_msg=EMBEDDING_DIMENSION_ERROR_MSG,
    )
    config[EMBEDDING_DIM] = embedding_dimension
    print_section_divider()

    # Step 2: Query height and width (must factor the embedding dimension)
    print_conv_e_width_height_message()
    height, width = query_height_and_width_for_conv_e(embedding_dimension)
    config[CONV_E_HEIGHT] = height
    config[CONV_E_WIDTH] = width
    print_section_divider()

    # Step 3: Query number of input channels
    print_conv_input_channels_message()
    num_input_channels = select_integer_value(
        CONV_E_INPUT_CHANNEL_PRINT_MSG,
        CONV_E_INPUT_CHANNEL_PROMPT_MSG,
        CONV_E_INPUT_CHANNEL_ERROR_MSG,
    )
    config[CONV_E_INPUT_CHANNELS] = num_input_channels
    print_section_divider()

    # Step 4: Query number of output channels
    print_conv_e_output_channels_message()
    num_output_channels = select_integer_value(
        CONV_E_OUT_CHANNEL_PRINT_MSG,
        CONV_E_OUT_CHANNEL_PROMPT_MSG,
        CONV_E_OUT_CHANNEL_ERROR_MSG,
    )
    config[CONV_E_OUTPUT_CHANNELS] = num_output_channels
    print_section_divider()

    # Step 5: Query kernel height (bounded by the selected height)
    print_conv_kernel_height_message()
    kernel_height = query_kernel_param(
        depending_param=height,
        print_msg=CONV_E_KERNEL_HEIGHT_PRINT_MSG,
        prompt_msg=CONV_E_KERNEL_HEIGHT_PROMPT_MSG,
        error_msg=CONV_E_KERNEL_HEIGHT_ERROR_MSG,
    )
    config[CONV_E_KERNEL_HEIGHT] = kernel_height
    print_section_divider()

    # Step 6: Query kernel width (bounded by the selected width)
    print_conv_kernel_width_message()
    kernel_width = query_kernel_param(
        depending_param=width,
        print_msg=CONV_E_KERNEL_WIDTH_PRINT_MSG,
        prompt_msg=CONV_E_KERNEL_WIDTH_PROMPT_MSG,
        error_msg=CONV_E_KERNEL_WIDTH_ERROR_MSG,
    )
    config[CONV_E_KERNEL_WIDTH] = kernel_width
    print_section_divider()

    # Step 7: Query dropout for input layer
    # NOTE(review): this calls the HPO-flavored message helper inside the
    # training pipeline — presumably intentional reuse; verify.
    print_hpo_input_dropout_message()
    input_dropout = select_zero_one_float_value(
        print_msg=CONV_E_INPUT_DROPOUT_PRINT_MSG,
        prompt_msg=CONV_E_INPUT_DROPOUT_PROMPT_MSG,
        error_msg=CONV_E_INPUT_DROPOUT_ERROR_MSG,
    )
    config[CONV_E_INPUT_DROPOUT] = input_dropout
    print_section_divider()

    # Step 8: Query dropout for output layer
    print_output_dropout_message()
    output_dropout = select_zero_one_float_value(
        print_msg=CONV_E_OUTPUT_DROPOUT_PRINT_MSG,
        prompt_msg=CONV_E_OUTPUT_DROPOUT_PROMPT_MSG,
        error_msg=CONV_E_OUTPUT_DROPOUT_ERROR_MSG,
    )
    config[CONV_E_OUTPUT_DROPOUT] = output_dropout
    print_section_divider()

    # Step 9: Query feature map dropout for output layer
    print_feature_map_dropout_message()
    feature_map_dropout = select_zero_one_float_value(
        print_msg=CONV_E_FEATURE_MAP_DROPOUT_PRINT_MSG,
        prompt_msg=CONV_E__FEATURE_MAP_DROPOUT_PROMPT_MSG,
        error_msg=CONV_E_FEATURE_MAP_DROPOUT_ERROR_MSG,
    )
    config[CONV_E_FEATURE_MAP_DROPOUT] = feature_map_dropout
    print_section_divider()

    # Step 10: Query learning rate
    print_learning_rate_message()
    learning_rate = select_float_value(
        print_msg=LEARNING_RATE_PRINT_MSG,
        prompt_msg=LEARNING_RATE_PROMPT_MSG,
        error_msg=LEARNING_RATE_ERROR_MSG,
    )
    config[LEARNING_RATE] = learning_rate
    print_section_divider()

    # Step 11: Query batch size
    print_batch_size_message()
    batch_size = select_integer_value(
        print_msg=BATCH_SIZE_PRINT_MSG,
        prompt_msg=BATCH_SIZE_PROMPT_MSG,
        error_msg=BATCH_SIZE_ERROR_MSG,
    )
    config[BATCH_SIZE] = batch_size
    print_section_divider()

    # Step 12: Query number of epochs
    print_number_epochs_message()
    number_epochs = select_integer_value(
        print_msg=EPOCH_PRINT_MSG,
        prompt_msg=EPOCH_PROMPT_MSG,
        error_msg=EPOCH_ERROR_MSG,
    )
    config[NUM_EPOCHS] = number_epochs
    print_section_divider()

    return config