def train_linear_elastic_net(training_predictor_table, training_target_table,
                             validation_predictor_table,
                             validation_target_table):
    """Trains linear regression with elastic-net penalty.

    :param training_predictor_table: See doc for `utils.read_feature_file`.
    :param training_target_table: Same.
    :param validation_predictor_table: Same.
    :param validation_target_table: Same.
    """

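    # Fixed penalties for this model: lambda1 = 10^-5 (L1/lasso coefficient)
    # and lambda2 = 5 (L2/ridge coefficient).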
    linear_en_model_object = utils.setup_linear_regression(lambda1=1e-5,
                                                           lambda2=5.)

    _ = utils.train_linear_regression(
        model_object=linear_en_model_object,
        training_predictor_table=training_predictor_table,
        training_target_table=training_target_table)

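    # Evaluate on the training data.  The mean training target value is passed
    # along, presumably as the baseline for the MAE/MSE skill scores.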
    training_predictions = linear_en_model_object.predict(
        training_predictor_table.to_numpy())
    mean_training_target_value = numpy.mean(
        training_target_table[utils.TARGET_NAME].values)

    _ = utils.evaluate_regression(
        target_values=training_target_table[utils.TARGET_NAME].values,
        predicted_target_values=training_predictions,
        mean_training_target_value=mean_training_target_value,
        dataset_name='training')
    print(MINOR_SEPARATOR_STRING)

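    # Evaluate on the validation data, using the same training-mean baseline.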
    validation_predictions = linear_en_model_object.predict(
        validation_predictor_table.to_numpy())

    _ = utils.evaluate_regression(
        target_values=validation_target_table[utils.TARGET_NAME].values,
        predicted_target_values=validation_predictions,
        mean_training_target_value=mean_training_target_value,
        dataset_name='validation')


def l1l2_experiment_testing(lambda1_values, lambda2_values,
                            validation_mae_skill_matrix,
                            training_predictor_table, training_target_table,
                            testing_predictor_table, testing_target_table):
    """Selects and tests model for experiment with L1/L2 regularization.

    :param lambda1_values: See doc for `l1l2_experiment_validation`.
    :param lambda2_values: Same.
    :param validation_mae_skill_matrix: Same.
    :param training_predictor_table: See doc for `utils.read_feature_file`.
    :param training_target_table: Same.
    :param testing_predictor_table: Same.
    :param testing_target_table: Same.
    """

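    # Find the (lambda1, lambda2) pair with the highest MAE skill score on
    # validation data.  `argmax` runs on the flattened matrix, so
    # `unravel_index` converts the flat index back into row/column indices.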
    best_linear_index = numpy.argmax(numpy.ravel(validation_mae_skill_matrix))

    best_lambda1_index, best_lambda2_index = numpy.unravel_index(
        best_linear_index, (len(lambda1_values), len(lambda2_values)))

    best_lambda1 = lambda1_values[best_lambda1_index]
    best_lambda2 = lambda2_values[best_lambda2_index]
    best_validation_maess = numpy.max(validation_mae_skill_matrix)

    message_string = (
        'Best MAE skill score on validation data = {0:.3f} ... corresponding '
        'lasso coeff = 10^{1:.1f}, ridge coeff = 10^{2:.1f}').format(
            best_validation_maess, numpy.log10(best_lambda1),
            numpy.log10(best_lambda2))

    print(message_string)

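    # Retrain with the best hyperparameters on the training data, then
    # evaluate the final model on the testing data.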
    final_model_object = utils.setup_linear_regression(lambda1=best_lambda1,
                                                       lambda2=best_lambda2)

    _ = utils.train_linear_regression(
        model_object=final_model_object,
        training_predictor_table=training_predictor_table,
        training_target_table=training_target_table)

    testing_predictions = final_model_object.predict(
        testing_predictor_table.to_numpy())
    mean_training_target_value = numpy.mean(
        training_target_table[utils.TARGET_NAME].values)

    _ = utils.evaluate_regression(
        target_values=testing_target_table[utils.TARGET_NAME].values,
        predicted_target_values=testing_predictions,
        mean_training_target_value=mean_training_target_value,
        dataset_name='testing')


def l1l2_experiment_training(training_predictor_table, training_target_table,
                             validation_predictor_table,
                             validation_target_table):
    """Trains models for hyperparameter experiment with L1/L2 regularization.

    :param training_predictor_table: See doc for `utils.read_feature_file`.
    :param training_target_table: Same.
    :param validation_predictor_table: Same.
    :param validation_target_table: Same.
    """

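    # Hyperparameter grid: 9 lasso coefficients from 10^-8 to 10^-4 and 11
    # ridge coefficients from 10^-4 to 10^1, equally spaced in log space.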
    lambda1_values = numpy.logspace(-8, -4, num=9)
    lambda2_values = numpy.logspace(-4, 1, num=11)

    num_lambda1 = len(lambda1_values)
    num_lambda2 = len(lambda2_values)

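    # Allocate one (num_lambda1 x num_lambda2) matrix per validation metric,
    # initialized to NaN.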
    validation_mae_matrix_s01 = numpy.full((num_lambda1, num_lambda2),
                                           numpy.nan)
    validation_mse_matrix_s02 = numpy.full((num_lambda1, num_lambda2),
                                           numpy.nan)
    validation_mae_skill_matrix = numpy.full((num_lambda1, num_lambda2),
                                             numpy.nan)
    validation_mse_skill_matrix = numpy.full((num_lambda1, num_lambda2),
                                             numpy.nan)

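    # The mean training target value is reused as the baseline for all skill
    # scores in the grid search.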
    mean_training_target_value = numpy.mean(
        training_target_table[utils.TARGET_NAME].values)

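    # Train and validate one model per (lambda1, lambda2) combination.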
    for i in range(num_lambda1):
        for j in range(num_lambda2):
            this_message_string = (
                'Training model with lasso coeff = 10^{0:.1f}, ridge coeff = '
                '10^{1:.1f}...').format(numpy.log10(lambda1_values[i]),
                                        numpy.log10(lambda2_values[j]))

            print(this_message_string)

            this_model_object = utils.setup_linear_regression(
                lambda1=lambda1_values[i], lambda2=lambda2_values[j])

            _ = utils.train_linear_regression(
                model_object=this_model_object,
                training_predictor_table=training_predictor_table,
                training_target_table=training_target_table)

            these_validation_predictions = this_model_object.predict(
                validation_predictor_table.to_numpy())

            this_evaluation_dict = utils.evaluate_regression(
                target_values=validation_target_table[
                    utils.TARGET_NAME].values,
                predicted_target_values=these_validation_predictions,
                mean_training_target_value=mean_training_target_value,
                verbose=False,
                create_plots=False)

            validation_mae_matrix_s01[i, j] = this_evaluation_dict[
                utils.MAE_KEY]
            validation_mse_matrix_s02[i, j] = this_evaluation_dict[
                utils.MSE_KEY]
            validation_mae_skill_matrix[i, j] = this_evaluation_dict[
                utils.MAE_SKILL_SCORE_KEY]
            validation_mse_skill_matrix[i, j] = this_evaluation_dict[
                utils.MSE_SKILL_SCORE_KEY]