# NOTE(review): fragment of an evaluation routine; the enclosing `def` is
# outside this view. The line below has lost its leading indentation
# relative to the rest of the body -- likely a paste/extraction artifact;
# confirm against the full file.
print("Dataset: " + conf['data_path'])
    print("Model: " + conf['model_path'])

    # Announce which dataset flavor / complexity / architecture is evaluated.
    print("Evaluating with " + data_type + " a " + complexity + " " +
          net_type + " model")

    # Load data
    if data_type == "Functions_dataset":
        # read_function_data returns (parameters, test_set); `parameters` is
        # indexed list-style ([0][3]) in this branch -- presumably a
        # list-of-lists, unlike the DataFrame in the Vectors branch below.
        parameters, test_set = func_utils.read_function_data(conf['data_path'])
        gap = float(parameters[0][3])
        dim = None

        print('Puting the test data into the right shape...')
        testX, testY = func_utils.reshape_function_data(test_set)

        # Functions dataset is evaluated with a Keras MLP loaded from disk.
        to_test_net = Net.Mlp(model_file=conf['model_path'], framework="keras")

    elif data_type == "Vectors_dataset":
        # Here `parameters` is treated as a DataFrame (see .iloc below).
        parameters, test_set = vect_utils.read_vector_data(conf['data_path'])
        gap = parameters.iloc[0]['gap']
        dim = None

        print('Puting the test data into the right shape...')
        testX, testY = vect_utils.reshape_vector_data(test_set)
        if net_type == "NOREC":
            # Non-recurrent vector model: 1-D convolutional network.
            to_test_net = Net.Convolution1D(model_file=conf['model_path'],
                                            framework="keras")
        else:
            # Recurrent vector model: LSTM.
            to_test_net = Net.Lstm(model_file=conf['model_path'],
                                   framework="keras")
            # NOTE(review): suspect line -- `parameters` was read as a
            # DataFrame in this branch (indexed with .iloc above), so the
            # positional `parameters[0][4]` lookups here would select column
            # 0 then row 4, if they work at all; `root` is also not defined
            # anywhere in this view. Verify against the full file.
            filename = root + '/' + parameters[0][4] + '_' + parameters[0][3] + '_' + parameters[0][5] + '_Predictor'

        # NOTE(review): from here the chunk appears to splice into a
        # *training* routine: `train_set`, `val_set`, `activation`, `loss`,
        # `dropout`, `drop_percentage`, `data_dir`, and `root` are not
        # defined anywhere in this view -- confirm against the full file.

        # Put the train data into the right shape
        trainX, trainY = func_utils.reshape_function_data(train_set)

        # Put the validation data into the right shape
        valX, valY = func_utils.reshape_function_data(val_set)

        train_data = [trainX, trainY]
        val_data = [valX, valY]

        # Model settings
        # NOTE(review): `in_dim` is assigned but never used below -- the MLP
        # takes `trainX[0].shape` directly. Dead assignment or latent bug?
        in_dim = trainX.shape[1:]
        out_dim = 1
        # Build a fresh Keras MLP for the Functions dataset; one scalar output.
        to_train_net = Net.Mlp(activation=activation, loss=loss, dropout=dropout,
                               drop_percentage=drop_percentage, input_shape=trainX[0].shape,
                               output_shape=out_dim, data_type="Function", framework="keras")

    elif data_type == 'Vectors_dataset':
        print('Training with vectors')
        loss = conf['vect_loss']
        # Load data
        # Flags presumably consumed by later training code (outside this view).
        channels = False
        batch_data = False
        # Train/val samples are read from fixed subdirectories of data_dir.
        _, train_set = vect_utils.read_vector_data(data_dir + 'train/samples')
        _, val_set = vect_utils.read_vector_data(data_dir + 'val/samples')
        filename = root

        # Put the train data into the right shape
        trainX, trainY = vect_utils.reshape_vector_data(train_set)