Example #1
def evaluator(x_train, y_train, x_val, y_val, x_test, y_test, experiment_name="", init="fixed", **kwargs):
    # Define model
    model = Sequential(loss=CrossEntropy())
    model.add(Dense(nodes=10, input_dim=x_train.shape[0]))
    model.add(Softmax())

    # Fit model
    model_save_path = "models/" + experiment_name + "/" + dict_to_string(kwargs) + "_" + init
    best_model = model.fit(X=x_train, Y=y_train, X_val=x_val, Y_val=y_val,
                           save_path=model_save_path, **kwargs)
    
    # Plot results
    test_acc = best_model.get_classification_metrics(x_test, y_test)[0]
    subtitle = "l2_reg: " + str(kwargs["l2_reg"]) + ", lr: " + str(kwargs["lr"]) +\
                ", weight_init:" + init + ", Test Acc: " + str(test_acc)
    best_model.plot_training_progress(show=False,
                                    save=True,
                                    name="figures/" + experiment_name + "/" + dict_to_string(kwargs) + "_" + init,
                                    subtitle=subtitle)
    montage(W=np.array(best_model.layers[0].weights[:, :-1]),
            title=subtitle,
            path="figures/" + experiment_name + "/weights/" + dict_to_string(kwargs) + "_" + init)

    # Value handed back to the metaparameter search: validation accuracy
    val_acc = best_model.get_classification_metrics(x_val, y_val)[0]  # Get accuracy
    result = {"value": val_acc, "model": best_model}  # Save score and model
    return result
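Every evaluator on this page builds its save paths with a dict_to_string helper that none of the fragments define. Judging by the saved-model path in Example #20 ("n1-39_n2-33_k1-2_k2-10_batch_size-50"), a minimal sketch could look like this:

def dict_to_string(d):
    # Hypothetical helper: flatten kwargs such as {"n1": 39, "n2": 33} into a
    # filename-safe "n1-39_n2-33", matching the paths used on this page.
    return "_".join(str(k) + "-" + str(v) for k, v in d.items())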
Example #2
import numpy as np  # assumed: np is used below but this fragment omits the import

from mlp.models import Sequential  # assumed, mirroring the imports in Example #20
from mlp.losses import CrossEntropy
from mlp.layers import Conv2D, Dense, Softmax, Relu, Flatten, Dropout, MaxPool2D
from mlp.callbacks import MetricTracker, BestModelSaver, LearningRateScheduler
from mlp.utils import plot_confusion_matrix

np.random.seed(1)

if __name__ == "__main__":
    # Load data
    x_test, y_test, names = read_names_test()
    classes = read_names_countries()

    print(x_test.shape)

    # Load model
    model = Sequential(loss=CrossEntropy())
    model.load("models/names_test")
    # model.load("models/names_no_compensation")

    y_pred_prob_test = model.predict(x_test)
    y_pred_test = model.predict_classes(x_test)
    print(y_pred_prob_test)
    print(y_test)

    plot_confusion_matrix(y_pred_test, y_test, classes, "figures/conf_test")

    import matplotlib.pyplot as plt
    plt.title("Prediction Vectors")
    pos = plt.imshow(y_pred_prob_test.T)
    plt.xticks(range(len(classes)), classes, rotation=45, ha='right')
    plt.yticks(range(len(names)), names)
    plt.colorbar(pos)  # assumed: pos is captured but never used in the fragment
    plt.show()  # assumed: the fragment ends without displaying the figure
Example #3
    # Define callbacks
    mt = MetricTracker()  # Stores training evolution info (losses and metrics)
    # lrs = LearningRateScheduler(evolution="linear", lr_min=1e-3, lr_max=9e-1)
    # lrs = LearningRateScheduler(evolution="constant", lr_min=1e-3, lr_max=9e-1)
    # callbacks = [mt, lrs]
    callbacks = [mt]

    # Define hyperparams
    d = x_train.shape[0]  # Input feature dimension
    n1 = 40  # Number of filters in the first Conv2D
    k1 = 6  # Kernel width (y size) of the first Conv2D
    n2 = 20  # Number of filters in the second Conv2D
    k2 = 4  # Kernel width (y size) of the second Conv2D
    # Define model
    model = Sequential(loss=CrossEntropy(class_count=None), metric=Accuracy())
    model.add(
        Conv2D(num_filters=n1,
               kernel_shape=(d, k1),
               input_shape=x_train.shape[:-1]))
    model.add(Relu())
    model.add(Conv2D(num_filters=n2, kernel_shape=(1, k2)))
    model.add(Relu())
    model.add(Flatten())
    model.add(Dense(nodes=y_train.shape[0]))
    model.add(Softmax())
    # Fit model
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              # assumed: the fragment is truncated here; the remaining arguments
              # mirror the full call in Example #4 below
              batch_size=100, epochs=1000, lr=1e-2, momentum=0.8,
              l2_reg=0.001, compensate=True, callbacks=callbacks)
Example #4
def evaluator(x_train, y_train, x_val, y_val, experiment_name="", **kwargs):
    print(kwargs)
    # Saving directories
    figure_file = "figures/" + experiment_name + "/" + dict_to_string(kwargs)
    model_file = "models/" + experiment_name + "/" + dict_to_string(kwargs)

    mt = MetricTracker()  # Stores training evolution info (losses and metrics)

    # Define model
    d = x_train.shape[0]
    n1 = kwargs["n1"]  # Filters of first Conv2D
    k1 = kwargs["k1"]  # First kernel y size
    n2 = kwargs["n2"]  # Filters of second Conv2D
    k2 = kwargs["k2"]  # Second kernel y size
    batch_size = kwargs["batch_size"]

    try:
        # Define model
        model = Sequential(loss=CrossEntropy(class_count=None),
                           metric=Accuracy())
        model.add(
            Conv2D(num_filters=n1,
                   kernel_shape=(d, k1),
                   input_shape=x_train.shape[:-1]))
        model.add(Relu())
        model.add(Conv2D(num_filters=n2, kernel_shape=(1, k2)))
        model.add(Relu())
        model.add(Flatten())
        model.add(Dense(nodes=y_train.shape[0]))
        model.add(Softmax())
        # Fit model
        model.fit(X=x_train,
                  Y=y_train,
                  X_val=x_val,
                  Y_val=y_val,
                  batch_size=batch_size,
                  epochs=1000,
                  lr=1e-2,
                  momentum=0.8,
                  l2_reg=0.001,
                  compensate=True,
                  callbacks=[mt])
    except Exception as e:
        print(e)
        return -1  # Configuration is infeasible (e.g., kernel larger than its input)
    model.save(model_file)

    # Write results
    n1 = str(n1)
    n2 = str(n2)
    k1 = str(k1)
    k2 = str(k2)
    batch_size = str(batch_size)
    subtitle = "n1:" + n1 + ", n2:" + n2 + ", k1:" + k1 + ", k2:" + k1 +\
               ", batch_size:" + batch_size
    mt.plot_training_progress(show=False,
                              save=True,
                              name=figure_file,
                              subtitle=subtitle)

    # Maximizing value: validation accuracy
    return model.val_metric
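A hedged sketch of a driver for this evaluator (no search loop appears in these fragments; the hyperparameter names and ranges are assumptions, loosely based on the model path in Example #20):

best_score, best_cfg = -1, None
for _ in range(20):
    cfg = {"n1": int(np.random.randint(10, 50)),
           "k1": int(np.random.randint(2, 12)),
           "n2": int(np.random.randint(10, 40)),
           "k2": int(np.random.randint(2, 12)),
           "batch_size": int(np.random.choice([50, 100, 200]))}
    score = evaluator(x_train, y_train, x_val, y_val,
                      experiment_name="name_metaparam_search", **cfg)
    if score > best_score:  # evaluator returns -1 for infeasible configs
        best_score, best_cfg = score, cfg
print("Best config:", best_cfg, "val acc:", best_score)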
Example #5
if __name__ == "__main__":
    # Load data
    x_train, y_train = LoadXY("data_batch_1")
    x_val, y_val = LoadXY("data_batch_2")
    x_test, y_test = LoadXY("test_batch")

    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    # Define model
    model = Sequential(loss=CrossEntropy(), metric=Accuracy())
    model.add(Dense(nodes=800, input_dim=x_train.shape[0]))
    model.add(Relu())
    model.add(Dropout(ones_ratio=0.50))  # ones_ratio is presumably the keep probability
    model.add(Dense(nodes=10, input_dim=800))
    model.add(Softmax())

    ns = 500

    # Define callbacks
    mt = MetricTracker()  # Stores training evolution info
    # bms = BestModelSaver(save_dir=None)  # Saves model with highest val_metric
    lrs = LearningRateScheduler(evolution="cyclic",
                                lr_min=1e-3,
                                lr_max=1e-1,
                                ns=ns)  # Modifies lr while training
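The fragment stops once the scheduler is defined; a hedged sketch of the continuation, mirroring the other cyclic-lr examples on this page (hyperparameter values are placeholders):

    callbacks = [mt, lrs]
    model.fit(X=x_train, Y=y_train, X_val=x_val, Y_val=y_val,
              batch_size=100, iterations=4 * ns, callbacks=callbacks)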
Example #6
def evaluator(x_train,
              y_train,
              x_val,
              y_val,
              x_test,
              y_test,
              experiment_name="",
              **kwargs):
    # Saving directories
    figure_file = "figures/" + experiment_name + "/" + dict_to_string(kwargs)
    model_file = "models/" + experiment_name + "/" + dict_to_string(kwargs)

    # Define model
    model = Sequential(loss=CrossEntropy(), metric=Accuracy())
    model.add(Dense(nodes=50, input_dim=x_train.shape[0]))
    model.add(Relu())
    model.add(Dense(nodes=10, input_dim=50))
    model.add(Softmax())

    # Pick metaparams
    batch_size = 100
    ns = int(2 * np.floor(x_train.shape[1] / batch_size))  # int cast: np.floor returns a float
    iterations = 4 * ns  # 2 cycles (one cycle spans 2 * ns iterations)

    # Define callbacks
    mt = MetricTracker()  # Stores training evolution info
    # bms = BestModelSaver(save_dir=None)
    lrs = LearningRateScheduler(evolution="cyclic",
                                lr_min=1e-5,
                                lr_max=1e-1,
                                ns=ns)
    # callbacks = [mt, bms, lrs]
    callbacks = [mt, lrs]

    # Adjust logarithmic
    kwargs["l2_reg"] = 10**kwargs["l2_reg"]

    # Fit model
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=batch_size,
              epochs=None,
              iterations=iterations,
              **kwargs,
              callbacks=callbacks)

    # Write results
    # best_model = bms.get_best_model()
    test_acc = model.get_metric_loss(x_test, y_test)[0]
    subtitle = "l2_reg: " + str(
        kwargs["l2_reg"]) + ", Test Acc: " + str(test_acc)
    mt.plot_training_progress(show=False,
                              save=True,
                              name=figure_file,
                              subtitle=subtitle)

    # Maximizing value: validation accuracy
    # val_metric = bms.best_metric
    val_metric = model.get_metric_loss(x_val, y_val)[0]
    return val_metric
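This evaluator treats kwargs["l2_reg"] as a base-10 exponent; a minimal driver sketch under that assumption (the loop and the experiment name are not part of the source):

for exponent in np.random.uniform(-5, -1, size=8):
    val_acc = evaluator(x_train, y_train, x_val, y_val, x_test, y_test,
                        experiment_name="l2_coarse_search",
                        l2_reg=float(exponent))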
Example #7
if __name__ == "__main__":
    # Load data
    x_train, y_train, x_val, y_val, x_test, y_test = read_mnist(n_train=200,
                                                                n_val=200,
                                                                n_test=2)

    # Define callbacks
    mt = MetricTracker()  # Stores training evolution info (losses and metrics)
    # lrs = LearningRateScheduler(evolution="linear", lr_min=1e-3, lr_max=9e-1)
    # lrs = LearningRateScheduler(evolution="constant", lr_min=1e-3, lr_max=9e-1)
    # callbacks = [mt, lrs]
    callbacks = [mt]

    # Define model
    model = Sequential(loss=CrossEntropy(), metric=Accuracy())
    model.add(
        Conv2D(num_filters=64, kernel_shape=(4, 4), input_shape=(28, 28, 1)))
    model.add(Relu())
    model.add(MaxPool2D(kernel_shape=(2, 2)))
    # model.add(Conv2D(num_filters=32, kernel_shape=(3, 3)))
    # model.add(Relu())
    model.add(Flatten())
    # model.add(Flatten(input_shape=(28, 28, 1)))
    model.add(Dense(nodes=400))
    model.add(Relu())
    model.add(Dense(nodes=10))
    model.add(Softmax())

    # for filt in model.layers[0].filters:
    #     print(filt)
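The fragment ends before training; a hedged sketch of the call that would typically follow, reusing the fit signature from the other examples on this page (values are placeholders):

    model.fit(X=x_train, Y=y_train, X_val=x_val, Y_val=y_val,
              batch_size=100, epochs=10, lr=1e-2, momentum=0.9,
              l2_reg=0.0, callbacks=callbacks)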
Example #8
    # Element-wise relative error; denom (defined above, not shown in this
    # fragment) is presumably np.abs(analytical_grad_weight) + np.abs(numerical_grad_w),
    # with entries below _EPS clamped to _EPS to avoid division by zero
    a = np.divide(
        np.abs(analytical_grad_weight - numerical_grad_w),
        np.multiply(denom, (denom > _EPS)) +
        np.multiply(_EPS * np.ones(denom.shape), (denom <= _EPS)))
    np.set_printoptions(suppress=True)
    print(np.round(a * 100, decimals=2))
    av_error = np.average(a)
    max_error = np.max(a)
    print("Averaged Element-Wise Relative Error:", av_error*100, "%")
    print("Max Element-Wise Relative Error:", max_error*100, "%")
    # np.set_printoptions(suppress=False)


if __name__ == "__main__":
    # Define model
    v_rnn = VanillaRNN(state_size=state_size, input_size=K, output_size=K)
    model = Sequential(loss=CrossEntropy(class_count=None), metric=Accuracy())
    model.add(v_rnn)


    model.layers[0].reset_state(copy.deepcopy(state))
    # print(model.layers[0].c)

    # Fit model
    l2_reg = 0.0
    model.fit(X=encoded_data, epochs=1, lr=2e-2, momentum=0.95, l2_reg=l2_reg,
              batcher=RnnBatcher(seq_length), callbacks=[])
    print(model.layers[0].dl_dc)
    anal = copy.deepcopy(model.layers[0].dl_dc)

    model.layers[0].reset_state(copy.deepcopy(state))
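The fragment resets the state and stops; a hedged sketch of the comparison that presumably follows, reusing the element-wise relative error from Example #8 (num_grad stands for a finite-difference gradient of c; its computation is not shown):

    _EPS = 1e-9
    denom = np.abs(anal) + np.abs(num_grad)
    rel_err = np.abs(anal - num_grad) / np.maximum(denom, _EPS)
    print("Max element-wise relative error:", np.max(rel_err) * 100, "%")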
Example #9
def evaluator(l2_reg):
    # Define model
    model = Sequential(loss=CrossEntropy(), metric=Accuracy())
    model.add(Dense(nodes=800, input_dim=x_train.shape[0]))
    model.add(Relu())
    model.add(Dense(nodes=10, input_dim=800))
    model.add(Softmax())

    ns = 800

    # Define callbacks
    mt = MetricTracker()  # Stores training evolution info
    lrs = LearningRateScheduler(evolution="cyclic",
                                lr_min=1e-3,
                                lr_max=1e-1,
                                ns=ns)  # Modifies lr while training
    callbacks = [mt, lrs]

    # Fit model
    iterations = 4 * ns
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=100,
              iterations=iterations,
              l2_reg=l2_reg,
              shuffle_minibatch=True,
              callbacks=callbacks)
    model.save("models/yes_dropout_test")

    # Test model
    val_acc = model.get_metric_loss(x_val, y_val)[0]
    test_acc = model.get_metric_loss(x_test, y_test)[0]
    subtitle = "L2 param: " + str(l2_reg) + ", Test acc: " + str(test_acc)
    mt.plot_training_progress(show=True,
                              save=True,
                              name="figures/l2reg_optimization/" + str(l2_reg),
                              subtitle=subtitle)
    print("Val accuracy:", val_acc)
    print("Test accuracy:", test_acc)
    return val_acc
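A hedged sketch of a log-uniform search over l2_reg with this evaluator (assumed driver; the evaluator reads x_train and friends from the enclosing scope):

for exponent in np.random.uniform(-5, -1, size=8):
    evaluator(float(10 ** exponent))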
Example #10
if __name__ == "__main__":
    # Load data
    x_train, y_train = LoadXY("data_batch_1")
    x_val, y_val = LoadXY("data_batch_2")
    x_test, y_test = LoadXY("test_batch")

    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    # Define model
    model = Sequential(loss=CrossEntropy(), metric=Accuracy())
    model.add(Dense(nodes=50, input_dim=x_train.shape[0]))
    model.add(Relu())
    model.add(Dense(nodes=10, input_dim=50))
    model.add(Softmax())

    ns = 800

    # Define callbacks
    mt = MetricTracker()  # Stores training evolution info
    lrs = LearningRateScheduler(evolution="cyclic",
                                lr_min=1e-7,
                                lr_max=1e-2,
                                ns=ns)  # Modifies lr while training
    callbacks = [mt, lrs]
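The fragment ends before training; with lr_min=1e-7 and lr_max=1e-2 this looks like a learning-rate range test. A hedged sketch of the fit that would follow, mirroring Example #9:

    model.fit(X=x_train, Y=y_train, X_val=x_val, Y_val=y_val,
              batch_size=100, iterations=4 * ns, callbacks=callbacks)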
Example #11
    # print(y_train.shape)

    # for i in range(200):
    #     cv2.imshow("image", x_train[..., i])
    #     cv2.waitKey()

    # Define callbacks
    mt = MetricTracker(file_name="cifar_test_3")  # Stores training evolution info (losses and metrics)
    # bms = BestModelSaver("models/best_cifar")  # Saves the model with the highest val metric
    lrs = LearningRateScheduler(evolution="cyclic", lr_min=1e-3, lr_max=0.2, ns=500)
    # lrs = LearningRateScheduler(evolution="constant", lr_min=1e-3, lr_max=9e-1)
    # callbacks = [mt, lrs]
    callbacks = [mt, lrs]

    # Define architecture (copied from https://appliedmachinelearning.blog/2018/03/24/achieving-90-accuracy-in-object-recognition-task-on-cifar-10-dataset-with-keras-convolutional-neural-networks/)
    model = Sequential(loss=CrossEntropy(), metric=Accuracy())
    model.add(Conv2D(num_filters=32, kernel_shape=(3, 3), stride=2, input_shape=(32, 32, 3)))
    model.add(Relu())
    model.add(Conv2D(num_filters=64, kernel_shape=(3, 3)))
    model.add(Relu())
    model.add(MaxPool2D(kernel_shape=(2, 2), stride=2))
    model.add(Conv2D(num_filters=128, kernel_shape=(2, 2)))
    model.add(Relu())
    model.add(MaxPool2D(kernel_shape=(2, 2)))
    model.add(Flatten())
    model.add(Dense(nodes=200))
    model.add(Relu())
    model.add(Dense(nodes=10))
    model.add(Softmax())

Example #12
    # x_train is assumed to be loaded above this fragment (cf. LoadXY("data_batch_1"))
    x_val, y_val = LoadXY("data_batch_2")
    x_test, y_test = LoadXY("test_batch")

    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    x = x_train[:, 0:20]
    y = y_train[:, 0:20]
    reg = 0.1

    # Define model
    model = Sequential(loss=CrossEntropy())
    model.add(Dense(nodes=50, input_dim=x_train.shape[0]))
    model.add(Relu())
    model.add(Dense(nodes=10, input_dim=50))
    model.add(Softmax())

    anal_time = time.time()
    model.fit(
        x,
        y,
        batch_size=None,
        epochs=1,
        lr=0,  # 0 lr will not change weights
        momentum=0,
        l2_reg=reg)
    analytical_grad = model.layers[0].gradient
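A hedged sketch of the comparison step that presumably follows, borrowing ComputeGradsNum from Example #15 and the relative-error check from Example #8:

    numerical_grad = ComputeGradsNum(x, y, model, l2_reg=reg, h=0.001)
    denom = np.abs(analytical_grad) + np.abs(numerical_grad)
    rel_err = np.abs(analytical_grad - numerical_grad) / np.maximum(denom, 1e-9)
    print("Max element-wise relative error:", np.max(rel_err) * 100, "%")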
Example #13
        y_train = np.concatenate((y_train, y), axis=1)
    x_val = x_train[:, -1000:]
    y_val = y_train[:, -1000:]
    x_train = x_train[:, :-1000]
    y_train = y_train[:, :-1000]
    x_test, y_test = getXY(LoadBatch("test_batch"))

    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    # Modelling
    model = Sequential(loss="categorical_hinge")
    model.add(
        Dense(nodes=10,
              input_dim=x_train.shape[0],  # x_train assumed: the fragment references an undefined x
              weight_initialization="fixed"))

    best_model = model.fit(
        X=x_train,
        Y=y_train,
        X_val=x_val,
        Y_val=y_val,
        batch_size=20,
        epochs=100,
        lr=0.001,
        momentum=0.5,
        l2_reg=0.05,
        save_path="models/svm/test_2")
    # assumed completion: the call is truncated in the source; the save name
    # mirrors the "models/svm/test_2" path above
    best_model.plot_training_progress(show=False,
                                      save=True,
                                      name="figures/svm/test_2")
Example #14
    # Put it in a data folder

    # Load data
    x_train, y_train = LoadXY("data_batch_1")
    x_val, y_val = LoadXY("data_batch_2")
    x_test, y_test = LoadXY("test_batch")

    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    # Define model
    model = Sequential(loss=CrossEntropy())
    model.add(Dense(nodes=10, input_dim=x_train.shape[0]))
    model.add(Softmax())

    # Fit model
    # model.load("models/mlp_test")
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=100,
              epochs=40,
              lr=0.001,
              momentum=0.0,
              l2_reg=0.0,
              shuffle_minibatch=False)
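A hedged sketch of the evaluation that typically follows these fits (get_classification_metrics returns accuracy first, as in Example #1):

    test_acc = model.get_classification_metrics(x_test, y_test)[0]
    print("Test accuracy:", test_acc)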
Example #15
    # x_val, y_val = getXY(LoadBatch("data_batch_2"))
    # x_test, y_test = getXY(LoadBatch("test_batch"))

    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    # x_val = (x_val - mean_x) / std_x
    # x_test = (x_test - mean_x) / std_x

    x = x_train[:, 0:5]
    y = y_train[:, 0:5]
    reg = 0.1

    # Define model
    model = Sequential(loss="categorical_hinge")
    model.add(Dense(nodes=10, input_dim=x.shape[0], weight_initialization="fixed"))

    anal_time = time.time()
    model.fit(x, y,
              batch_size=10000, epochs=1, lr=0, # 0 lr will not change weights
              momentum=0, l2_reg=reg)
    analytical_grad = model.layers[0].gradient
    anal_time = time.time() - anal_time  # elapsed time for the analytical gradient

    # Get Numerical gradient
    num_time = time.time()
    numerical_grad = ComputeGradsNum(x, y, model, l2_reg=reg, h=0.001)
    print(numerical_grad.shape)
    num_time = time.time() - num_time  # elapsed time for the numerical gradient
Example #16
    x_val, y_val = getXY(LoadBatch("data_batch_2"))
    x_test, y_test = getXY(LoadBatch("test_batch"))

    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    x = x_train[:, 0:1]  # single sample, sliced to keep the 2-D column shape
    y = y_train[:, 0]
    reg = 0.1

    # Define model
    model = Sequential(loss="cross_entropy")
    model.add(
        Dense(nodes=10, input_dim=x.shape[0], weight_initialization="fixed"))
    model.add(Activation("softmax"))

    anal_time = time.time()
    model.fit(
        x,
        y,
        batch_size=10000,
        epochs=1,
        lr=0,  # 0 lr will not change weights
        momentum=0,
        l2_reg=reg)
    analytical_grad = model.layers[0].gradient
    anal_time = time.time() - anal_time  # elapsed time for the analytical gradient
Example #17
def evaluator(x_train, y_train, x_val, y_val, **kwargs):
    # Define model
    model = Sequential(loss="cross_entropy")
    model.add(
        Dense(nodes=10, input_dim=x_train.shape[0], weight_initialization="fixed"))
    model.add(Activation("softmax"))

    # Fit model
    model.fit(X=x_train, Y=y_train, X_val=x_val, Y_val=y_val, **kwargs)
    model.plot_training_progress(show=False, save=True, name="figures/" + dict_to_string(kwargs))
    # model.save("models/" + dict_to_string(kwargs))

    # Value handed back to the metaparameter search: validation accuracy
    value = model.get_classification_metrics(x_val, y_val)[0]  # Get accuracy
    result = {"value": value, "model": model}  # Save score and model
    return result
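A hedged example call (assumed driver; the kwargs flow straight into model.fit, so any fit argument works here), reusing the hyperparameters seen in Example #14:

result = evaluator(x_train, y_train, x_val, y_val,
                   batch_size=100, epochs=40, lr=0.001,
                   momentum=0.0, l2_reg=0.0)
print("Validation accuracy:", result["value"])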
Example #18
    # Put it in a Data folder

    # Load data
    x_train, y_train = getXY(LoadBatch("data_batch_1"))
    x_val, y_val = getXY(LoadBatch("data_batch_2"))
    x_test, y_test = getXY(LoadBatch("test_batch"))

    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    # Define SVM multi-class model
    model = Sequential(loss="categorical_hinge")
    model.add(Dense(nodes=10, input_dim=x_train.shape[0]))
    model.add(Activation("softmax"))

    # Fit model
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=100,
              epochs=100,
              lr=0.0001,
              momentum=0.1,
              l2_reg=0.1)
    model.plot_training_progress()
Example #19
    return grads_w, grads_b

if __name__ == "__main__":
    x_train, y_train, x_val, y_val, x_test, y_test = read_cifar_10(n_train=3, n_val=5, n_test=2)
    # x_train, y_train, x_val, y_val, x_test, y_test = read_mnist(n_train=2, n_val=5, n_test=2)
    # x_train, y_train, x_val, y_val, x_test, y_test = read_names(n_train=500)

    # Per-class sample counts scaled by the number of classes; the reciprocal
    # serves as a class-balancing weight (zero-count classes are left as-is)
    class_sum = np.sum(y_train, axis=1) * y_train.shape[0]
    class_count = np.reciprocal(class_sum, where=abs(class_sum) > 0)

    print(class_count)

    print(type(x_train[0, 0, 0]))

    # Define model
    model = Sequential(loss=CrossEntropy(), metric=Accuracy())
    model.add(Conv2D(num_filters=2, kernel_shape=(4, 4), stride=3, dilation_rate=2, input_shape=x_train.shape[0:-1]))
    model.add(Relu())
    model.add(MaxPool2D((2, 2), stride=3))
    model.add(Flatten())
    model.add(Dense(nodes=y_train.shape[0]))
    model.add(Relu())
    model.add(Softmax())

    print(np.min(np.abs(model.layers[0].filters)))

    reg = 0.0

    # Fit model
    anal_time = time.time()
    model.fit(X=x_train, Y=y_train, X_val=x_val, Y_val=y_val,
              # assumed: the call is truncated in the source; the remaining
              # arguments mirror the gradient-check fit in Example #12
              batch_size=None, epochs=1, lr=0,  # 0 lr will not change weights
              momentum=0, l2_reg=reg)
    "/Toy-DeepLearning-Framework/")

from mlp.metrics import Accuracy
from mlp.models import Sequential
from mlp.losses import CrossEntropy
from mlp.layers import Conv2D, Dense, Softmax, Relu, Flatten, Dropout, MaxPool2D
from mlp.callbacks import MetricTracker, BestModelSaver, LearningRateScheduler
from mlp.utils import plot_confusion_matrix

np.random.seed(1)

if __name__ == "__main__":
    # Load data
    x_train, y_train, x_val, y_val, _, _ = read_names(n_train=-1)
    print(x_train.shape)
    classes = read_names_countries()

    # Load model
    model = Sequential(loss=CrossEntropy())
    # model.load("models/names_test")
    model.load(
        "models/name_metaparam_search_2/n1-39_n2-33_k1-2_k2-10_batch_size-50")

    y_pred_train = model.predict_classes(x_train)
    y_pred_val = model.predict_classes(x_val)

    plot_confusion_matrix(y_pred_train, y_train, classes,
                          "figures/conf_best_model")
    plot_confusion_matrix(y_pred_val, y_val, classes,
                          "figures/conf_best_model_val")