Code example #1
def evaluator(x_train, y_train, x_val, y_val, x_test, y_test, experiment_name="", init="fixed", **kwargs):
    # Define model
    model = Sequential(loss=CrossEntropy())
    model.add(Dense(nodes=10, input_dim=x_train.shape[0]))
    model.add(Softmax())

    # Fit model
    model_save_path = "models/" + experiment_name + "/" + dict_to_string(kwargs) + "_" + init
    best_model = model.fit(X=x_train, Y=y_train, X_val=x_val, Y_val=y_val,
                           save_path=model_save_path, **kwargs)
    
    # Plot results
    test_acc = best_model.get_classification_metrics(x_test, y_test)[0]
    subtitle = "l2_reg: " + str(kwargs["l2_reg"]) + ", lr: " + str(kwargs["lr"]) +\
                ", weight_init:" + init + ", Test Acc: " + str(test_acc)
    best_model.plot_training_progress(show=False,
                                    save=True,
                                    name="figures/" + experiment_name + "/" + dict_to_string(kwargs) + "_" + init,
                                    subtitle=subtitle)
    montage(W=np.array(best_model.layers[0].weights[:, :-1]),
            title=subtitle,
            path="figures/" + experiment_name + "/weights/" + dict_to_string(kwargs) + "_" + init)

    # Maximizing value: validation accuracy
    val_acc = best_model.get_classification_metrics(x_val, y_val)[0] # Get accuracy
    result = {"value": val_acc, "model": best_model}  # Save score and model
    return result
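The evaluator follows a common pattern for hyperparameter search: train one configuration, then return a scalar score together with the fitted model. A minimal sketch of a driver that could consume it, assuming hypothetical candidate grids (the actual search strategy is not shown in these snippets):

import itertools

# Hypothetical driver: exhaustively try a few (l2_reg, lr) pairs and keep
# the configuration with the highest validation accuracy. The candidate
# values below are placeholders.
best = {"value": -1.0, "model": None}
for l2_reg, lr in itertools.product([0.0, 1e-4, 1e-2], [1e-3, 1e-2]):
    result = evaluator(x_train, y_train, x_val, y_val, x_test, y_test,
                       experiment_name="grid_search", l2_reg=l2_reg, lr=lr)
    if result["value"] > best["value"]:
        best = result
print("Best validation accuracy:", best["value"])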
Code example #2
def evaluator(x_train, y_train, x_val, y_val, **kwargs):
    # Define model
    model = Sequential(loss="cross_entropy")
    model.add(
        Dense(nodes=10, input_dim=x_train.shape[0], weight_initialization="fixed"))
    model.add(Activation("softmax"))

    # Fit model
    model.fit(X=x_train, Y=y_train, X_val=x_val, Y_val=y_val, **kwargs)
    model.plot_training_progress(show=False, save=True, name="figures/" + dict_to_string(kwargs))
    # model.save("models/" + dict_to_string(kwargs))

    # Maximizing value: validation accuracy
    value = model.get_classification_metrics(x_val, y_val)[0] # Get accuracy
    result = {"value": value, "model": model}  # Save score and model
    return result
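These snippets repeatedly call dict_to_string to build save paths, but its definition is never shown. A plausible minimal sketch, assuming it simply serializes the hyperparameters into a file-name-safe string:

# Hypothetical helper (not the original): turn {"lr": 0.001, "l2_reg": 0.01}
# into "l2_reg-0.01_lr-0.001" for use in figure and model paths.
def dict_to_string(d):
    return "_".join(str(k) + "-" + str(v) for k, v in sorted(d.items()))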
Code example #3
def evaluator(l2_reg):
    # Define model
    model = Sequential(loss=CrossEntropy(), metric=Accuracy())
    model.add(Dense(nodes=800, input_dim=x_train.shape[0]))
    model.add(Relu())
    model.add(Dense(nodes=10, input_dim=800))
    model.add(Softmax())

    ns = 800

    # Define callbacks
    mt = MetricTracker()  # Stores training evolution info
    lrs = LearningRateScheduler(evolution="cyclic",
                                lr_min=1e-3,
                                lr_max=1e-1,
                                ns=ns)  # Modifies lr while training
    callbacks = [mt, lrs]

    # Fit model
    iterations = 4 * ns
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=100,
              iterations=iterations,
              l2_reg=l2_reg,
              shuffle_minibatch=True,
              callbacks=callbacks)
    model.save("models/yes_dropout_test")

    # Test model
    val_acc = model.get_metric_loss(x_val, y_val)[0]
    test_acc = model.get_metric_loss(x_test, y_test)[0]
    subtitle = "L2 param: " + str(l2_reg) + ", Test acc: " + str(test_acc)
    mt.plot_training_progress(show=True,
                              save=True,
                              name="figures/l2reg_optimization/" + str(l2_reg),
                              subtitle=subtitle)
    print("Val accuracy:", val_acc)
    print("Test accuracy:", test_acc)
    return val_acc
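The LearningRateScheduler with evolution="cyclic" sweeps the learning rate linearly from lr_min to lr_max over ns iterations and back down over the next ns, so iterations = 4 * ns above amounts to two full cycles. A minimal sketch of that triangular rule (the scheduler's internals are not shown; this is an assumption consistent with its parameters):

# Hypothetical stand-in for the cyclic schedule: triangular policy with one
# full cycle every 2 * ns iterations.
def cyclic_lr(t, lr_min=1e-3, lr_max=1e-1, ns=800):
    cycle_pos = t % (2 * ns)
    if cycle_pos < ns:  # linear ramp up
        return lr_min + (lr_max - lr_min) * cycle_pos / ns
    return lr_max - (lr_max - lr_min) * (cycle_pos - ns) / ns  # ramp down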
Code example #4
    v_rnn = VanillaRNN(state_size=state_size, input_size=K, output_size=K)
    model = Sequential(loss=CrossEntropy(class_count=None), metric=Accuracy())
    model.add(v_rnn)
    model.add(Softmax())

    # state = np.array(np.random.normal(0.1, 1./100.,
    #         (v_rnn.state_size,1)))
    model.layers[0].reset_state(copy.deepcopy(state))
    print(model.layers[0].W)

    # Fit model
    l2_reg = 0.0
    t1 = time.time()
    model.fit(X=encoded_data,
              epochs=1,
              lr=0,
              l2_reg=l2_reg,
              batcher=RnnBatcher(seq_length),
              callbacks=[])
    t_anal = time.time() - t1
    print(model.layers[0].dl_dw)
    anal = copy.deepcopy(model.layers[0].dl_dw)

    model.layers[0].reset_state(copy.deepcopy(state))

    x = "abcdefg"
    y = "bcdefga"
    x = np.array([char_to_ind[char] for char in x])
    x = one_hotify(x, num_classes=K)
    y = np.array([char_to_ind[char] for char in y])
    y = one_hotify(y, num_classes=K)
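one_hotify converts a vector of integer class indices into a one-hot matrix. Its definition is not included; a minimal sketch assuming the column-wise (classes x samples) layout that the Dense(input_dim=x_train.shape[0]) calls elsewhere suggest:

import numpy as np

# Hypothetical helper: one-hot encode class indices into a
# (num_classes x num_samples) matrix. The layout is an assumption.
def one_hotify(indices, num_classes):
    one_hot = np.zeros((num_classes, len(indices)))
    one_hot[indices, np.arange(len(indices))] = 1.0
    return one_hot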
Code example #5
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x)/std_x
    # x_val = (x_val - mean_x)/std_x
    # x_test = (x_test - mean_x)/std_x

    x = x_train[:, 0:5]
    y = y_train[:, 0:5]
    reg = 0.1

    # Define model
    model = Sequential(loss="categorical_hinge")
    model.add(Dense(nodes=10, input_dim=x.shape[0], weight_initialization="fixed"))

    anal_time = time.time()
    model.fit(x, y,
              batch_size=10000, epochs=1, lr=0, # 0 lr will not change weights
              momentum=0, l2_reg=reg)
    analytical_grad = model.layers[0].gradient
    anal_time = time.time() - anal_time

    # Get Numerical gradient
    num_time = time.time()
    numerical_grad = ComputeGradsNum(x, y, model, l2_reg=reg, h=0.001)
    print(numerical_grad.shape)
    num_time = time.time() - num_time

    _EPS = 1e-7
    denom = np.abs(analytical_grad) + np.abs(numerical_grad)
    av_error = np.average(
            np.divide(
                np.abs(analytical_grad - numerical_grad),
                np.maximum(_EPS, denom)))
    print("Average relative error:", av_error)
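ComputeGradsNum itself is not shown in these snippets. A minimal central-difference sketch, assuming the model exposes layers[0].weights and a cost(X, Y, l2_reg) method returning the regularized scalar loss (both of which are assumptions):

import numpy as np

# Hypothetical numerical gradient: perturb each weight by +/- h, re-evaluate
# the cost, and approximate the derivative with a central difference.
def ComputeGradsNum(X, Y, model, l2_reg=0.0, h=1e-5):
    W = model.layers[0].weights
    grad = np.zeros_like(W)
    for idx in np.ndindex(*W.shape):
        old = W[idx]
        W[idx] = old + h
        c_plus = model.cost(X, Y, l2_reg)
        W[idx] = old - h
        c_minus = model.cost(X, Y, l2_reg)
        W[idx] = old  # restore the original weight
        grad[idx] = (c_plus - c_minus) / (2 * h)
    return grad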
Code example #6
    d = x_train.shape[0]  # Input dimensionality (as in code example #7)
    n1 = 20  # Filters of first Conv2D (hypothetical value; not shown in this snippet)
    k1 = 6  # First kernel y size
    n2 = 20  # Filters of second Conv2D
    k2 = 4  # Second kernel y size
    # Define model
    model = Sequential(loss=CrossEntropy(class_count=None), metric=Accuracy())
    model.add(
        Conv2D(num_filters=n1,
               kernel_shape=(d, k1),
               input_shape=x_train.shape[:-1]))
    model.add(Relu())
    model.add(Conv2D(num_filters=n2, kernel_shape=(1, k2)))
    model.add(Relu())
    model.add(Flatten())
    model.add(Dense(nodes=y_train.shape[0]))
    model.add(Softmax())
    # Fit model
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=100,
              epochs=500,
              lr=1e-3,
              momentum=0.8,
              l2_reg=0.001,
              compensate=True,
              callbacks=callbacks)
    model.save("models/names_best")

    mt.plot_training_progress(save=True, name="figures/names_best")
    # y_pred_prob = model.predict(x_train)
Code example #7
def evaluator(x_train, y_train, x_val, y_val, experiment_name="", **kwargs):
    print(kwargs)
    # Saving directories
    figure_file = "figures/" + experiment_name + "/" + dict_to_string(kwargs)
    model_file = "models/" + experiment_name + "/" + dict_to_string(kwargs)

    mt = MetricTracker()  # Stores training evolution info (losses and metrics)

    # Define model
    d = x_train.shape[0]
    n1 = kwargs["n1"]  # Filters of first Conv2D
    k1 = kwargs["k1"]  # First kernel y size
    n2 = kwargs["n2"]  # Filters of second Conv2D
    k2 = kwargs["k2"]  # Second kernel y size
    batch_size = kwargs["batch_size"]

    try:
        # Define model
        model = Sequential(loss=CrossEntropy(class_count=None),
                           metric=Accuracy())
        model.add(
            Conv2D(num_filters=n1,
                   kernel_shape=(d, k1),
                   input_shape=x_train.shape[:-1]))
        model.add(Relu())
        model.add(Conv2D(num_filters=n2, kernel_shape=(1, k2)))
        model.add(Relu())
        model.add(Flatten())
        model.add(Dense(nodes=y_train.shape[0]))
        model.add(Softmax())
        # Fit model
        model.fit(X=x_train,
                  Y=y_train,
                  X_val=x_val,
                  Y_val=y_val,
                  batch_size=batch_size,
                  epochs=1000,
                  lr=1e-2,
                  momentum=0.8,
                  l2_reg=0.001,
                  compensate=True,
                  callbacks=[mt])
    except Exception as e:
        print(e)
        return -1  # If configuration impossible
    model.save(model_file)

    # Write results
    n1 = str(n1)
    n2 = str(n2)
    k1 = str(k1)
    k2 = str(k2)
    batch_size = str(batch_size)
    subtitle = "n1:" + n1 + ", n2:" + n2 + ", k1:" + k1 + ", k2:" + k1 +\
               ", batch_size:" + batch_size
    mt.plot_training_progress(show=False,
                              save=True,
                              name=figure_file,
                              subtitle=subtitle)

    # Maximizing value: validation accuracy
    return model.val_metric
Code example #8
    mt = MetricTracker()  # Stores training evolution info
    # bms = BestModelSaver(save_dir=None)  # Saves model with highest val_metric
    lrs = LearningRateScheduler(evolution="cyclic",
                                lr_min=1e-3,
                                lr_max=1e-1,
                                ns=ns)  # Modifies lr while training
    # callbacks = [mt, bms, lrs]
    callbacks = [mt, lrs]

    # Fit model
    iterations = 4 * ns
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=100,
              iterations=iterations,
              momentum=0.89,
              l2_reg=1e-5,
              shuffle_minibatch=True,
              callbacks=callbacks)
    model.save("models/yes_dropout_test")

    # Test model
    # best_model = bms.get_best_model()
    # test_acc, test_loss = best_model.get_metric_loss(x_test, y_test)
    # subtitle = "No Dropout, Test acc: " + test_acc
    subtitle = ""
    mt.plot_training_progress(show=True,
                              save=True,
                              name="figures/test_dropout_test",
                              subtitle=subtitle)
Code example #9
def evaluator(x_train,
              y_train,
              x_val,
              y_val,
              x_test,
              y_test,
              experiment_name="",
              **kwargs):
    # Saving directories
    figure_file = "figures/" + experiment_name + "/" + dict_to_string(kwargs)
    model_file = "models/" + experiment_name + "/" + dict_to_string(kwargs)

    # Define model
    model = Sequential(loss=CrossEntropy(), metric=Accuracy())
    model.add(Dense(nodes=50, input_dim=x_train.shape[0]))
    model.add(Relu())
    model.add(Dense(nodes=10, input_dim=50))
    model.add(Softmax())

    # Pick metaparams
    batch_size = 100
    ns = int(2 * np.floor(x_train.shape[1] / batch_size))
    iterations = 4 * ns  # 2 cycles

    # Define callbacks
    mt = MetricTracker()  # Stores training evolution info
    # bms = BestModelSaver(save_dir=None)
    lrs = LearningRateScheduler(evolution="cyclic",
                                lr_min=1e-5,
                                lr_max=1e-1,
                                ns=ns)
    # callbacks = [mt, bms, lrs]
    callbacks = [mt, lrs]

    # The search proposes the l2_reg exponent; convert from log scale
    kwargs["l2_reg"] = 10**kwargs["l2_reg"]

    # Fit model
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=batch_size,
              epochs=None,
              iterations=iterations,
              **kwargs,
              callbacks=callbacks)

    # Write results
    # best_model = bms.get_best_model()
    test_acc = model.get_metric_loss(x_test, y_test)[0]
    subtitle = "l2_reg: " + str(
        kwargs["l2_reg"]) + ", Test Acc: " + str(test_acc)
    mt.plot_training_progress(show=False,
                              save=True,
                              name=figure_file,
                              subtitle=subtitle)

    # Maximizing value: validation accuracy
    # val_metric = bms.best_metric
    val_metric = model.get_metric_loss(x_val, y_val)[0]
    return val_metric
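Because useful l2_reg values span orders of magnitude, the evaluator expects the exponent and maps it back with 10**. A minimal sketch of a caller sampling that exponent uniformly in log-space (the range [-5, -1] is an assumption; the snippet does not show the search driver):

import numpy as np

# Hypothetical caller: sample l2_reg exponents uniformly; the evaluator
# above converts each one via 10**exponent.
rng = np.random.default_rng(0)
for exponent in rng.uniform(-5, -1, size=8):
    score = evaluator(x_train, y_train, x_val, y_val, x_test, y_test,
                      experiment_name="l2_search", l2_reg=exponent)
    print("exponent:", exponent, "val acc:", score)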
Code example #10
    x_train, y_train = getXY(LoadBatch("data_batch_1"))
    x_val, y_val = getXY(LoadBatch("data_batch_2"))
    x_test, y_test = getXY(LoadBatch("test_batch"))

    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    # Define SVM multi-class model
    model = Sequential(loss="categorical_hinge")
    model.add(Dense(nodes=10, input_dim=x_train.shape[0]))
    model.add(Activation("softmax"))

    # Fit model
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=100,
              epochs=100,
              lr=0.0001,
              momentum=0.1,
              l2_reg=0.1)
    model.plot_training_progress()

    # Test model
    test_acc, test_loss = model.get_classification_metrics(x_test, y_test)
    print("Test accuracy:", test_acc)
Code example #11
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    # Define model
    model = Sequential(loss=CrossEntropy())
    model.add(Dense(nodes=50, input_dim=x_train.shape[0]))
    model.add(Relu())
    model.add(Dense(nodes=10, input_dim=50))
    model.add(Softmax())

    # Fit model
    x_train = x_train[:, 0:100]
    y_train = y_train[:, 0:100]
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=100,
              epochs=200,
              lr=0.001,
              momentum=0.0,
              l2_reg=0.0,
              shuffle_minibatch=False,
              save_path="models/mlp_overfit_test")
    model.plot_training_progress(save=True, name="figures/mlp_overfit_test")
    model.save("models/mlp_overfit_test")

    # Test model
    test_acc, test_loss = model.get_classification_metrics(x_test, y_test)
    print("Test accuracy:", test_acc)
Code example #12
    # np.set_printoptions(suppress=False)


if __name__ == "__main__":
    # Define model
    v_rnn = VanillaRNN(state_size=state_size, input_size=K, output_size=K)
    model = Sequential(loss=CrossEntropy(class_count=None), metric=Accuracy())
    model.add(v_rnn)


    model.layers[0].reset_state(copy.deepcopy(state))
    # print(model.layers[0].c)

    # Fit model
    l2_reg = 0.0
    model.fit(X=encoded_data, epochs=1, lr=2e-2, momentum=0.95, l2_reg=l2_reg,
              batcher=RnnBatcher(seq_length), callbacks=[])
    print(model.layers[0].dl_dc)
    anal = copy.deepcopy(model.layers[0].dl_dc)

    model.layers[0].reset_state(copy.deepcopy(state))

    x = np.array([char_to_ind['o'], char_to_ind['l'], char_to_ind['i']])
    x = one_hotify(x, num_classes=K)
    y = np.array([char_to_ind['l'], char_to_ind['i'], char_to_ind['s']])
    y = one_hotify(y, num_classes=K)

    grad_w = ComputeGradsNum(x, y, model, l2_reg, h=1e-6)
    num = copy.deepcopy(grad_w)
    print("num")
    print(grad_w)
Code example #13
    model.add(Flatten())
    model.add(Dense(nodes=200))
    model.add(Relu())
    model.add(Dense(nodes=10))
    model.add(Softmax())


    # for filt in model.layers[0].filters:
    #     print(filt)
    # y_pred_prob = model.predict(x_train)
    # print(y_pred_prob)

    # Fit model
    # model.load("models/cifar_test_2")
    # mt.load("models/tracker")
    model.fit(X=x_train, Y=y_train, X_val=x_val, Y_val=y_val,
              batch_size=100, epochs=20, momentum=0.9, l2_reg=0.003, callbacks=callbacks)
    model.save("models/cifar_test_3")
    # model.layers[0].show_filters()

    # for filt in model.layers[0].filters:
    #     print(filt)

    # print(model.layers[0].biases)

    mt.plot_training_progress()
    # y_pred_prob = model.predict(x_train)
    # # # model.pred
    # print(y_train)
    # print(np.round(y_pred_prob, decimals=2))
Code example #14
    # Define callbacks
    mt = MetricTracker()  # Stores training evolution info
    lrs = LearningRateScheduler(evolution="cyclic",
                                lr_min=1e-7,
                                lr_max=1e-2,
                                ns=ns)  # Modifies lr while training
    callbacks = [mt, lrs]

    # Fit model
    iterations = 6 * ns
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=100,
              iterations=iterations,
              l2_reg=10**-1.85,
              shuffle_minibatch=True,
              callbacks=callbacks)
    # model.save("models/yes_dropout_test")

    # # Test model
    val_acc = model.get_metric_loss(x_val, y_val)[0]
    test_acc = model.get_metric_loss(x_test, y_test)[0]
    subtitle = "Test acc: " + str(test_acc)
    mt.plot_training_progress(show=True,
                              save=True,
                              name="figures/lr_limits/final_train",
                              subtitle=subtitle)
    # mt.save("limits_test")
Code example #15
    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    # Modelling
    model = Sequential(loss="categorical_hinge")
    model.add(
        Dense(nodes=10, input_dim=x_train.shape[0], weight_initialization="fixed"))

    best_model = model.fit(
        X=x_train,
        Y=y_train,
        X_val=x_val,
        Y_val=y_val,
        batch_size=20,
        epochs=100,
        lr=0.001,
        momentum=0.5,
        l2_reg=0.05,
        save_path="models/svm/test_2")
    best_model.plot_training_progress(show=False,
                                      save=True,
                                      name="figures/svm/test_2",
                                      subtitle="subtitle")
    test_acc = best_model.get_classification_metrics(x_test, y_test)[0]
    val_acc = best_model.get_classification_metrics(x_val, y_val)[0]
    print("test_acc:", test_acc)
    print("val_acc:", val_acc)
Code example #16
    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    # Define model
    model = Sequential(loss=CrossEntropy())
    model.add(Dense(nodes=10, input_dim=x_train.shape[0]))
    model.add(Softmax())

    # Fit model
    # model.load("models/mlp_test")
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=100,
              epochs=40,
              lr=0.001,
              momentum=0.0,
              l2_reg=0.0,
              shuffle_minibatch=False)
    model.plot_training_progress(save=True, name="figures/mlp_test")
    model.save("models/mlp_test")

    # Test model
    test_acc, test_loss = model.get_classification_metrics(x_test, y_test)
    print("Test accuracy:", test_acc)
Code example #17
    model.add(Dense(nodes=400))
    model.add(Relu())
    model.add(Dense(nodes=10))
    model.add(Softmax())

    # for filt in model.layers[0].filters:
    #     print(filt)
    # y_pred_prob = model.predict(x_train)
    # print(y_pred_prob)

    # Fit model
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=100,
              epochs=200,
              lr=1e-2,
              momentum=0.5,
              callbacks=callbacks)
    model.save("models/mnist_test_conv_2")
    # model.layers[0].show_filters()

    # for filt in model.layers[0].filters:
    #     print(filt)

    # print(model.layers[0].biases)

    mt.plot_training_progress()
    y_pred_prob = model.predict(x_train)
    # # # model.pred
Code example #18
    model = Sequential(loss=CrossEntropy(), metric=Accuracy())
    model.add(Conv2D(num_filters=2,
                     kernel_shape=(4, 4),
                     stride=3,
                     dilation_rate=2,
                     input_shape=x_train.shape[0:-1]))
    model.add(Relu())
    model.add(MaxPool2D((2, 2), stride=3))
    model.add(Flatten())
    model.add(Dense(nodes=y_train.shape[0]))
    model.add(Relu())
    model.add(Softmax())

    print(np.min(np.abs(model.layers[0].filters)))

    reg = 0.0

    # Fit model
    anal_time = time.time()
    model.fit(X=x_train, Y=y_train, X_val=x_val, Y_val=y_val,
              batch_size=200, epochs=1, lr=0, momentum=0, l2_reg=reg)
    analytical_grad_weight = model.layers[0].filter_gradients
    analytical_grad_bias = model.layers[0].bias_gradients
    # print(analytical_grad_weight)
    print(analytical_grad_bias)
    anal_time = time.time() - anal_time

    # Get Numerical gradient
    num_time = time.time()
    numerical_grad_w, numerical_grad_b = ComputeGradsNum(x_train, y_train, model, l2_reg=reg, h=1e-5)
    # print(numerical_grad_w)
    print(numerical_grad_b)
    num_time = time.time() - num_time

    print("Weight Error:")
    _EPS = 0.0000001
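The snippet is cut off before the actual comparison; it presumably mirrors the relative-error formula of code example #5. A minimal sketch of that comparison under that assumption (a reconstruction, not the original code):

import numpy as np

# Hypothetical continuation: average relative error between analytical and
# numerical gradients, with the denominator floored to avoid division by
# zero. Assumes the gradients are plain numpy arrays.
def relative_error(analytical, numerical, eps=1e-7):
    denom = np.maximum(eps, np.abs(analytical) + np.abs(numerical))
    return np.average(np.abs(analytical - numerical) / denom)

print(relative_error(analytical_grad_weight, numerical_grad_w))
print("Bias Error:")
print(relative_error(analytical_grad_bias, numerical_grad_b))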
Code example #19
    # Define callbacks
    mt = MetricTracker()  # Stores training evolution info
    lrs = LearningRateScheduler(evolution="cyclic",
                                lr_min=1e-5,
                                lr_max=1e-1,
                                ns=ns)  # Modifies lr while training
    callbacks = [mt, lrs]

    # Fit model
    iterations = 2 * ns
    model.fit(X=x_train,
              Y=y_train,
              X_val=x_val,
              Y_val=y_val,
              batch_size=100,
              iterations=iterations,
              l2_reg=0.01,
              shuffle_minibatch=False,
              callbacks=callbacks)
    # model.save("models/yes_dropout_test")

    # Test model
    val_acc = model.get_metric_loss(x_val, y_val)[0]
    test_acc = model.get_metric_loss(x_test, y_test)[0]

    # Test model
    print("Test accuracy:", test_acc)

    # Plot evolution
    mt.plot_training_progress(show=True,
                              save=True,
                              subtitle="Test acc: " + str(test_acc))