Example #1
def seq_mlp_mnist():
    """test MLP with MNIST data and Sequential

    """
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('./tmp/data', one_hot=True)
    training_data = np.array([image.flatten() for image in mnist.train.images])
    training_label = mnist.train.labels
    valid_data = np.array(
        [image.flatten() for image in mnist.validation.images])
    valid_label = mnist.validation.labels
    input_dim = training_data.shape[1]
    label_size = training_label.shape[1]

    model = Sequential()
    model.add(Input(input_shape=(input_dim, )))
    model.add(Dense(300, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Softmax(label_size))
    model.compile('CE', optimizer=SGD())
    model.fit(training_data,
              training_label,
              validation_data=(valid_data, valid_label),
              metric='Accuracy',
              peek_type='single-cls')
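The tensorflow.examples.tutorials.mnist reader used above was removed in TensorFlow 2.x. A minimal sketch of building equivalent flattened, one-hot arrays with tf.keras.datasets (the 5000-sample validation split mirrors the old reader, but is otherwise an assumption):

import numpy as np
import tensorflow as tf

# MNIST as uint8 arrays: images (N, 28, 28), integer labels (N,).
(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()

# Flatten to (N, 784) floats in [0, 1] and one-hot encode the labels,
# matching the shapes the old mnist.train.images/labels provided.
x_train = x_train.reshape(-1, 28 * 28).astype('float32') / 255.0
y_train = np.eye(10)[y_train]

# Carve out the validation split.
valid_data, valid_label = x_train[-5000:], y_train[-5000:]
training_data, training_label = x_train[:-5000], y_train[:-5000]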
Example #2
def model_mlp_random_cls():
    """test MLP with random data and Model

    """
    input_size = 600
    input_dim = 20
    label_size = 2
    train_X = np.random.random((input_size, input_dim))
    train_y = np.random.randint(0, label_size, (input_size, 1))
    # One-hot alternative to the integer labels above:
    # train_y = np.zeros((input_size, label_size))
    # for i in range(input_size):
    #     train_y[i, np.random.randint(0, label_size)] = 1

    Inputs = Input(input_shape=input_dim)
    X = Dense(100, activation='relu')(Inputs)
    X = Softmax(label_size)(X)
    model = Model(Inputs, X)
    model.compile('CE')
    model.fit(train_X,
              train_y,
              batch_size=256,
              verbose=20,
              epochs=100,
              metric='Accuracy',
              peek_type='single-cls')
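Note that train_y here holds integer class indices of shape (N, 1); whether the framework's 'CE' loss accepts that form is an assumption. If it expects one-hot targets (as the commented-out loop suggests), a vectorized conversion, reusing train_y and label_size from above, is:

import numpy as np

# One-hot rows via fancy indexing on an identity matrix.
train_y_onehot = np.eye(label_size)[train_y.reshape(-1)]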
Example #3
def svm():
    print(10 * '#' + ' SGD SVM model ' + 10 * '#')

    # build the linear model with gradient descent
    # define layer
    Inputs = Input(input_shape=X_train.shape[1])
    # Optional deeper variant: stacked swish hidden layers, each with
    # L2_Regularizer(1e-5); L1_Regularizer(1e-2) and L1L2_Regularizer(l2=1)
    # were the alternative regularizers tried here.
    # X = Linear(output_dim=64, regularizer=L2_Regularizer(1e-5),
    #            activation='swish')(Inputs)
    # X = Linear(output_dim=128, regularizer=L2_Regularizer(1e-5),
    #            activation='swish')(X)
    # X = Linear(output_dim=256, regularizer=L2_Regularizer(1e-5),
    #            activation='swish')(X)
    X = Linear(
        output_dim=1,
        # initializer='default_weight_initializer',  # string alternative
        initializer=default_weight_initializer,
        regularizer=L2_Regularizer(1e-5),
        activation=None)(Inputs)
    model = Model(Inputs, X)
    # model.compile('HL', optimizer=SGD(lr=0.001))
    model.compile('HL', optimizer=Adam(lr=0.001))
    model.fit(X_train,
              y_train,
              verbose=10,
              epochs=100,
              validation_data=(X_test, y_test),
              batch_size=64,
              metric='svm_binary_accuracy',
              shuffle=True,
              peek_type='single-svm-cls')
    plt.subplot(211)
    plt.plot(model.train_losses, label='train_losses')
    plt.plot(model.validation_losses, label='valid_losses')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.subplot(212)
    plt.plot(model.train_metrics, label='train_accuracy')
    plt.plot(model.validation_metrics, label='valid_accuracy')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.show()
    print(10 * '#' + ' SGD SVM model end ' + 10 * '#')
    print()
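'HL' presumably selects a hinge loss, which pairs with the raw scores of the final Linear layer. A sketch of the standard form, assuming labels in {-1, +1} and a mean reduction (both are assumptions about this framework):

import numpy as np

def hinge_loss(y_true, y_score):
    # Zero loss once the score clears the margin y * s >= 1,
    # linear penalty inside the margin or on the wrong side.
    return np.maximum(0.0, 1.0 - y_true * y_score).mean()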
Example #4
def seq_cnn_mnist():
    """test CNN with MNIST data and Sequential

    """
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('/tmp/data', one_hot=False)
    training_data = np.array(
        [image.reshape(28, 28, 1) for image in mnist.train.images])
    training_label = mnist.train.labels
    valid_data = np.array(
        [image.reshape(28, 28, 1) for image in mnist.validation.images])
    valid_label = mnist.validation.labels
    label_size = 10

    model = Sequential()
    model.add(Input(batch_input_shape=(None, 28, 28, 1)))
    model.add(Conv2d(3, 16, stride=1, padding=2, activation='relu'))
    # model.add(MaxPooling2D(4, stride=2))
    model.add(AvgPooling2D(4, stride=2))
    model.add(Conv2d(2, 32, stride=1, padding=0, activation='relu'))
    # model.add(MaxPooling2D(3, stride=2))
    model.add(AvgPooling2D(3, stride=2))
    model.add(Conv2d(1, 64, stride=1, padding=0, activation='relu'))
    # model.add(MaxPooling2D(3, stride=3))
    # model.add(AvgPooling2D(3, stride=3))

    model.add(Flatten())
    model.add(Softmax(label_size))
    model.compile('CE', optimizer=Adam(lr=1e-3))
    # Larger variants of the call below: the full training set for 5 epochs,
    # or the first 1000 samples for 10 epochs (verbose=1 in both cases).
    model.fit(training_data[:100],
              training_label[:100],
              validation_data=(valid_data[:50], valid_label[:50]),
              batch_size=256,
              verbose=10,
              epochs=100,
              metric='Accuracy',
              peek_type='single-cls')
    plt.subplot(211)
    plt.plot(model.train_losses, label='train_losses')
    plt.plot(model.validation_losses, label='valid_losses')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.subplot(212)
    plt.plot(model.train_metrics, label='train_accuracy')
    plt.plot(model.validation_metrics, label='valid_accuracy')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.show()
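Assuming Conv2d takes kernel size first and filter count second, and the usual floor((size + 2*padding - kernel) / stride) + 1 arithmetic, the spatial sizes through this stack work out as below, so Flatten feeds 6 * 6 * 64 = 2304 features to the Softmax head:

def conv_out(size, kernel, stride=1, padding=0):
    # Standard convolution/pooling output-size formula.
    return (size + 2 * padding - kernel) // stride + 1

s = 28
s = conv_out(s, kernel=3, stride=1, padding=2)  # 30 (Conv2d 3, pad 2)
s = conv_out(s, kernel=4, stride=2)             # 14 (AvgPooling2D 4, stride 2)
s = conv_out(s, kernel=2, stride=1)             # 13 (Conv2d 2)
s = conv_out(s, kernel=3, stride=2)             # 6  (AvgPooling2D 3, stride 2)
s = conv_out(s, kernel=1, stride=1)             # 6  (Conv2d 1)
print(s * s * 64)                               # 2304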
Example #5
def dlr():
    print(10 * '#' + ' Linear model ' + 10 * '#')
    # build the linear model with gradient descent
    # define layer
    input_dim = train_x.shape[1]
    Inputs = Input(input_shape=input_dim)
    X = Linear(output_dim=1, activation=None,
               regularizer=L2_Regularizer(1),
               initializer=ones)(Inputs)
    model = Model(Inputs, X)

    # lr = 0.001 for grade-point prediction; there MSE works much better than MAE.
    # model.compile('MSE', optimizer=SGD(lr=0.001))

    # lr = 0.01 for score prediction; there MAE is slightly better than MSE.
    model.compile('MAE', optimizer=SGD(lr=0.01))

    # Alternatively, HB (Huber loss) works for both tasks; keep lr = 0.001
    # for grade-point prediction and lr = 0.01 for score prediction.
    # model.compile('HB', optimizer=SGD(lr=0.01))

    model.fit(train_x, train_y,
              verbose=500, epochs=10000,
              validation_data=(test_x, test_y),
              batch_size=16, metric='mae',
              peek_type='single-reg')
    plt.subplot(211)
    plt.plot(model.train_losses, label='train_losses')
    plt.plot(model.validation_losses, label='valid_losses')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.subplot(212)
    plt.plot(model.train_metrics, label='train_MAE')
    plt.plot(model.validation_metrics, label='valid_MAE')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.show()
    train_y_hat = model.forward(train_x)
    test_y_hat = model.forward(test_x)
    training_error = absolute_error(train_y, train_y_hat) / train_y.shape[0]
    test_error = absolute_error(test_y, test_y_hat) / test_y.shape[0]

    print('Training error: ', training_error)
    print('Test error: ', test_error)
    print(10 * '#' + ' Linear model end ' + 10 * '#')
    print()
    return X.params
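The 'HB' option mentioned in the comments is the Huber loss: quadratic near zero like MSE, linear in the tails like MAE, so it keeps smooth gradients while resisting outliers. A sketch of its standard form (the framework's delta and reduction are assumptions):

import numpy as np

def huber_loss(y_true, y_pred, delta=1.0):
    err = np.abs(y_true - y_pred)
    # Quadratic inside |err| <= delta, linear beyond it.
    return np.where(err <= delta,
                    0.5 * err ** 2,
                    delta * (err - 0.5 * delta)).mean()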
Example #6
def dlr():
    print(10 * '#' + ' SGD Factorization model ' + 10 * '#')

    # build the linear model with gradient descent
    # define layer
    X_train, y_train, X_val, y_val, X_test, y_test, num_user, num_item = read_data()
    Inputs = Input(input_shape=2)
    out = Factorization(a_dim=num_user, b_dim=num_item, k=10,
                        use_bias=False,
                        regularizer=L2_Regularizer(0.1))(Inputs)
    # out = Factorization(a_dim=num_user, b_dim=num_item, k=10)(Inputs)
    model = Model(Inputs, out)
    # model.compile('MSE', optimizer=Adam(lr=0.001))
    model.compile(HuberLoss(), optimizer=Adam(lr=0.001))
    model.fit(X_train, y_train,
              verbose=10, epochs=300,
              validation_data=(X_val, y_val),
              batch_size=256, metric='MAE',
              shuffle=True,
              peek_type='single-reg')

    plt.plot(model.train_losses, label='$loss_{train}$')
    plt.plot(model.validation_losses, label='$loss_{val}$')
    plt.legend()
    plt.savefig('./loss.png', dpi=300)
    plt.show()
    plt.plot(model.train_metrics, label='$MAE_{train}$')
    plt.plot(model.validation_metrics, label='$MAE_{val}$')
    plt.legend()
    plt.savefig('./metric.png', dpi=300)
    plt.show()

    train_y_hat = model.forward(X_train)
    val_y_hat = model.forward(X_val)
    test_y_hat = model.forward(X_test)
    training_error = absolute_error(y_train, train_y_hat) / y_train.shape[0]
    val_error = absolute_error(y_val, val_y_hat) / y_val.shape[0]
    test_error = absolute_error(y_test, test_y_hat) / y_test.shape[0]

    print(model.best_performance(bigger=False))
    print('Training error: ', training_error)
    print('Val error: ', val_error)
    print('Test error: ', test_error)
    print(10 * '#' + ' SGD Factorization model end ' + 10 * '#')
    print()
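A Factorization layer of this kind typically learns one embedding matrix per side (users and items) and scores a pair by the dot product of the two embeddings. A sketch of the assumed forward pass, with num_user/num_item and the factor count matching the a_dim/b_dim/k arguments above:

import numpy as np

rng = np.random.default_rng(0)
U = rng.normal(scale=0.1, size=(num_user, 10))  # user embeddings (a_dim x k)
V = rng.normal(scale=0.1, size=(num_item, 10))  # item embeddings (b_dim x k)

def factorization_forward(pairs):
    # pairs is an (N, 2) integer array of (user_id, item_id) rows,
    # matching Input(input_shape=2); use_bias=False, so no bias terms.
    users, items = pairs[:, 0], pairs[:, 1]
    return np.sum(U[users] * V[items], axis=1)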
Example #7
def dlr():
    print(10 * '#' + ' SGD Linear model ' + 10 * '#')

    # build the linear model with gradient descent
    # define layer
    Inputs = Input(input_shape=X_train.shape[1])
    linear_out = Linear(output_dim=1, activation=None,
                        initializer=ones)(Inputs)
    model = Model(Inputs, linear_out)
    model.compile('MSE', optimizer=SGD(lr=0.01))
    model.fit(
        X_train,
        y_train,
        verbose=-1,
        epochs=5000,
        validation_data=(X_test, y_test),
        batch_size=256,
        metric='MAE',
        shuffle=True,
        # peek_type='single-reg'
    )

    plt.subplot(211)
    plt.plot(model.train_losses, label='train_losses')
    plt.plot(model.validation_losses, label='valid_losses')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.subplot(212)
    plt.plot(model.train_metrics, label='train_metrics')
    plt.plot(model.validation_metrics, label='valid_metrics')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.show()

    train_y_hat = model.forward(X_train)
    test_y_hat = model.forward(X_test)
    # mean_absolute_error already averages over samples; no further
    # division by the sample count is needed.
    training_error = mean_absolute_error(y_train, train_y_hat)
    test_error = mean_absolute_error(y_test, test_y_hat)

    print('Training error: ', training_error)
    print('Test error: ', test_error)
    print(10 * '#' + ' SGD Linear model end ' + 10 * '#')
    print()
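Since this model is a single Linear layer trained with MSE, its optimum is ordinary least squares, so a closed-form np.linalg.lstsq fit makes a handy sanity check on the SGD result (the appended bias column assumes the Linear layer learns an intercept):

import numpy as np

# Solve min ||[X, 1] w - y||^2 in closed form.
Xb = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
w, *_ = np.linalg.lstsq(Xb, y_train, rcond=None)

Xb_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
print('closed-form baseline test MAE:',
      np.abs(Xb_test @ w - y_test).mean())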
Example #8
def seq_cnn_face():
    X_train, y_train, X_val, y_val, X_test, y_test = read_data(size=28)
    print('train set positive class portion: %.2f (%d / %d)' %
          (np.mean(y_train), int(np.sum(y_train)), y_train.shape[0]))
    print('val set positive class portion: %.2f (%d / %d)' %
          (np.mean(y_val), int(np.sum(y_val)), y_val.shape[0]))
    print('test set positive class portion: %.2f (%d / %d)' %
          (np.mean(y_test), int(np.sum(y_test)), y_test.shape[0]))

    model = Sequential()
    model.add(Input(batch_input_shape=(None, *X_train.shape[1:])))
    model.add(Conv2d(3, 16, stride=1, padding=2, activation='swish'))
    # model.add(MaxPooling2D(4, stride=2))
    # model.add(AvgPooling2D(4, stride=2))
    model.add(Conv2d(2, 32, stride=1, padding=0, activation='swish'))
    # model.add(MaxPooling2D(3, stride=2))
    # model.add(AvgPooling2D(3, stride=2))
    model.add(Conv2d(1, 64, stride=1, padding=0, activation='swish'))

    model.add(Flatten())
    model.add(Softmax(2))
    model.compile('CE', optimizer=Adam(lr=1e-3))
    model.fit(X_train,
              y_train,
              validation_data=(X_val, y_val),
              batch_size=256,
              verbose=1,
              epochs=100,
              shuffle=True,
              metric='Accuracy',
              peek_type='single-cls')
    plt.subplot(211)
    plt.plot(model.train_losses, label='train_losses')
    plt.plot(model.validation_losses, label='valid_losses')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.subplot(212)
    plt.plot(model.train_metrics, label='train_accuracy')
    plt.plot(model.validation_metrics, label='valid_accuracy')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.show()
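The 'swish' activation used in these Conv2d layers is, in its standard beta = 1 form, x * sigmoid(x) (Ramachandran et al., 2017); whether the framework uses exactly this variant is an assumption:

import numpy as np

def swish(x):
    # Smooth, non-monotonic; x * sigmoid(x) == x / (1 + exp(-x)).
    return x / (1.0 + np.exp(-x))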
Example #9
def model_mlp_random_reg():
    """test MLP with random data and Sequential

    """
    input_size = 600
    input_dim = 20
    output_dim = 1
    train_X = np.random.random((input_size, input_dim))
    random_weight = np.random.random((input_dim, output_dim))
    random_noise = np.random.random((input_size, output_dim))
    train_y = np.dot(train_X, random_weight) + random_noise

    Inputs = Input(input_shape=input_dim)
    X = Dense(100, activation='relu')(Inputs)
    X = Dense(100, activation='relu')(X)
    X = Dense(output_dim, activation=None)(X)
    model = Model(Inputs, X)
    model.compile('MSE', optimizer='momentum')
    model.fit(
        train_X,
        train_y,
        verbose=100,
        epochs=600,
        batch_size=256,
        # validation_split=0.1,
        metric='MAE',
        peek_type='single-reg')
    print(len(model.train_losses))
    print(len(model.validation_losses))
    print(len(model.train_metrics))
    print(len(model.validation_metrics))
    plt.axis([0, len(model.train_losses), 0, 5])
    plt.plot(model.train_losses)
    plt.plot(model.validation_losses)
    # plt.plot(model.train_metrics)
    # plt.plot(model.validation_metrics)
    plt.show()
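The 'momentum' string passed to compile presumably selects a classical-momentum SGD variant; its textbook update keeps a velocity term that smooths the descent direction (the lr and mu defaults below are assumptions):

def momentum_step(w, grad, velocity, lr=0.01, mu=0.9):
    # Velocity accumulates an exponentially decaying sum of gradients.
    velocity = mu * velocity - lr * grad
    return w + velocity, velocity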
Example #10
def dmlr():
    print(10 * '#' + ' SGD Deep Linear model ' + 10 * '#')

    # build the linear model with gradient descent
    # define layer
    Inputs = Input(input_shape=X_train.shape[1])
    linear_out = Linear(output_dim=64, activation='swish')(Inputs)
    linear_out = Linear(output_dim=128, activation='swish')(linear_out)
    linear_out = Linear(output_dim=256, activation='swish')(linear_out)
    linear_out = Linear(output_dim=1, activation=None)(linear_out)
    model = Model(Inputs, linear_out)
    model.compile('MSE', optimizer=Momentum(lr=0.0001))
    model.fit(
        X_train,
        y_train,
        verbose=100,
        epochs=500,
        validation_data=(X_test, y_test),
        batch_size=256,
        metric='MAE',
        shuffle=True,
        # peek_type='single-reg'
    )
    # y_pred = model.forward(X_test)
    # for yp, yt in zip(y_pred, y_test):
    #     print(yp, yt)
    plt.subplot(211)
    plt.plot(model.train_losses, label='train_losses')
    plt.plot(model.validation_losses, label='valid_losses')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.subplot(212)
    plt.plot(model.train_metrics, label='train_metrics')
    plt.plot(model.validation_metrics, label='valid_metrics')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.show()
    print(10 * '#' + ' SGD Deep Linear model end ' + 10 * '#')
    print()
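The very small learning rate (1e-4) hints at unscaled features; a common alternative, not shown in the original, is to standardize the inputs with training-set statistics and use a larger step:

import numpy as np

# Column-wise statistics from the training split only, applied to both
# splits so they stay comparable; the epsilon guards constant columns.
mean, std = X_train.mean(axis=0), X_train.std(axis=0) + 1e-8
X_train_std = (X_train - mean) / std
X_test_std = (X_test - mean) / std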
Example #11
def seq_mlp_random_cls():
    """test MLP with random data and Sequential

    """
    input_size = 600
    input_dim = 20
    label_size = 10
    train_X = np.random.random((input_size, input_dim))
    train_y = np.zeros((input_size, label_size))
    for i in range(input_size):
        train_y[i, np.random.randint(0, label_size)] = 1

    model = Sequential()
    model.add(Input(input_shape=input_dim))
    model.add(Dense(100, activation='relu'))
    model.add(Softmax(label_size))
    model.compile('CE')
    model.fit(train_X,
              train_y,
              verbose=100,
              epochs=5000,
              validation_split=0.2,
              metric='Accuracy',
              peek_type='single-cls')
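The label-building loop above has a one-line vectorized equivalent using fancy indexing on an identity matrix, reusing input_size and label_size from the example:

import numpy as np

# Drop-in replacement for the explicit loop.
train_y = np.eye(label_size)[np.random.randint(0, label_size, input_size)]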