Code Example #1
def main():
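    # Bag-of-words logistic regression on the Amazon baby-products review subset, trained with SGD.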
    df = pd.read_csv("data/amazon_baby_subset.csv")
    reviews = df.loc[:, 'review'].values
    # Reviews parsed as NaN come back as floats; replace them with empty strings.
    for ind, review in enumerate(reviews):
        if isinstance(review, float):
            reviews[ind] = ""

    reviews = clean_sentences("\n".join(reviews))
    with open("data/important_words.json") as f:
        important_words = json.load(f)
    reviews = reviews.split("\n")
    n = len(reviews)
    d = len(important_words)
    X = np.zeros((n, d))
    y = df.loc[:, 'sentiment'].values
    y[y == -1] = 0  # map sentiment labels from {-1, 1} to {0, 1}

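    # Count each important word in every review to build the feature matrix.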
    for ind, review in enumerate(reviews):
        for ind_w, word in enumerate(important_words):
            X[ind, ind_w] = review.count(word)
    # Append a bias column of ones to the feature matrix.
    ones = np.ones((n, 1))
    X = np.concatenate((X, ones), axis=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    epochs = 20
    learning_rate = 0.1
    batch_size = 64
    optimizer = SGD(alpha=learning_rate)
    logistic = LogisticRegression(epochs, optimizer, batch_size)
    logistic.train(X_train, y_train)
    pred = logistic.predict(X_test)
    y_test = y_test.reshape((-1, 1))
    print("Accuracy: " + str(len(pred[pred == y_test]) / len(pred)))
Code Example #2
File: main_rnn.py  Project: fechau/ml-from-scratch
def main():
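    # Character-level RNN trained on the "names" file to predict the next character at each step.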
    start_token = " "
    pad_token = "#"

    with open("names") as f:
        names = f.read()[:-1].split('\n')
        names = [start_token + name for name in names]
    print('number of samples:', len(names))
    MAX_LENGTH = max(map(len, names))
    print("max length:", MAX_LENGTH)

    # Collect the character vocabulary from all names, plus the padding token.
    tokens = set()
    for name in names:
        tokens.update(name)
    tokens.add(pad_token)

    tokens = list(tokens)
    n_tokens = len(tokens)
    print('n_tokens:', n_tokens)

    token_to_id = dict()
    for ind, token in enumerate(tokens):
        token_to_id[token] = ind

    def to_matrix(names,
                  max_len=None,
                  pad=token_to_id[pad_token],
                  dtype=np.int32):
        """Casts a list of names into rnn-digestable padded matrix"""

        max_len = max_len or max(map(len, names))
        names_ix = np.zeros([len(names), max_len], dtype) + pad
        for i in range(len(names)):
            name_ix = list(map(token_to_id.get, names[i]))
            names_ix[i, :len(name_ix)] = name_ix

        return names_ix

    matrix_sequences = to_matrix(names)

    m, length = matrix_sequences.shape
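    # One-hot encode each padded sequence of token ids.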
    input_sequences = np.zeros(shape=(m, length, n_tokens))
    for i in range(m):
        input_sequences[i] = to_categorical(matrix_sequences[i],
                                            n_tokens,
                                            dtype='int32')
    del matrix_sequences
    train_X = input_sequences[:, :-1, :]  # inputs: every character except the last
    train_Y = input_sequences[:, 1:, :]  # targets: the next character at each time step

    optimizer = SGD()
    epochs = 5

    rnn = RNN(hidden_units=64, epochs=epochs, optimizer=optimizer)
    rnn.train(train_X, train_Y)
Code Example #3
File: main_cnn.py  Project: triicst/ml-from-scratch-1
def test():
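    # Compare the from-scratch ConvLayer and PoolingLayer against TensorFlow's built-in ops.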
    import numpy as np
    from nn_components.layers import ConvLayer, PoolingLayer
    from optimizations_algorithms.optimizers import SGD
    import tensorflow as tf
    tf.enable_eager_execution()  # TensorFlow 1.x API; eager mode lets the reference ops run immediately

    filter_size = (3, 3)
    filters = 16
    padding = "SAME"
    stride = 1

    optimizer = SGD()

    conv_layer = ConvLayer(filter_size=filter_size, filters=filters, padding=padding, stride=stride)
    conv_layer.initialize_optimizer(optimizer)
    conv_layer.debug = True

    pool_filter_size = (2, 2)
    pool_stride = 2
    pool_mode = "max"
    pool_layer = PoolingLayer(filter_size=pool_filter_size, stride=pool_stride, mode=pool_mode)

    X = np.random.normal(size=(16, 12, 12, 3))  # random input batch: (batch, height, width, channels)

    d_prev = np.random.normal(size=(16, 12, 12, 16))  # upstream gradient matching the 16 conv filters

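    # Forward and backward passes of the scratch layers, to be checked against TensorFlow below.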
    my_conv_forward = conv_layer.forward(X)
    my_dA, my_dW = conv_layer.backward(d_prev, X) 
    my_pool_forward = pool_layer.forward(X)

    with tf.device("/cpu:0"):
        tf_conv_forward = tf.nn.conv2d(X, conv_layer.W, strides=(stride, stride), padding=padding).numpy()
        tf_dW = tf.nn.conv2d_backprop_filter(X, filter_sizes=filter_size + (X.shape[-1], filters), out_backprop=d_prev,
                                            strides=(1, stride, stride, 1), padding=padding).numpy()
        tf_dA = tf.nn.conv2d_backprop_input(input_sizes=X.shape, filter=conv_layer.W, out_backprop=d_prev, 
                                            strides=(1, stride, stride, 1), padding=padding).numpy()

        tf_pool_forward = tf.nn.max_pool2d(X, ksize=pool_filter_size, strides=(pool_stride, pool_stride), padding="VALID").numpy()

    blank = "----------------------"
    print(blank + "TEST FORWARD CONVOLUTION" + blank)
    forward_result = np.allclose(my_conv_forward, tf_conv_forward)
    forward_out = "PASS" if forward_result else "FAIL"
    print("====> " + forward_out)
    
    print(blank + "TEST BACKWARD CONVOLUTION" + blank)
    dW_result = np.allclose(my_dW, tf_dW)
    dW_out = "PASS" if dW_result else "FAIL"
    print("====> dW case: " + dW_out)
    dA_result = np.allclose(my_dA, tf_dA)
    dA_out = "PASS" if dA_result else "FAIL"
    print("====> dA case: " + dA_out)

    print(blank + "TEST FORWARD POOLING" + blank)
    pool_result = np.allclose(my_pool_forward, tf_pool_forward)
    pool_out = "PASS" if pool_result else "FAIL"
    print("====> " + pool_out)
Code Example #4
def main():
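    # Train a fully connected network on MNIST, or evaluate previously saved weights.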
    load_dataset_mnist("../libs")
    mndata = MNIST('../libs/data_mnist', gz=True)
    weight_path = "nn_weights.pkl"
    training_phase = weight_path not in os.listdir(".")  # train only if no saved weights exist yet
    if training_phase:
        images, labels = mndata.load_training()
        images, labels = preprocess_data(images, labels)
        epochs = 10
        batch_size = 64
        learning_rate = 0.01

        optimizer = SGD(learning_rate)
        loss_func = CrossEntropy()
        archs = [
            InputLayer(),
            FCLayer(num_neurons=100, weight_init="he_normal"),
            ActivationLayer(activation="relu"),
            FCLayer(num_neurons=125, weight_init="he_normal"),
            ActivationLayer(activation="relu"),
            FCLayer(num_neurons=50, weight_init="he_normal"),
            BatchNormLayer(),
            ActivationLayer(activation="relu"),
            FCLayer(num_neurons=labels.shape[1], weight_init="he_normal"),
            ActivationLayer(activation="softmax"),
        ]
        nn = NeuralNetwork(optimizer=optimizer,
                           layers=archs,
                           loss_func=loss_func)

        trainer = Trainer(nn, batch_size, epochs)
        trainer.train(images, labels)
        trainer.save_model(weight_path)
    else:
        import pickle
        images_test, labels_test = mndata.load_testing()
        images_test, labels_test = preprocess_data(images_test,
                                                   labels_test,
                                                   test=True)
        with open(weight_path, "rb") as f:
            nn = pickle.load(f)
        pred = nn.predict(images_test)

        print("Accuracy:", len(pred[labels_test == pred]) / len(pred))
        from sklearn.metrics import confusion_matrix

        print("Confusion matrix: ")
        print(confusion_matrix(labels_test, pred))
Code Example #5
def main():
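    # Linear regression on the standardized prostate dataset.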
    X = np.loadtxt('prostate.data.txt', skiprows=1)
    # Feature names for prostate.data.txt (kept for reference; not used below).
    columns = [
        'lcavol', 'lweight', 'age', 'lbph', 'svi', 'lcp', 'gleason', 'pgg45'
    ]
    y = X[:, -1]
    X = X[:, :-1]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    (X_train, _, _), (y_train, _, _) = standardize_regression(X_train, y_train)
    y_train = y_train.reshape((-1, 1))

    alpha = 0.01
    epochs = 500
    lambda_ = 0
    optimizer = SGD(alpha=alpha)
    linear_regression = LinearRegression(optimizer, epochs, lambda_)
    linear_regression.train(X_train, y_train)

    (X_test, x_mean, x_std), (y_test, y_mean,
                              y_std) = standardize_regression(X_test, y_test)
    pred = linear_regression.predict(X_test)
    y_test = y_test.reshape((-1, 1))
    print("Test score: %f" % linear_regression.r2_score(pred, y_test))
Code Example #6
import os

from neural_network import NeuralNetwork
from libs.utils import load_dataset_mnist, preprocess_data
from libs.mnist_lib import MNIST
from optimizations_algorithms.optimizers import SGD  # same optimizer module used by main_cnn.py

load_dataset_mnist("../libs")
mndata = MNIST('../libs/data_mnist')
weight_path = "nn_weights.pickle"
training_phase = weight_path not in os.listdir(".")  # train only if no saved weights exist yet
if training_phase:
    images, labels = mndata.load_training()
    images, labels = preprocess_data(images, labels)
    epochs = 20
    batch_size = 64
    learning_rate = 0.1

    sgd = SGD(learning_rate)
    archs = [{
        "num_neurons": 100,
        "weight_init": "he",
        "activation": "sigmoid",
        "batch_norm": None
    }, {
        "num_neurons": 125,
        "weight_init": "he",
        "activation": "sigmoid",
        "batch_norm": None
    }, {
        "num_neurons": 50,
        "weight_init": "he",
        "activation": "sigmoid",
        "batch_norm": None
Code Example #7
def main(use_keras=False):
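    # Train a small CNN on MNIST, either with the from-scratch implementation or with Keras.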
    load_dataset_mnist("../libs")
    mndata = MNIST('../libs/data_mnist')
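    # Three conv + max-pool blocks, then two hidden fully connected layers and a 10-way softmax.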
    arch = [
        {
            "type": "conv",
            "filter_size": (3, 3),
            "filters": 6,
            "padding": "SAME",
            "stride": 1,
            "activation": "sigmoid",
            "weight_init": "he",
            "batch_norm": None
        },
        {
            "type": "pool",
            "filter_size": (2, 2),
            "stride": 2,
            "mode": "max"
        },
        {
            "type": "conv",
            "filter_size": (3, 3),
            "filters": 16,
            "padding": "SAME",
            "stride": 1,
            "activation": "sigmoid",
            "weight_init": "he",
            "batch_norm": None
        },
        {
            "type": "pool",
            "filter_size": (2, 2),
            "stride": 2,
            "mode": "max"
        },
        {
            "type": "conv",
            "filter_size": (3, 3),
            "filters": 32,
            "padding": "SAME",
            "stride": 1,
            "activation": "sigmoid",
            "weight_init": "he",
            "batch_norm": None
        },
        {
            "type": "pool",
            "filter_size": (2, 2),
            "stride": 2,
            "mode": "max"
        },
        "flatten",
        {
            "type": "fc",
            "num_neurons": 128,
            "weight_init": "he",
            "activation": "sigmoid",
            "batch_norm": None
        },  # use "batch_norm": None
        {
            "type": "fc",
            "num_neurons": 64,
            "weight_init": "he",
            "activation": "sigmoid",
            "batch_norm": None
        },
        {
            "type": "fc",
            "num_neurons": 10,
            "weight_init": "he",
            "activation": "softmax"
        }
    ]
    epochs = 5
    batch_size = 32
    learning_rate = 0.1
    if use_keras:
        from keras.optimizers import SGD as SGDKeras
        training_phase = True
        optimizer = SGDKeras(lr=learning_rate)
        cnn = CNNKeras(epochs=epochs,
                       batch_size=batch_size,
                       optimizer=optimizer,
                       cnn_structure=arch)
    else:
        optimizer = SGD(alpha=learning_rate)
        cnn = CNN(epochs=epochs,
                  batch_size=batch_size,
                  optimizer=optimizer,
                  cnn_structure=arch)
        weight_path = "cnn_weights.pickle"
        training_phase = weight_path not in os.listdir(".")
    if training_phase:
        images, labels = mndata.load_training()
        images, labels = preprocess_data(images, labels, nn=True)

        if not use_keras:
            cnn.train(images[:10000], labels[:10000])
            cnn.save(weight_path)
        else:
            cnn.train(images, labels)
            training_phase = False
    if not training_phase:
        import pickle
        images_test, labels_test = mndata.load_testing()
        images_test, labels_test = preprocess_data(images_test,
                                                   labels_test,
                                                   test=True)
        if not use_keras:
            with open(weight_path, "rb") as f:
                cnn = pickle.load(f)
        pred = cnn.predict(images_test)

        print("Accuracy:", len(pred[labels_test == pred]) / len(pred))
        from sklearn.metrics import confusion_matrix

        print("Confusion matrix: ")
        print(confusion_matrix(labels_test, pred))
Code Example #8
        assert X_train.shape[0] == y_train.shape[0], \
            "X and y must have the same number of data points."
        self.W = np.random.normal(size=(X_train.shape[1], y_train.shape[1]))  # one weight column per class
        self._train(X_train, y_train)

    def predict(self, X_test):
        return np.argmax(X_test.dot(self.W), axis=1)


if __name__ == '__main__':
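    # Train softmax regression on MNIST and report test accuracy.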
    load_dataset_mnist("../libs")
    mndata = MNIST('../libs/data_mnist')

    images, labels = mndata.load_training()
    images, labels = preprocess_data(images, labels)
    optimizer = SGD(0.01)
    batch_size = 64
    softmax = SoftmaxRegression(optimizer=optimizer,
                                epochs=20,
                                batch_size=batch_size)
    softmax.train(images, labels)

    images_test, labels_test = mndata.load_testing()
    images_test, labels_test = preprocess_data(images_test,
                                               labels_test,
                                               test=True)

    pred = softmax.predict(images_test)

    print("Accuracy:", len(pred[labels_test == pred]) / len(pred))
    from sklearn.metrics import confusion_matrix
Code Example #9
def main(use_keras=False):
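    # Character-level name model, trained either with the scratch RNN or a Keras RNN.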
    start_token = " "
    pad_token = "#"

    data_path = "D:/ml_from_scratch/recurrent_neural_network/names"

    with open(data_path) as f:
        names = f.read()[:-1].split('\n')
        names = [start_token + name for name in names]
    print('number of samples:', len(names))
    MAX_LENGTH = max(map(len, names))
    print("max length:", MAX_LENGTH)

    # Collect the character vocabulary from all names.
    tokens = set()
    for name in names:
        tokens.update(name)

    tokens = [pad_token] + list(tokens)
    n_tokens = len(tokens)
    print('n_tokens:', n_tokens)

    token_to_id = dict()
    for ind, token in enumerate(tokens):
        token_to_id[token] = ind
    print("pad token id:", token_to_id[pad_token])

    def to_matrix(names,
                  max_len=None,
                  pad=token_to_id[pad_token],
                  dtype=np.int32):
        """Casts a list of names into rnn-digestable padded matrix"""

        max_len = max_len or max(map(len, names))
        names_ix = np.zeros([len(names), max_len], dtype) + pad
        for i in range(len(names)):
            name_ix = list(map(token_to_id.get, names[i]))
            names_ix[i, :len(name_ix)] = name_ix

        return names_ix

    matrix_sequences = to_matrix(names)

    train_X = matrix_sequences[:, :-1]  # token-id inputs (used directly by the Keras model)
    m, length = matrix_sequences.shape
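    # One-hot encode each padded sequence of token ids.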
    input_sequences = np.zeros(shape=(m, length, n_tokens))
    for i in range(m):
        input_sequences[i] = to_categorical(matrix_sequences[i],
                                            n_tokens,
                                            dtype='int32')
    del matrix_sequences
    if not use_keras:
        train_X = input_sequences[:, :-1, :]  # the scratch RNN expects one-hot inputs
    train_Y = input_sequences[:, 1:, :]  # targets: the next character at each time step

    epochs = 20
    batch_size = 32
    learning_rate = 0.01
    if use_keras:
        from keras.optimizers import SGD as SGDKeras
        optimizer = SGDKeras(lr=learning_rate)
        rnn = RNNKeras(hidden_units=64,
                       epochs=epochs,
                       optimizer=optimizer,
                       batch_size=batch_size)
    else:
        optimizer = SGD(alpha=learning_rate)
        rnn = RecurrentNeuralNetwork(hidden_units=64,
                                     epochs=epochs,
                                     optimizer=optimizer,
                                     batch_size=batch_size)
    rnn.train(train_X, train_Y)