Example #1
def main():
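    # Trains a SimpleRNN sequence model on one-hot encoded sentences (8000-word vocabulary)
    # loaded from a pickle of pre-tokenized Reddit data, using a softmax output and
    # cross-entropy loss.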
    num_classes = 8000
    hidden_dim = 100
    # save_data("./data/reddit/sent.pkl")
    x_train, y_train, index_to_word, word_to_index = load_data(
        "./data/reddit/sent.pkl")

    # To categorical (one-hot)
    x_tmp, y_tmp = [], []
    for i in range(len(x_train)):
        x_t = to_categorical(x_train[i], num_classes=num_classes)
        y_t = to_categorical(y_train[i], num_classes=num_classes)
        x_tmp.append(x_t)
        y_tmp.append(y_t)
    x_train = x_tmp
    y_train = y_tmp

    batch_size = 1  #int(len(x_train) / 8)
    epochs = 100

    # Define architecture
    l_in = Input(shape=(None, num_classes))
    l = l_in
    # l = Embedding(l, input_dim=max_words, output_dim=8, input_length=max_length)
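    # Recurrent layer over the one-hot inputs; return_sequences=True keeps an output per
    # time step, and bptt_truncate=4 presumably truncates backpropagation through time
    # to 4 steps.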
    l = SimpleRNN(l,
                  hidden_dim=hidden_dim,
                  stateful=False,
                  return_sequences=True,
                  unroll=False,
                  bptt_truncate=4)
    l_out = Softmax(l)

    # Build network
    mymodel = Net()
    mymodel.build(
        l_in=[l_in],
        l_out=[l_out],
        optimizer=Adam(lr=0.01),
        losses=[losses.CrossEntropy()],
        metrics=[[metrics.CategoricalAccuracy()]],
        debug=False,
        smart_derivatives=True,
    )

    # Print model
    mymodel.summary()

    # Train
    mymodel.fit([x_train], [y_train],
                x_test=None,
                y_test=None,
                batch_size=batch_size,
                epochs=epochs,
                evaluate_epoch=False,
                print_rate=1)

Example #2
def main():
    # Get data
    (x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()

    # Pre-processing
    # Normalize
    x_train = x_train / 255.0
    x_test = x_test / 255.0

    # Classes to categorical
    num_classes = 10
    y_train = utils.to_categorical(y_train, num_classes=num_classes)
    y_test = utils.to_categorical(y_test, num_classes=num_classes)

    # Shuffle dataset
    x_train, y_train = utils.shuffle_dataset(x_train, y_train)
    x_test, y_test = utils.shuffle_dataset(x_test, y_test)

    # Params *********************************
    batch_size = int(len(x_train) / 10)
    epochs = 10

    # Define architecture
    l_in = Input(shape=x_train[0].shape)
    l = Reshape(l_in, shape=(28 * 28, ))
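    # Two 1024-unit hidden layers: one with parametric ReLU and one with leaky ReLU
    # (alpha=0.1 is the slope applied to negative inputs).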
    l = PRelu(Dense(l, 1024))
    l = LeakyRelu(Dense(l, 1024), alpha=0.1)
    l_out = Softmax(Dense(l, num_classes))

    # Build network
    mymodel = Net()
    mymodel.build(
        l_in=[l_in],
        l_out=[l_out],
        optimizer=Adam(lr=0.001),
        losses=[losses.CrossEntropy()],
        metrics=[[metrics.CategoricalAccuracy()]],
        debug=False,
        smart_derivatives=False,
    )

    # Print model
    mymodel.summary()

    # Train
    mymodel.fit([x_train], [y_train],
                x_test=[x_test],
                y_test=[y_test],
                batch_size=batch_size,
                epochs=epochs,
                evaluate_epoch=False,
                print_rate=1)
Example #3
    def test_iris_model1(self):
        from sklearn import datasets

        # Get data
        iris = datasets.load_iris()
        X = iris.data  # use all four features
        Y = iris.target

        # Pre-processing
        # Standardize
        X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)

        # Classes to categorical
        num_classes = 3
        Y = utils.to_categorical(Y, num_classes=num_classes)

        # Shuffle dataset
        idxs = np.arange(len(X))
        np.random.shuffle(idxs)
        X, Y = X[idxs], Y[idxs]

        # Select train/test
        c = 1  # use all samples for the gradient check (0.8 would hold out 20% for testing)
        tr_size = int(len(X) * c)
        x_train, y_train = X[:tr_size], Y[:tr_size]

        # Define architecture
        l_in = Input(shape=x_train[0].shape)
        l = Dense(l_in, 20)
        l = Relu(l)
        l = Dense(l, 15)
        l = Relu(l)
        l = Dense(l, num_classes)
        l_out = Softmax(l)

        # Build network
        model = Net()
        model.build(l_in=[l_in],
                    l_out=[l_out],
                    optimizer=Adam(lr=10e-2),
                    losses=[losses.CrossEntropy()],
                    metrics=[[metrics.CategoricalAccuracy()]],
                    debug=False)

        # Check gradient
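        # gradient_check compares the backprop gradients against numerical
        # finite-difference estimates on a few samples and returns True if they agree.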
        passed = gradient_check(model, [x_train], [y_train],
                                batch_size=int(len(x_train) / 10),
                                max_samples=25)
        self.assertTrue(passed)
Example #4
    def test_mnist_model_mlp(self):
        from keras import datasets

        # Get data
        (x_train, y_train), (_, _) = datasets.mnist.load_data()

        # Pre-processing
        # Normalize
        x_train = x_train / 255.0

        # Classes to categorical
        num_classes = 10
        y_train = utils.to_categorical(y_train, num_classes=num_classes)

        # Shuffle dataset
        x_train, y_train = utils.shuffle_dataset(x_train, y_train)

        l_in = Input(shape=x_train[0].shape)
        l = Reshape(l_in, shape=(28 * 28, ))
        l1 = Relu(Dense(l, 512))
        # l2 = Relu(Dense(l1, 512))
        # l = Add([l1, l2])
        # l = BatchNorm(l)
        l = Relu(Dense(l1, 512))
        l_out = Softmax(Dense(l, num_classes))

        # Build network
        model = Net()
        model.build(
            l_in=[l_in],
            l_out=[l_out],
            optimizer=Adam(lr=10e-3),
            losses=[losses.CrossEntropy()],
            metrics=[[metrics.CategoricalAccuracy()]],
            debug=False,
            smart_derivatives=False,
        )

        # Check gradient
        passed = gradient_check(model, [x_train], [y_train],
                                batch_size=int(len(x_train) / 10),
                                max_samples=5,
                                verbose=2)
        self.assertTrue(passed)
Example #5
def main():
    # Get data
    iris = datasets.load_iris()
    X = iris.data  # use all four features
    Y = iris.target

    # Pre-processing
    # Standardize
    X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)

    # Classes to categorical
    num_classes = 3
    Y = utils.to_categorical(Y, num_classes=num_classes)

    # Shuffle dataset
    idxs = np.arange(len(X))
    np.random.shuffle(idxs)
    X, Y = X[idxs], Y[idxs]

    # Select train/test
    c = 0.8
    tr_size = int(len(X) * c)
    x_train, y_train = X[:tr_size], Y[:tr_size]
    x_test, y_test = X[tr_size:], Y[tr_size:]

    # Params *********************************
    batch_size = int(len(x_train) / 5)
    epochs = 500

    # Define architecture
    l_in = Input(shape=x_train[0].shape)
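    # First hidden layer with weight-decay style regularization: L2 on the kernel and
    # L1 on the bias (lmda is the regularization strength).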
    l = Dense(l_in,
              20,
              kernel_regularizer=L2(lmda=0.01),
              bias_regularizer=L1(lmda=0.01))
    l = Relu(l)
    l = Dense(l, 15)
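    # Batch normalization, then dropout (rate 0.1), applied before the ReLU activation.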
    l = BatchNorm(l)
    l = Dropout(l, 0.1)
    l = Relu(l)
    l = Dense(l, num_classes)
    l_out = Softmax(l)

    # Build network
    mymodel = Net()
    mymodel.build(l_in=[l_in],
                  l_out=[l_out],
                  optimizer=Adam(lr=10e-2),
                  losses=[losses.CrossEntropy()],
                  metrics=[[metrics.CategoricalAccuracy()]],
                  debug=False)

    # Print model
    mymodel.summary()

    # Train
    mymodel.fit([x_train], [y_train],
                x_test=[x_test],
                y_test=[y_test],
                batch_size=batch_size,
                epochs=epochs,
                evaluate_epoch=True,
                print_rate=10)

    # Save model
    mymodel.save("./trained/trained_iris.pkl", save_grads=True)

    # Evaluate
    print("\n----------------------")
    print("Evaluation:")
    lo, me = mymodel.evaluate([x_test], [y_test], batch_size=batch_size)
    str_eval = mymodel._format_eval(lo, me)
    print(f"- Losses[{', '.join(str_eval[0])}]")
    print(f"- Metrics[{'; '.join(str_eval[1])}]")
Example #6
    def test_mnist_model_conv(self):
        from keras import datasets

        # Get data
        (x_train, y_train), (_, _) = datasets.mnist.load_data()

        # Pre-processing

        # Add channel dimension
        x_train = np.expand_dims(x_train, axis=1)

        # Normalize
        x_train = x_train / 255.0

        # Classes to categorical
        num_classes = 10
        y_train = utils.to_categorical(y_train, num_classes=num_classes)

        # Shuffle dataset
        x_train, y_train = utils.shuffle_dataset(x_train, y_train)

        # Define architecture
        l_in = Input(shape=x_train[0].shape)
        l = l_in
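        # Minimal conv stack: 2 filters of 3x3 with no padding, then 3x3 max pooling
        # with stride 2; presumably kept small so the numerical gradient check stays cheap.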

        l = Conv2D(l,
                   filters=2,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   padding="none")
        l = MaxPool2D(l, pool_size=(3, 3), strides=(2, 2), padding="none")
        l = Relu(l)
        # l = GaussianNoise(l, stddev=0.1)

        # l = Conv2D(l, filters=4, kernel_size=(3, 3), strides=(1, 1), padding="same")
        # l = MaxPool2D(l, pool_size=(3, 3), strides=(2, 2), padding="none")
        # l = Relu(l)

        # l = DepthwiseConv2D(l, kernel_size=(3, 3), strides=(1, 1), padding="none")
        # l = PointwiseConv2D(l, filters=1)
        # l = MaxPool2D(l, pool_size=(3, 3), strides=(2, 2), padding="none")
        # l = Relu(l)

        l = Reshape(l, shape=(-1))
        l = Dense(l,
                  num_classes,
                  kernel_initializer=initializers.RandomUniform())
        l_out = Softmax(l)

        # Build network
        model = Net()
        model.build(
            l_in=[l_in],
            l_out=[l_out],
            optimizer=Adam(lr=10e-2),
            losses=[losses.CrossEntropy()],
            metrics=[[metrics.CategoricalAccuracy()]],
            debug=False,
            smart_derivatives=False,
        )

        # Check gradient
        passed = gradient_check(model, [x_train], [y_train],
                                batch_size=int(len(x_train) / 10),
                                max_samples=5,
                                verbose=2)
        self.assertTrue(passed)
Example #7
def main():
    # Get data
    (x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()

    # Pre-processing
    # Normalize
    x_train = x_train / 255.0
    x_test = x_test / 255.0

    # Classes to categorical
    num_classes = 10

    y_train_label = y_train.reshape((-1, 1))
    y_test_label = y_test.reshape((-1, 1))
    y_train = utils.to_categorical(y_train, num_classes=num_classes)
    y_test = utils.to_categorical(y_test, num_classes=num_classes)

    # Shuffle dataset
    x_train, y_train = utils.shuffle_dataset(x_train, y_train)
    x_test, y_test = utils.shuffle_dataset(x_test, y_test)

    # Params *********************************
    batch_size = int(len(x_train) / 10)
    epochs = 10

    # Define architecture
    l_in1 = Input(shape=x_train[0].shape)
    l_in2 = Input(shape=x_train[0].shape)
    l_in3 = Input(shape=x_train[0].shape)
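    # Three inputs (all fed the same MNIST images at fit time) share a graph joined by
    # Add merges; the heads are two softmax classifiers and one single-unit regression
    # output trained with MSE on the raw label.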

    l1 = Reshape(l_in1, shape=(28 * 28, ))
    l2 = Reshape(l_in2, shape=(28 * 28, ))
    l3 = Reshape(l_in3, shape=(28 * 28, ))

    l1 = Tanh(Dense(l1, 300))
    l2 = Relu(Dense(l2, 300))
    l3 = Sigmoid(Dense(l3, 300))

    l_mid1 = Add([l1, l2])
    l_mid2 = Add([l1, l3])

    l1 = Relu(Dense(l_mid1, 300))
    l2 = Relu(Dense(l_mid2, 300))

    l2 = Add([l1, l2])
    l3 = Add([l2, l3])
    l1 = BatchNorm(l1)

    l_out1 = Softmax(Dense(l1, num_classes))
    l_out2 = Softmax(Dense(l2, num_classes))
    l_out3 = Relu(Dense(l3, 1))

    # Build network
    mymodel = Net()
    mymodel.build(
        l_in=[l_in1, l_in2, l_in3],
        l_out=[l_out1, l_out2, l_out3],
        optimizer=Adam(lr=0.001),
        losses=[losses.CrossEntropy(),
                losses.CrossEntropy(),
                losses.MSE()],
        metrics=[[metrics.CategoricalAccuracy()],
                 [metrics.CategoricalAccuracy()],
                 [metrics.MSE(), metrics.MAE()]],
        debug=False,
        smart_derivatives=True,
    )

    # Print model
    mymodel.summary()

    # Train
    mymodel.fit([x_train, x_train, x_train], [y_train, y_train, y_train_label],
                x_test=[x_test, x_test, x_test],
                y_test=[y_test, y_test, y_test_label],
                batch_size=batch_size,
                epochs=epochs,
                evaluate_epoch=False,
                print_rate=1)
Example #8
def main():
    # Get data
    (x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()

    # Pre-processing
    # Add channel dimension
    x_train = np.expand_dims(x_train, axis=1)
    x_test = np.expand_dims(x_test, axis=1)

    # Normalize
    x_train = x_train / 255.0
    x_test = x_test / 255.0

    # Classes to categorical
    num_classes = 10
    y_train = utils.to_categorical(y_train, num_classes=num_classes)
    y_test = utils.to_categorical(y_test, num_classes=num_classes)

    # Shuffle dataset
    x_train, y_train = utils.shuffle_dataset(x_train, y_train)
    x_test, y_test = utils.shuffle_dataset(x_test, y_test)

    # Params *********************************
    batch_size = int(len(x_train) / 1)  # full-batch: a single update per epoch
    epochs = 10

    # Define architecture
    l_in = Input(shape=x_train[0].shape)
    l = l_in

    l = Conv2D(l,
               filters=2,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding="none")
    l = MaxPool2D(l, pool_size=(3, 3), strides=(2, 2), padding="none")
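    # LeakyRelu with alpha=0.0 is equivalent to a plain ReLU; GaussianNoise injects
    # additive noise (stddev=0.1) as a light regularizer.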
    l = LeakyRelu(l, alpha=0.0)
    l = GaussianNoise(l, stddev=0.1)

    # l = Conv2D(l, filters=4, kernel_size=(3, 3), strides=(1, 1), padding="same")
    # l = MaxPool2D(l, pool_size=(3, 3), strides=(2, 2), padding="none")
    # l = Relu(l)

    # l = DepthwiseConv2D(l, kernel_size=(3, 3), strides=(1, 1), padding="none")
    # l = PointwiseConv2D(l, filters=1)
    # l = MaxPool2D(l, pool_size=(3, 3), strides=(2, 2), padding="none")
    # l = Relu(l)

    l = Reshape(l, shape=(-1))
    l = Dense(l, num_classes, kernel_initializer=initializers.RandomUniform())
    l_out = Softmax(l)

    # Build network
    mymodel = Net()
    mymodel.build(
        l_in=[l_in],
        l_out=[l_out],
        optimizer=Adam(lr=0.01),
        losses=[losses.CrossEntropy()],
        metrics=[[metrics.CategoricalAccuracy()]],
        debug=False,
        smart_derivatives=True,
    )

    # Print model
    mymodel.summary()

    # Train
    mymodel.fit([x_train], [y_train],
                x_test=[x_test],
                y_test=[y_test],
                batch_size=batch_size,
                epochs=epochs,
                evaluate_epoch=False,
                print_rate=1)