def build_discriminator(self, optimizer, loss_function):
        """Construct the discriminator CNN.

        Downsamples the input image with strided convolutions and classifies
        it into 2 classes (real / fake) via a softmax head.

        Parameters:
            optimizer: optimizer instance passed to NeuralNetwork.
            loss_function: loss class/instance passed to NeuralNetwork.

        Returns:
            The assembled (uncompiled) NeuralNetwork model.
        """
        model = NeuralNetwork(optimizer=optimizer, loss=loss_function)

        # NOTE: `n_filters=` keyword used for consistency with the other
        # Conv2D call sites in this file (previously a bare positional 32).
        model.add(
            Conv2D(n_filters=32,
                   filter_shape=(3, 3),
                   stride=2,
                   input_shape=self.img_shape,
                   padding='same'))
        model.add(Activation('leaky_relu'))
        model.add(Dropout(0.25))
        model.add(Conv2D(n_filters=64, filter_shape=(3, 3), stride=2,
                         padding='same'))
        # Asymmetric padding so the spatial dims stay even after the stride-2
        # conv above — presumably to match the generator's output sizes;
        # TODO(review): confirm against the paired generator architecture.
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(Activation('leaky_relu'))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(n_filters=128, filter_shape=(3, 3), stride=2,
                         padding='same'))
        model.add(Activation('leaky_relu'))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(n_filters=256, filter_shape=(3, 3), stride=1,
                         padding='same'))
        model.add(Activation('leaky_relu'))
        model.add(Dropout(0.25))
        model.add(Flatten())
        # Two-way softmax head: real vs. fake.
        model.add(Dense(2))
        model.add(Activation('softmax'))

        return model
# Example #2 (listing marker from the original source; was "Beispiel #2" / "0")
    def build_encoder(self, optimizer, loss_function):
        """Build the encoder that maps a flat image vector to a latent code.

        Stack: Dense(512) -> LeakyReLU -> BatchNorm -> Dense(256) ->
        LeakyReLU -> BatchNorm -> Dense(latent_dim).

        Returns the assembled NeuralNetwork instance.
        """
        net = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        for layer in (
                Dense(512, input_shape=(self.img_dim, )),
                Activation('leaky_relu'),
                BatchNormalization(momentum=0.8),
                Dense(256),
                Activation('leaky_relu'),
                BatchNormalization(momentum=0.8),
                # Final projection into the latent space (no activation).
                Dense(self.latent_dim),
        ):
            net.add(layer)

        return net
    def build_generator(self, optimizer, loss_function):
        """Build the dense generator: latent vector -> flat image vector.

        Widening MLP (256 -> 512 -> 1024) with LeakyReLU + BatchNorm after
        each hidden layer, ending in a tanh-activated output of img_dim units.
        """
        net = NeuralNetwork(optimizer=optimizer, loss=loss_function)

        net.add(Dense(256, input_shape=(self.latent_dim,)))
        net.add(Activation('leaky_relu'))
        net.add(BatchNormalization(momentum=0.8))

        net.add(Dense(512))
        net.add(Activation('leaky_relu'))
        net.add(BatchNormalization(momentum=0.8))

        net.add(Dense(1024))
        net.add(Activation('leaky_relu'))
        net.add(BatchNormalization(momentum=0.8))

        # tanh output — presumably the training images are scaled to
        # [-1, 1]; verify against the data pipeline.
        net.add(Dense(self.img_dim))
        net.add(Activation('tanh'))

        return net
    def build_generator(self, optimizer, loss_function):
        """Build the convolutional generator (DCGAN-style).

        Projects a 100-dim latent vector to a 128x7x7 tensor, then upsamples
        twice (7 -> 14 -> 28) with 3x3 convolutions, ending in a single-channel
        tanh-activated image.
        """
        net = NeuralNetwork(optimizer=optimizer, loss=loss_function)

        # Dense projection, then reshape into (channels, height, width).
        net.add(Dense(128 * 7 * 7, input_shape=(100, )))
        net.add(Activation('leaky_relu'))
        net.add(Reshape((128, 7, 7)))
        net.add(BatchNormalization(momentum=0.8))

        # 7x7 -> 14x14
        net.add(UpSampling2D())
        net.add(Conv2D(128, filter_shape=(3, 3), padding='same'))
        net.add(Activation("leaky_relu"))
        net.add(BatchNormalization(momentum=0.8))

        # 14x14 -> 28x28
        net.add(UpSampling2D())
        net.add(Conv2D(64, filter_shape=(3, 3), padding='same'))
        net.add(Activation("leaky_relu"))
        net.add(BatchNormalization(momentum=0.8))

        # Single-channel output in [-1, 1].
        net.add(Conv2D(1, filter_shape=(3, 3), padding='same'))
        net.add(Activation("tanh"))

        return net
# Example #5 (listing marker from the original source; was "Beispiel #5" / "0")
def main():
    """Train a small ConvNet on the sklearn digits dataset and visualize it.

    Loads the 8x8 digit images, one-hot encodes the labels, trains a
    two-conv-block network, plots the error curves, prints test accuracy,
    and shows a 2D PCA projection of the predictions.
    """
    optimizer = Adam()

    digits = datasets.load_digits()
    features = digits.data
    labels = digits.target

    # One-hot encode the integer class labels.
    labels = to_categorical(labels.astype("int"))

    X_train, X_test, y_train, y_test = train_test_split(features,
                                                        labels,
                                                        test_size=0.4,
                                                        seed=1)

    # The digits arrive flattened; restore (n_samples, channels, height, width).
    X_train = X_train.reshape((-1, 1, 8, 8))
    X_test = X_test.reshape((-1, 1, 8, 8))

    clf = NeuralNetwork(optimizer=optimizer,
                        loss=CrossEntropy,
                        validation_data=(X_test, y_test))

    # Two conv blocks followed by a dense classifier head.
    for layer in (
            Conv2D(n_filters=16,
                   filter_shape=(3, 3),
                   stride=1,
                   input_shape=(1, 8, 8),
                   padding='same'),
            Activation('relu'),
            Dropout(0.25),
            BatchNormalization(),
            Conv2D(n_filters=32, filter_shape=(3, 3), stride=1,
                   padding='same'),
            Activation('relu'),
            Dropout(0.25),
            BatchNormalization(),
            Flatten(),
            Dense(256),
            Activation('relu'),
            Dropout(0.4),
            BatchNormalization(),
            Dense(10),
            Activation('softmax'),
    ):
        clf.add(layer)

    print()
    clf.summary(name="ConvNet")

    train_err, val_err = clf.fit(X_train, y_train, n_epochs=50, batch_size=256)

    # Plot training vs. validation error over iterations.
    n = len(train_err)
    training, = plt.plot(range(n), train_err, label="Training Error")
    validation, = plt.plot(range(n), val_err, label="Validation Error")
    plt.legend(handles=[training, validation])
    plt.title("Error Plot")
    plt.ylabel('Error')
    plt.xlabel('Iterations')
    plt.show()

    _, accuracy = clf.test_on_batch(X_test, y_test)
    print("Accuracy:", accuracy)

    y_pred = np.argmax(clf.predict(X_test), axis=1)
    X_test = X_test.reshape(-1, 8 * 8)
    # Reduce to 2D via PCA and scatter-plot the predicted classes.
    Plot().plot_in_2d(X_test,
                      y_pred,
                      title="Convolutional Neural Network",
                      accuracy=accuracy,
                      legend_labels=range(10))