Example #1
    def build_encoder(self, optimizer, loss_function):
        """Build the encoder half of the autoencoder: img_dim -> 512 -> 256 -> latent_dim."""
        encoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        encoder.add(Dense(512, input_shape=(self.img_dim, ), first_layer=True))
        encoder.add(Activation('leaky_relu'))
        #encoder.add(BatchNormalization(momentum=0.8))
        encoder.add(Dense(256))
        encoder.add(Activation('leaky_relu'))
        #encoder.add(BatchNormalization(momentum=0.8))
        encoder.add(Dense(self.latent_dim, latent_layer=True))  # bottleneck: the latent code

        return encoder
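
The encoder above maps an img_dim-sized input down to a latent_dim code through 512- and 256-unit Dense layers with leaky-ReLU activations. As a rough illustration of what one Dense + leaky_relu pair computes, here is a minimal NumPy sketch; the 0.2 negative slope and the random weights are assumptions for illustration, not the library's internals.

import numpy as np

# What a single Dense layer followed by 'leaky_relu' computes, in plain NumPy.
# The 0.2 slope is an assumed default; the library's own parameter is not shown above.
def dense_leaky_relu(x, W, b, alpha=0.2):
    z = x @ W + b                         # affine transform of the Dense layer
    return np.where(z > 0, z, alpha * z)  # leaky ReLU keeps a small slope for z < 0

rng = np.random.default_rng(0)
x = rng.standard_normal((1, 784))                         # one flattened 28x28 image (img_dim = 784)
W1, b1 = 0.01 * rng.standard_normal((784, 512)), np.zeros(512)
print(dense_leaky_relu(x, W1, b1).shape)                  # (1, 512): first hidden layer
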
Example #2
import numpy as np
# NeuralNetwork, InputLayer and DenseLayer come from the surrounding project (imports not shown here).


def main():
    def loadMNIST(prefix, folder):
        # IDX image files start with four big-endian int32 header fields:
        # magic number, image count, width, height.
        intType = np.dtype('int32').newbyteorder('>')
        nMetaDataBytes = 4 * intType.itemsize

        data = np.fromfile(folder + "/" + prefix + '-images-idx3-ubyte', dtype='ubyte')
        magicBytes, nImages, width, height = np.frombuffer(data[:nMetaDataBytes].tobytes(), intType)
        data = data[nMetaDataBytes:].astype(dtype='float32').reshape([nImages, width, height])

        # Label files carry only two int32 header fields (magic number, label count).
        labels = np.fromfile(folder + "/" + prefix + '-labels-idx1-ubyte',
                             dtype='ubyte')[2 * intType.itemsize:]

        return data, labels

    train, train_labels = loadMNIST("train", "./mnist/")
    test, test_labels = loadMNIST("t10k", "./mnist/")

    # Flatten the 28x28 images to 784-vectors and scale pixel values to [0, 1]
    train = train.reshape((len(train), 784)) / 255.
    test = test.reshape((len(test), 784)) / 255.

    # print(train[0])

    # Simple MLP classifier: 784 -> 30 (ReLU) -> 10 (softmax), He-initialized weights
    neural_net = NeuralNetwork()
    neural_net.add(InputLayer(784))
    neural_net.add(DenseLayer(30, activation="relu"))
    neural_net.add(DenseLayer(10, activation="softmax"))
    neural_net.initialize_weights(initializer="He")

    # print(neural_net.weights[-1][:, 0])

    neural_net.train(train, labels=train_labels.astype(int), loss="cross_entropy", learning_rate=0.1, epochs=10, mini_batch_size=8)
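
loadMNIST above parses the raw IDX files by hand: the image file begins with four big-endian int32 header fields (magic number, image count, rows, columns), which is exactly the 4 * intType.itemsize block skipped before reshaping. A minimal sketch of the same header read using only the standard library, assuming the same ./mnist/ files are on disk:

import struct

# Read the 16-byte IDX3 header as four big-endian int32 values.
with open("./mnist/train-images-idx3-ubyte", "rb") as f:
    magic, n_images, rows, cols = struct.unpack(">iiii", f.read(16))
print(magic, n_images, rows, cols)  # 2051 60000 28 28 for the standard MNIST training file
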
Example #3
y = to_categorical(y.astype("int"))  # one-hot encode the integer class labels

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# print(X_train.shape)
# Keep a small subset so the demo trains quickly
X_train = X_train[:256]
y_train = y_train[:256]
X_test = X_test[:256]
y_test = y_test[:256]
X_train = X_train.reshape((-1, 1, 8, 8))
X_test = X_test.reshape((-1, 1, 8, 8))

# Model (NeuralNetwork, SquareLoss and the layer classes below come from the
# example's own deep-learning library; their imports are not part of this fragment)
model = NeuralNetwork(SquareLoss(), (X_test, y_test))  # loss function and held-out validation data
model.add(
    Conv2D(16,
           filter_shape=(3, 3),
           stride=1,
           input_shape=(1, 8, 8),
           padding='same'))
model.add(Activation('relu'))
model.add(Dropout(p=0.2))
model.add(Conv2D(n_filters=32, filter_shape=(3, 3), stride=1, padding='same'))
model.add(Activation('relu'))
model.add(Dropout(p=0.2))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Dense(10))
model.add(Activation('softmax'))

train_err = model.fit(X_train, y_train, n_epochs=5, batch_size=256)
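
This fragment uses X and y without defining them. Given the (1, 8, 8) input shape and the 10-way softmax head, they most likely hold a small 8x8 digits dataset; the sketch below shows one way such arrays could be produced with scikit-learn, but that source is an assumption, not something shown in the snippet.

# Assumed preprocessing step (hypothetical; the fragment itself starts at to_categorical)
from sklearn import datasets

data = datasets.load_digits()
X = data.data    # shape (1797, 64): flattened 8x8 grayscale digits
y = data.target  # integer class labels 0-9
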
Example #4
    def build_decoder(self, optimizer, loss_function):
        """Build the decoder half of the autoencoder: latent_dim -> 256 -> 512 -> img_dim."""
        decoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        decoder.add(Dense(256, input_shape=(self.latent_dim, )))
        decoder.add(Activation('leaky_relu'))
        decoder.add(BatchNormalization(momentum=0.8))
        decoder.add(Dense(512))
        decoder.add(Activation('leaky_relu'))
        decoder.add(BatchNormalization(momentum=0.8))
        decoder.add(Dense(self.img_dim))
        decoder.add(Activation('tanh'))  # reconstructions fall in (-1, 1)

        return decoder
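
Because the decoder ends in a tanh activation, its reconstructions lie in (-1, 1), so the images fed to such an autoencoder are usually rescaled into the same range. The convention below ([0, 255] pixels mapped to [-1, 1]) is an assumption for illustration, since the surrounding training code is not part of this example.

import numpy as np

def to_tanh_range(images_uint8):
    # [0, 255] -> [-1, 1], matching the decoder's tanh output range
    return images_uint8.astype("float32") / 127.5 - 1.0

def from_tanh_range(x):
    # (-1, 1) -> [0, 255], e.g. for displaying reconstructions
    return np.rint((x + 1.0) * 127.5).clip(0, 255).astype("uint8")

demo = np.array([[0, 128, 255]], dtype="uint8")
print(to_tanh_range(demo))                   # approx. [-1.  0.004  1.]
print(from_tanh_range(to_tanh_range(demo)))  # back to [  0 128 255]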