import collections

import numpy as np


def run_prediction():
    # Order the collected samples by key before extracting features
    od_sample_queue = collections.OrderedDict(sorted(samples_queue.items()))
    punctuation_samples = np.array(
        FeatureGen.punctuation_marks(od_sample_queue.values()))
    backspace_samples = np.array(
        FeatureGen.backspace_count(od_sample_queue.values()))
    type_pace_samples = np.array(FeatureGen.type_rate(od_sample_queue))
    new_data = prepare_data(punctuation_samples, type_pace_samples,
                            backspace_samples)
    nn = NeuralNetwork()
    model = nn.load_model(JSON_PATH, WEIGHTS_PATH)
    print(model.predict_proba(new_data))
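`prepare_data` is not defined in this excerpt. A minimal sketch of what such a helper might look like; the column order and the absence of any scaling are assumptions, not taken from the source:

def prepare_data(punctuation, type_pace, backspace):
    # Hypothetical helper: stack the per-sample feature arrays column-wise so
    # each row becomes one (punctuation, typing-rate, backspace) feature vector.
    return np.column_stack((punctuation, type_pace, backspace))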
""" Method which generates sequence of numbers """ X = np.zeros([nums, 10, 20], dtype=float) y = np.zeros([nums, 10, 20], dtype=float) for i in range(nums): start = np.random.randint(0, 10) num_seq = np.arange(start, start+10) X[i] = to_categorical(num_seq, n_col=20) y[i] = np.roll(X[i], -1, axis=0) y[:, -1, 1] = 1 # Mark endpoint as 1 return X, y X, y = gen_multiplication_series(3000) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4) # Model definition clf = NeuralNetwork(optimizer=optimizer, loss=CrossEntropy) clf.add(RNN(10, activation="tanh", bptt_trunc=5, input_shape=(10, 61))) clf.add(Activation('softmax')) clf.summary("Recurrent Neural Network") # Print a problem instance and the correct solution tmp_X = np.argmax(X_train[0], axis=1) tmp_y = np.argmax(y_train[0], axis=1) print ("Number Series Problem:") print ("X = [" + " ".join(tmp_X.astype("str")) + "]") print ("y = [" + " ".join(tmp_y.astype("str")) + "]") print () train_err, _ = clf.fit(X_train, y_train, n_epochs=500, batch_size=512) # Predict labels of the test data
y = data.target

# Convert to one-hot encoding
y = to_categorical(y.astype("int"))

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

# Reshape X to (n_samples, channels, height, width)
X_train = X_train.reshape((-1, 1, 8, 8))
X_test = X_test.reshape((-1, 1, 8, 8))

clf = NeuralNetwork(optimizer=optimizer,
                    loss=CrossEntropy,
                    validation_data=(X_test, y_test))

clf.add(Conv2D(n_filters=16, filter_shape=(3, 3), stride=1,
               input_shape=(1, 8, 8), padding='same'))
clf.add(Activation('relu'))
clf.add(Dropout(0.25))
clf.add(BatchNormalization())
clf.add(Conv2D(n_filters=32, filter_shape=(3, 3), stride=1, padding='same'))
clf.add(Activation('relu'))
clf.add(Dropout(0.25))
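The excerpt stops mid-definition. A sketch of how the model might be completed and trained, assuming the 8x8 inputs are a ten-class digits dataset and that the library provides `Flatten` and `Dense` layers in the same style as the layers used above (the head sizes and training hyperparameters here are illustrative):

clf.add(BatchNormalization())
clf.add(Flatten())              # (32, 8, 8) feature maps -> flat vector
clf.add(Dense(256))
clf.add(Activation('relu'))
clf.add(Dropout(0.4))
clf.add(BatchNormalization())
clf.add(Dense(10))              # one output per digit class
clf.add(Activation('softmax'))

train_err, val_err = clf.fit(X_train, y_train, n_epochs=50, batch_size=256)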
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml


class Autoencoder():
    """An Autoencoder with deep fully-connected neural nets.

    Training Data: MNIST Handwritten Digits (28x28 images)
    """
    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.img_dim = self.img_rows * self.img_cols
        self.latent_dim = 128  # The dimension of the data embedding

        optimizer = Adam(learning_rate=0.0002, b1=0.5)
        loss_function = SquareLoss

        self.encoder = self.build_encoder(optimizer, loss_function)
        self.decoder = self.build_decoder(optimizer, loss_function)

        # Combine encoder and decoder into a single model
        self.autoencoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        self.autoencoder.layers.extend(self.encoder.layers)
        self.autoencoder.layers.extend(self.decoder.layers)

        print()
        self.autoencoder.summary(name="Autoencoder")

    def build_encoder(self, optimizer, loss_function):
        encoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        encoder.add(Dense(512, input_shape=(self.img_dim,)))
        encoder.add(Activation('leaky_relu'))
        encoder.add(BatchNormalization(momentum=0.8))
        encoder.add(Dense(256))
        encoder.add(Activation('leaky_relu'))
        encoder.add(BatchNormalization(momentum=0.8))
        encoder.add(Dense(self.latent_dim))
        return encoder

    def build_decoder(self, optimizer, loss_function):
        decoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        decoder.add(Dense(256, input_shape=(self.latent_dim,)))
        decoder.add(Activation('leaky_relu'))
        decoder.add(BatchNormalization(momentum=0.8))
        decoder.add(Dense(512))
        decoder.add(Activation('leaky_relu'))
        decoder.add(BatchNormalization(momentum=0.8))
        decoder.add(Dense(self.img_dim))
        decoder.add(Activation('tanh'))
        return decoder

    def train(self, n_epochs, batch_size=128, save_interval=50):
        # mnist = fetch_mldata('MNIST original')
        # as_frame=False keeps X as a NumPy array so integer indexing below works
        mnist = fetch_openml('mnist_784', version=1, cache=True, as_frame=False)

        X = mnist.data
        y = mnist.target

        # Rescale to [-1, 1]
        X = (X.astype(np.float32) - 127.5) / 127.5

        for epoch in range(n_epochs):
            # Select a random batch of images
            idx = np.random.randint(0, X.shape[0], batch_size)
            imgs = X[idx]

            # Train the Autoencoder to reconstruct its input
            loss, _ = self.autoencoder.train_on_batch(imgs, imgs)

            # Display the progress
            print("%d [AE loss: %f]" % (epoch, loss))

            # If at save interval => save reconstructed image samples
            if epoch % save_interval == 0:
                self.save_imgs(epoch, X)

    def save_imgs(self, epoch, X):
        r, c = 5, 5  # Grid size
        # Select a random sample of images
        idx = np.random.randint(0, X.shape[0], r * c)
        imgs = X[idx]
        # Reconstruct the images and reshape to image shape
        gen_imgs = self.autoencoder.predict(imgs).reshape(
            (-1, self.img_rows, self.img_cols))

        # Rescale images to 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        plt.suptitle("Autoencoder")
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("ae_%d.png" % epoch)
        plt.close()
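A usage sketch for the class above; the epoch count, batch size, and save interval are illustrative values, not prescribed by the source:

if __name__ == '__main__':
    ae = Autoencoder()
    ae.train(n_epochs=20000, batch_size=64, save_interval=400)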