# NOTE: These examples assume an ML-From-Scratch-style deep learning API.
# The imports below reflect that assumption and may need adjusting to match
# your local module layout.
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

from mlfromscratch.deep_learning import NeuralNetwork
from mlfromscratch.deep_learning.layers import (
    Dense, Dropout, Conv2D, Flatten, Activation, BatchNormalization,
    RNN, Reshape, UpSampling2D, ZeroPadding2D)
from mlfromscratch.deep_learning.optimizers import Adam, Adadelta
from mlfromscratch.deep_learning.loss_functions import CrossEntropy, SquareLoss
from mlfromscratch.utils import train_test_split, to_categorical, accuracy_score, Plot


def model_builder(n_inputs, n_outputs):
    model = NeuralNetwork(optimizer=Adam(), loss=CrossEntropy)
    model.add(Dense(16, input_shape=(n_inputs,)))
    model.add(Activation('relu'))
    model.add(Dense(n_outputs))
    model.add(Activation('softmax'))
    return model
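# A minimal usage sketch for model_builder above. The digits dataset and the
# training settings here are illustrative assumptions, not from the source.
def demo_model_builder():
    data = datasets.load_digits()
    X = data.data                                  # (n_samples, 64)
    y = to_categorical(data.target.astype("int"))  # one-hot labels
    model = model_builder(n_inputs=X.shape[1], n_outputs=10)
    train_err, _ = model.fit(X, y, n_epochs=5, batch_size=256)
    print("final training error:", train_err[-1])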
def main():
    # Define the model.
    # Note: the MDN layer and MdnLoss are assumed to come from a
    # mixture-density-network extension of the same library.
    components = 3
    optimizer = Adam()
    loss = MdnLoss(num_components=components, output_dim=1)
    clf = NeuralNetwork(optimizer=optimizer, loss=loss)
    clf.add(Dense(n_units=26, input_shape=(1,)))
    clf.add(Activation('tanh'))
    clf.add(MDN(input_shape=(26,), output_shape=(1,), num_components=components))
    clf.summary(name="MDN")

    # Generate 1D regression data (Bishop book, page 273).
    # Note: P(y|x) is not a nice distribution;
    # e.g. it has three modes for x ~= 0.5.
    N = 225
    X = np.linspace(0, 1, N)
    Y = X + 0.3 * np.sin(2 * np.pi * X) + np.random.uniform(-0.1, 0.1, N)
    X, Y = Y, X  # swap so that P(y|x) becomes multimodal

    nb = N  # full batch
    xbatch = np.reshape(X[:nb], (nb, 1))
    ybatch = np.reshape(Y[:nb], (nb, 1))
    train_err, val_err = clf.fit(xbatch, ybatch, n_epochs=int(4e3), batch_size=N)

    plt.plot(train_err, label="Training Error")
    plt.title("Error Plot")
    plt.ylabel('Error')
    plt.xlabel('Iterations')
    plt.show()

    # Contour plot of the predicted density on a grid
    n = 15
    xx = np.linspace(0, 1, n)
    yy = np.linspace(0, 1, n)
    xm, ym = np.meshgrid(xx, yy)
    loss, acc = clf.test_on_batch(xm.reshape(xm.size, 1),
                                  ym.reshape(ym.size, 1))
    ypred = clf.loss_function.ypred

    plt.figure(figsize=(10, 10))
    plt.scatter(X, Y, color='g')
    plt.contour(xm, ym, np.reshape(ypred, (n, n)),
                levels=np.linspace(ypred.min(), ypred.max(), 20))
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('{}-component Gaussian Mixture Model for P(y|x)'.format(components))
    plt.show()
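# For reference, the density the MDN above models is a K-component Gaussian
# mixture, p(y|x) = sum_k pi_k(x) * N(y; mu_k(x), sigma_k(x)^2). A standalone
# NumPy sketch of that formula (standard MDN math, independent of the library):
def mixture_density(y, pi, mu, sigma):
    """Evaluate a 1D Gaussian mixture at y; pi, mu, sigma have shape (K,)."""
    normals = np.exp(-0.5 * ((y - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
    return np.sum(pi * normals)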
def main():
    optimizer = Adadelta()

    #-----
    # MLP
    #-----

    data = datasets.load_digits()
    X = data.data
    y = data.target

    # Convert to one-hot encoding
    y = to_categorical(y.astype("int"))

    n_samples, n_features = np.shape(X)
    n_hidden = 512

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

    clf = NeuralNetwork(optimizer=optimizer,
                        loss=CrossEntropy,
                        validation_data=(X_test, y_test))

    clf.add(Dense(n_hidden, input_shape=(n_features,)))
    clf.add(Activation('leaky_relu'))
    clf.add(Dense(n_hidden))
    clf.add(Activation('leaky_relu'))
    clf.add(Dropout(0.25))
    clf.add(Dense(n_hidden))
    clf.add(Activation('leaky_relu'))
    clf.add(Dropout(0.25))
    clf.add(Dense(n_hidden))
    clf.add(Activation('leaky_relu'))
    clf.add(Dropout(0.25))
    clf.add(Dense(10))
    clf.add(Activation('softmax'))

    print()
    clf.summary(name="MLP")

    train_err, val_err = clf.fit(X_train, y_train, n_epochs=50, batch_size=256)

    # Training and validation error plot
    n = len(train_err)
    training, = plt.plot(range(n), train_err, label="Training Error")
    validation, = plt.plot(range(n), val_err, label="Validation Error")
    plt.legend(handles=[training, validation])
    plt.title("Error Plot")
    plt.ylabel('Error')
    plt.xlabel('Iterations')
    plt.show()

    # Predict labels of the test data
    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)

    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    # Reduce dimensions to 2D using PCA and plot the results
    Plot().plot_in_2d(X_test, y_pred,
                      title="Multilayer Perceptron",
                      accuracy=accuracy,
                      legend_labels=range(10))
def main():
    optimizer = Adam()

    def gen_mult_ser(nums):
        """ Method which generates multiplication series """
        X = np.zeros([nums, 10, 61], dtype=float)
        y = np.zeros([nums, 10, 61], dtype=float)
        for i in range(nums):
            start = np.random.randint(2, 7)
            mult_ser = np.linspace(start, start * 10, num=10, dtype=int)
            X[i] = to_categorical(mult_ser, n_col=61)
            y[i] = np.roll(X[i], -1, axis=0)
        y[:, -1, 1] = 1  # Mark endpoint as 1
        return X, y

    def gen_num_seq(nums):
        """ Method which generates sequence of numbers """
        X = np.zeros([nums, 10, 20], dtype=float)
        y = np.zeros([nums, 10, 20], dtype=float)
        for i in range(nums):
            start = np.random.randint(0, 10)
            num_seq = np.arange(start, start + 10)
            X[i] = to_categorical(num_seq, n_col=20)
            y[i] = np.roll(X[i], -1, axis=0)
        y[:, -1, 1] = 1  # Mark endpoint as 1
        return X, y

    X, y = gen_mult_ser(3000)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    # Model definition
    clf = NeuralNetwork(optimizer=optimizer, loss=CrossEntropy)
    clf.add(RNN(10, activation="tanh", bptt_trunc=5, input_shape=(10, 61)))
    clf.add(Activation('softmax'))
    clf.summary("RNN")

    # Print a problem instance and the correct solution
    tmp_X = np.argmax(X_train[0], axis=1)
    tmp_y = np.argmax(y_train[0], axis=1)
    print("Number Series Problem:")
    print("X = [" + " ".join(tmp_X.astype("str")) + "]")
    print("y = [" + " ".join(tmp_y.astype("str")) + "]")
    print()

    train_err, _ = clf.fit(X_train, y_train, n_epochs=500, batch_size=512)

    # Predict labels of the test data
    y_pred = np.argmax(clf.predict(X_test), axis=2)
    y_test = np.argmax(y_test, axis=2)

    print()
    print("Results:")
    for i in range(5):
        # Print a problem instance and the correct solution
        tmp_X = np.argmax(X_test[i], axis=1)
        tmp_y1 = y_test[i]
        tmp_y2 = y_pred[i]
        print("X = [" + " ".join(tmp_X.astype("str")) + "]")
        print("y_true = [" + " ".join(tmp_y1.astype("str")) + "]")
        print("y_pred = [" + " ".join(tmp_y2.astype("str")) + "]")
        print()

    accuracy = np.mean(accuracy_score(y_test, y_pred))
    print("Accuracy:", accuracy)

    training = plt.plot(range(500), train_err, label="Training Error")
    plt.title("Error Plot")
    plt.ylabel('Training Error')
    plt.xlabel('Iterations')
    plt.show()
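# Why np.roll(X[i], -1, axis=0) builds the targets above: each timestep's
# label is the next timestep's input (next-token prediction), and the final
# slot is then overwritten with an explicit endpoint marker. Tiny illustration:
def demo_roll_targets():
    seq = np.array([3, 6, 9, 12])
    print(np.roll(seq, -1))  # [ 6  9 12  3]; the wrapped-around 3 is the slot
                             # that gets replaced by the endpoint marker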
def build_discriminator(self, optimizer, loss_function):
    model = NeuralNetwork(optimizer=optimizer, loss=loss_function)

    model.add(Conv2D(32, filter_shape=(3, 3), stride=2,
                     input_shape=self.img_shape, padding='same'))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, filter_shape=(3, 3), stride=2, padding='same'))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, filter_shape=(3, 3), stride=2, padding='same'))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, filter_shape=(3, 3), stride=1, padding='same'))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(2))
    model.add(Activation('softmax'))

    return model
def build_generator(self, optimizer, loss_function):
    model = NeuralNetwork(optimizer=optimizer, loss=loss_function)

    model.add(Dense(128 * 7 * 7, input_shape=(100,)))
    model.add(Activation('leaky_relu'))
    model.add(Reshape((128, 7, 7)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, filter_shape=(3, 3), padding='same'))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, filter_shape=(3, 3), padding='same'))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(1, filter_shape=(3, 3), padding='same'))
    model.add(Activation('tanh'))

    return model
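# In a typical GAN, the generator and discriminator above are also chained
# into a combined model for the generator's update. A hedged sketch, assuming
# .layers is a plain list as in ML-From-Scratch-style networks (this helper
# is an illustration, not confirmed source code):
def build_combined(self, optimizer, loss_function):
    combined = NeuralNetwork(optimizer=optimizer, loss=loss_function)
    combined.layers.extend(self.generator.layers)      # noise -> image
    combined.layers.extend(self.discriminator.layers)  # image -> real/fake
    return combined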
def build_decoder(self, optimizer, loss_function):
    decoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)

    decoder.add(Dense(256, input_shape=(self.latent_dim,)))
    decoder.add(Activation('leaky_relu'))
    decoder.add(BatchNormalization(momentum=0.8))
    decoder.add(Dense(512))
    decoder.add(Activation('leaky_relu'))
    decoder.add(BatchNormalization(momentum=0.8))
    decoder.add(Dense(self.img_dim))
    decoder.add(Activation('tanh'))

    return decoder
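# A plausible matching encoder for the decoder above (an assumption that
# mirrors the decoder's architecture in reverse, not taken from the source):
def build_encoder(self, optimizer, loss_function):
    encoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
    encoder.add(Dense(512, input_shape=(self.img_dim,)))
    encoder.add(Activation('leaky_relu'))
    encoder.add(BatchNormalization(momentum=0.8))
    encoder.add(Dense(256))
    encoder.add(Activation('leaky_relu'))
    encoder.add(BatchNormalization(momentum=0.8))
    encoder.add(Dense(self.latent_dim))
    return encoder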
def main():
    optimizer = Adam()

    #-----
    # MLP
    #-----

    data = datasets.load_digits()
    X = data.data
    y = data.target

    # Convert to one-hot encoding
    y = to_categorical(y.astype("int"))

    n_samples, n_features = X.shape
    n_hidden = 512

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

    clf = NeuralNetwork(optimizer=optimizer,
                        loss=CrossEntropy,
                        validation_data=(X_test, y_test))

    clf.add(Dense(n_hidden, input_shape=(n_features,)))
    clf.add(Activation('leaky_relu'))
    clf.add(Dense(n_hidden))
    clf.add(Activation('leaky_relu'))
    clf.add(Dropout(0.25))
    clf.add(Dense(n_hidden))
    clf.add(Activation('leaky_relu'))
    clf.add(Dropout(0.25))
    clf.add(Dense(n_hidden))
    clf.add(Activation('leaky_relu'))
    clf.add(Dropout(0.25))
    clf.add(Dense(10))
    clf.add(Activation('softmax'))

    print()
    clf.summary(name="MLP")

    train_err, val_err = clf.fit(X_train, y_train, n_epochs=50, batch_size=256)

    # Training and validation error plot
    n = len(train_err)
    training, = plt.plot(range(n), train_err, label="Training Error")
    validation, = plt.plot(range(n), val_err, label="Validation Error")
    plt.legend(handles=[training, validation])
    plt.title("Error Plot")
    plt.ylabel('Error')
    plt.xlabel('Iterations')
    plt.show()

    _, accuracy = clf.test_on_batch(X_test, y_test)
    print("Accuracy:", accuracy)

    # Reduce dimensions to 2D using PCA and plot the results
    y_pred = np.argmax(clf.predict(X_test), axis=1)
    Plot().plot_in_2d(X_test, y_pred,
                      title="Multilayer Perceptron",
                      accuracy=accuracy,
                      legend_labels=range(10))
def main():
    #----------
    # Conv Net
    #----------

    optimizer = Adam()

    data = datasets.load_digits()
    X = data.data
    y = data.target

    # Convert to one-hot encoding
    y = to_categorical(y.astype("int"))

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

    # Reshape X to (n_samples, channels, height, width)
    X_train = X_train.reshape((-1, 1, 8, 8))
    X_test = X_test.reshape((-1, 1, 8, 8))

    clf = NeuralNetwork(optimizer=optimizer,
                        loss=CrossEntropy,
                        validation_data=(X_test, y_test))

    clf.add(Conv2D(n_filters=16, filter_shape=(3, 3), stride=1,
                   input_shape=(1, 8, 8), padding='same'))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.25))
    clf.add(BatchNormalization())
    clf.add(Conv2D(n_filters=32, filter_shape=(3, 3), stride=1, padding='same'))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.25))
    clf.add(BatchNormalization())
    clf.add(Flatten())
    clf.add(Dense(256))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.4))
    clf.add(BatchNormalization())
    clf.add(Dense(10))
    clf.add(Activation('softmax'))

    print()
    clf.summary(name="ConvNet")

    train_err, val_err = clf.fit(X_train, y_train, n_epochs=50, batch_size=256)

    # Training and validation error plot
    n = len(train_err)
    training, = plt.plot(range(n), train_err, label="Training Error")
    validation, = plt.plot(range(n), val_err, label="Validation Error")
    plt.legend(handles=[training, validation])
    plt.title("Error Plot")
    plt.ylabel('Error')
    plt.xlabel('Iterations')
    plt.show()

    _, accuracy = clf.test_on_batch(X_test, y_test)
    print("Accuracy:", accuracy)

    y_pred = np.argmax(clf.predict(X_test), axis=1)
    X_test = X_test.reshape(-1, 8 * 8)

    # Reduce dimensions to 2D using PCA and plot the results
    Plot().plot_in_2d(X_test, y_pred,
                      title="Convolutional Neural Network",
                      accuracy=accuracy,
                      legend_labels=range(10))
def build_discriminator(self, optimizer, loss_function):
    model = NeuralNetwork(optimizer=optimizer, loss=loss_function)

    model.add(Dense(512, input_shape=(self.img_dim,)))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.5))
    model.add(Dense(256))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    return model
def build_generator(self, optimizer, loss_function):
    model = NeuralNetwork(optimizer=optimizer, loss=loss_function)

    model.add(Dense(256, input_shape=(self.latent_dim,)))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(self.img_dim))
    model.add(Activation('tanh'))

    return model
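# Hedged sketch of one GAN training step using the two builders above.
# self.generator, self.discriminator, self.combined and train_on_batch are
# assumptions in the style of the surrounding code, not confirmed API. The
# two-column labels match the softmax(2) discriminator head: column 0 = fake,
# column 1 = real.
def train_step(self, X_real, batch_size=64):
    noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
    X_fake = self.generator.predict(noise)

    real = np.concatenate((np.zeros((batch_size, 1)), np.ones((batch_size, 1))), axis=1)
    fake = np.concatenate((np.ones((batch_size, 1)), np.zeros((batch_size, 1))), axis=1)

    # Discriminator: learn to separate real from generated samples
    self.discriminator.train_on_batch(X_real[:batch_size], real)
    self.discriminator.train_on_batch(X_fake, fake)

    # Generator (via the combined model): learn to make fakes look real
    self.combined.train_on_batch(noise, real)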
def model(n_inputs, n_outputs):
    clf = NeuralNetwork(optimizer=Adam(), loss=SquareLoss)
    clf.add(Dense(64, input_shape=(n_inputs,)))
    clf.add(Activation('relu'))
    clf.add(Dense(n_outputs))
    return clf
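# Minimal usage sketch for the regression builder above (synthetic data;
# everything here is illustrative):
def demo_regression_model():
    X = np.random.uniform(-1, 1, (200, 3))
    y = X.sum(axis=1, keepdims=True)  # single linear target
    reg = model(n_inputs=3, n_outputs=1)
    train_err, _ = reg.fit(X, y, n_epochs=100, batch_size=32)
    print("final training error:", train_err[-1])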