def model(n_inputs, n_outputs):
    clf = NeuralNetwork(optimizer=Adam(), loss=SquareLoss)
    clf.add(Dense(64, input_shape=(n_inputs,)))
    clf.add(Activation('relu'))
    clf.add(Dense(n_outputs))
    return clf
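# Usage sketch for the builder above (a hedged, illustrative example: the
# module paths are assumed to follow the mlfromscratch package layout, and
# the synthetic data is made up). fit(X, y, n_epochs, batch_size) and
# predict(X) are used the same way as in the demos further down. Imports
# would normally sit at the top of the file.
import numpy as np

from mlfromscratch.deep_learning import NeuralNetwork
from mlfromscratch.deep_learning.layers import Dense, Activation
from mlfromscratch.deep_learning.optimizers import Adam
from mlfromscratch.deep_learning.loss_functions import SquareLoss

X = np.random.randn(256, 10)          # 256 samples, 10 features
y = X.sum(axis=1, keepdims=True)      # toy regression target

reg = model(n_inputs=10, n_outputs=1)
reg.fit(X, y, n_epochs=100, batch_size=32)
y_pred = reg.predict(X)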
def build_discriminator(self, optimizer, loss_function):
    model = NeuralNetwork(optimizer=optimizer, loss=loss_function)
    model.add(Conv2D(32, filter_shape=(3, 3), stride=2, input_shape=self.img_shape, padding='same'))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, filter_shape=(3, 3), stride=2, padding='same'))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, filter_shape=(3, 3), stride=2, padding='same'))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.25))
    model.add(Conv2D(256, filter_shape=(3, 3), stride=1, padding='same'))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    return model
def build_discriminator(self, optimizer, loss_function):
    model = NeuralNetwork(optimizer=optimizer, loss=loss_function)
    model.add(Dense(512, input_shape=(self.img_dim,)))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.5))
    model.add(Dense(256))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    return model
def build_generator(self, optimizer, loss_function):
    model = NeuralNetwork(optimizer=optimizer, loss=loss_function)
    model.add(Dense(256, input_shape=(self.latent_dim,)))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(self.img_dim))
    model.add(Activation('tanh'))
    return model
def build_generator(self, optimizer, loss_function):
    model = NeuralNetwork(optimizer=optimizer, loss=loss_function)
    model.add(Dense(128 * 7 * 7, input_shape=(100,)))
    model.add(Activation('leaky_relu'))
    model.add(Reshape((128, 7, 7)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, filter_shape=(3, 3), padding='same'))
    model.add(Activation('leaky_relu'))
    model.add(UpSampling2D())
    model.add(Conv2D(64, filter_shape=(3, 3), padding='same'))
    model.add(Activation('leaky_relu'))
    model.add(Conv2D(1, filter_shape=(3, 3), padding='same'))
    model.add(Activation('tanh'))
    return model
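# How the generator/discriminator pairs above get wired together: a hedged
# sketch of one adversarial training step. Names are assumptions unless they
# match your version of the library: `generator`/`discriminator` are the
# builders' outputs, `combined` is assumed to stack the generator's layers in
# front of the discriminator's, `X_train` and `latent_dim` come from the
# surrounding training code, and train_on_batch(X, y) is assumed to exist on
# NeuralNetwork as in the mlfromscratch implementation.
import numpy as np

half_batch = 64

# Train the discriminator on a half batch of real and of generated samples
idx = np.random.randint(0, X_train.shape[0], half_batch)
real_imgs = X_train[idx]
noise = np.random.normal(0, 1, (half_batch, latent_dim))
gen_imgs = generator.predict(noise)

# The Dense(2) + softmax head takes one-hot targets: [1, 0] = real, [0, 1] = fake
valid = np.concatenate((np.ones((half_batch, 1)), np.zeros((half_batch, 1))), axis=1)
fake = np.concatenate((np.zeros((half_batch, 1)), np.ones((half_batch, 1))), axis=1)

d_loss_real = discriminator.train_on_batch(real_imgs, valid)
d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)

# Train the generator through the stacked model to make generated samples
# classify as 'valid'
noise = np.random.normal(0, 1, (half_batch, latent_dim))
g_loss = combined.train_on_batch(noise, valid)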
# Rescaled labels {-1, 1}
rescaled_y_train = 2 * y_train - np.ones(np.shape(y_train))
rescaled_y_test = 2 * y_test - np.ones(np.shape(y_test))

# .......
# SETUP
# .......
adaboost = Adaboost(n_clf=8)
naive_bayes = NaiveBayes()
knn = KNN(k=4)
logistic_regression = LogisticRegression()

mlp = NeuralNetwork(n_iterations=300,
                    optimizer=Adam(),
                    loss=CrossEntropy,
                    batch_size=50)
mlp.add(Dense(input_shape=(n_features,), n_units=64))
mlp.add(Activation('relu'))
mlp.add(Dense(n_units=64))
mlp.add(Activation('relu'))
mlp.add(Dense(n_units=2))
mlp.add(Activation('softmax'))

perceptron = Perceptron()
decision_tree = ClassificationTree()
random_forest = RandomForest(n_estimators=50)
support_vector_machine = SupportVectorMachine()
lda = LDA()
gbc = GradientBoostingClassifier(n_estimators=50,
                                 learning_rate=0.9,
                                 max_depth=2)
xgboost = XGBoost(n_estimators=50, learning_rate=0.5)

# ........
# TRAIN
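# The TRAIN section is cut off above; what follows is a hedged sketch of how
# these models are typically fitted in the mlfromscratch demos, not the
# original code. Note the label conventions: Adaboost and the SVM expect the
# rescaled {-1, 1} labels, the neural models expect one-hot targets
# (to_categorical is assumed to come from mlfromscratch.utils), the rest take
# raw class labels, and this KNN is lazy so it is only used at predict time.
adaboost.fit(X_train, rescaled_y_train)
naive_bayes.fit(X_train, y_train)
logistic_regression.fit(X_train, y_train)
mlp.fit(X_train, to_categorical(y_train))
perceptron.fit(X_train, to_categorical(y_train))
decision_tree.fit(X_train, y_train)
random_forest.fit(X_train, y_train)
support_vector_machine.fit(X_train, rescaled_y_train)
lda.fit(X_train, y_train)
gbc.fit(X_train, y_train)
xgboost.fit(X_train, y_train)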
def main():

    data = datasets.load_digits()
    X = data.data
    y = data.target

    # Convert to one-hot encoding (CrossEntropy expects one-hot targets)
    y = to_categorical(y.astype("int"))

    n_samples, n_features = np.shape(X)
    n_hidden = 512

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

    optimizer = Adam()

    #-----
    # MLP
    #-----
    # clf = NeuralNetwork(optimizer=optimizer,
    #                     loss=CrossEntropy,
    #                     validation_data=(X_test, y_test))
    # clf.add(Dense(n_hidden, input_shape=(8*8,)))
    # clf.add(Activation('leaky_relu'))
    # clf.add(Dense(n_hidden))
    # clf.add(Activation('leaky_relu'))
    # clf.add(Dropout(0.25))
    # clf.add(Dense(n_hidden))
    # clf.add(Activation('leaky_relu'))
    # clf.add(Dropout(0.25))
    # clf.add(Dense(n_hidden))
    # clf.add(Activation('leaky_relu'))
    # clf.add(Dropout(0.25))
    # clf.add(Dense(10))
    # clf.add(Activation('softmax'))
    # print()
    # clf.summary(name="MLP")
    # clf.fit(X_train, y_train, n_epochs=50, batch_size=256)
    # clf.plot_errors()
    # y_pred = np.argmax(clf.predict(X_test), axis=1)
    # accuracy = accuracy_score(y_test, y_pred)
    # print("Accuracy:", accuracy)

    #----------
    # Conv Net
    #----------

    # Reshape X to (n_samples, channels, height, width)
    X_train = X_train.reshape((-1, 1, 8, 8))
    X_test = X_test.reshape((-1, 1, 8, 8))

    clf = NeuralNetwork(optimizer=optimizer,
                        loss=CrossEntropy,
                        validation_data=(X_test, y_test))

    clf.add(Conv2D(n_filters=16, filter_shape=(3, 3), input_shape=(1, 8, 8), padding='same'))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.25))
    clf.add(BatchNormalization())
    clf.add(Conv2D(n_filters=32, filter_shape=(3, 3), padding='same'))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.25))
    clf.add(BatchNormalization())
    clf.add(Flatten())
    clf.add(Dense(256))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.5))
    clf.add(BatchNormalization())
    clf.add(Dense(10))
    clf.add(Activation('softmax'))

    print()
    clf.summary(name="ConvNet")

    train_err, val_err = clf.fit(X_train, y_train, n_epochs=50, batch_size=256)

    # Training and validation error plot
    n = len(train_err)
    training, = plt.plot(range(n), train_err, label="Training Error")
    validation, = plt.plot(range(n), val_err, label="Validation Error")
    plt.legend(handles=[training, validation])
    plt.title("Error Plot")
    plt.ylabel('Error')
    plt.xlabel('Iterations')
    plt.show()

    # Make a prediction of the test set
    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)
    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    # Flatten the images again before plotting
    X_test = X_test.reshape(-1, 8 * 8)

    # Reduce dimension to 2D using PCA and plot the results
    Plot().plot_in_2d(X_test, y_pred, title="Convolutional Neural Network", accuracy=accuracy, legend_labels=range(10))
def main():

    optimizer = Adam()

    #-----
    # MLP
    #-----

    data = datasets.load_digits()
    X = data.data
    y = data.target

    # Convert to one-hot encoding
    y = to_categorical(y.astype("int"))

    n_samples, n_features = np.shape(X)
    n_hidden = 512

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

    clf = NeuralNetwork(optimizer=optimizer,
                        loss=CrossEntropy,
                        validation_data=(X_test, y_test))

    clf.add(Dense(n_hidden, input_shape=(8*8,)))
    clf.add(Activation('leaky_relu'))
    clf.add(Dense(n_hidden))
    clf.add(Activation('leaky_relu'))
    clf.add(Dropout(0.25))
    clf.add(Dense(n_hidden))
    clf.add(Activation('leaky_relu'))
    clf.add(Dropout(0.25))
    clf.add(Dense(n_hidden))
    clf.add(Activation('leaky_relu'))
    clf.add(Dropout(0.25))
    clf.add(Dense(10))
    clf.add(Activation('softmax'))

    print()
    clf.summary(name="MLP")

    train_err, val_err = clf.fit(X_train, y_train, n_epochs=50, batch_size=256)

    # Training and validation error plot
    n = len(train_err)
    training, = plt.plot(range(n), train_err, label="Training Error")
    validation, = plt.plot(range(n), val_err, label="Validation Error")
    plt.legend(handles=[training, validation])
    plt.title("Error Plot")
    plt.ylabel('Error')
    plt.xlabel('Iterations')
    plt.show()

    # Predict labels of the test data
    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)
    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    # Reduce dimension to 2D using PCA and plot the results
    Plot().plot_in_2d(X_test, y_pred, title="Multilayer Perceptron", accuracy=accuracy, legend_labels=range(10))
def main():

    data = datasets.load_digits()
    X = data.data
    y = data.target

    # Convert to one-hot encoding (CrossEntropy expects one-hot targets)
    y = to_categorical(y.astype("int"))

    n_samples, n_features = np.shape(X)
    n_hidden = 512

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

    optimizer = Adam()

    #-----
    # MLP
    #-----
    # clf = NeuralNetwork(optimizer=optimizer,
    #                     loss=CrossEntropy,
    #                     validation_data=(X_test, y_test))
    # clf.add(Dense(n_hidden, input_shape=(8*8,)))
    # clf.add(Activation('leaky_relu'))
    # clf.add(Dense(n_hidden))
    # clf.add(Activation('leaky_relu'))
    # clf.add(Dropout(0.25))
    # clf.add(Dense(n_hidden))
    # clf.add(Activation('leaky_relu'))
    # clf.add(Dropout(0.25))
    # clf.add(Dense(n_hidden))
    # clf.add(Activation('leaky_relu'))
    # clf.add(Dropout(0.25))
    # clf.add(Dense(10))
    # clf.add(Activation('softmax'))
    # clf.fit(X_train, y_train, n_iterations=50, batch_size=256)
    # clf.plot_errors()
    # y_pred = np.argmax(clf.predict(X_test), axis=1)
    # accuracy = accuracy_score(y_test, y_pred)
    # print("Accuracy:", accuracy)

    #----------
    # Conv Net
    #----------

    # Reshape X to (n_samples, channels, height, width)
    X_train = X_train.reshape((-1, 1, 8, 8))
    X_test = X_test.reshape((-1, 1, 8, 8))

    clf = NeuralNetwork(optimizer=optimizer,
                        loss=CrossEntropy,
                        validation_data=(X_test, y_test))

    clf.add(Conv2D(n_filters=16, filter_shape=(3, 3), input_shape=(1, 8, 8), padding='same'))
    clf.add(Activation('relu'))
    clf.add(Conv2D(n_filters=32, filter_shape=(3, 3), padding='same'))
    clf.add(Activation('relu'))
    clf.add(Flatten())
    clf.add(Dense(128))
    clf.add(Activation('relu'))
    clf.add(Dense(10))
    clf.add(Activation('softmax'))

    clf.fit(X_train, y_train, n_iterations=50, batch_size=256)
    clf.plot_errors()

    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)
    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    # Flatten the images again before plotting
    X_test = X_test.reshape(-1, 8 * 8)

    # Reduce dimension to two using PCA and plot the results
    Plot().plot_in_2d(X_test, y_pred, title="Convolutional Neural Network", accuracy=accuracy, legend_labels=range(10))
def main():

    #----------
    # Conv Net
    #----------

    optimizer = Adam()

    data = datasets.load_digits()
    X = data.data
    y = data.target

    # Convert to one-hot encoding
    y = to_categorical(y.astype("int"))

    n_samples, n_features = np.shape(X)
    n_hidden = 512

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

    # Reshape X to (n_samples, channels, height, width)
    X_train = X_train.reshape((-1, 1, 8, 8))
    X_test = X_test.reshape((-1, 1, 8, 8))

    clf = NeuralNetwork(optimizer=optimizer,
                        loss=CrossEntropy,
                        validation_data=(X_test, y_test))

    clf.add(Conv2D(n_filters=16, filter_shape=(3, 3), input_shape=(1, 8, 8), padding='same'))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.25))
    clf.add(BatchNormalization())
    clf.add(Conv2D(n_filters=32, filter_shape=(3, 3), padding='same'))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.25))
    clf.add(BatchNormalization())
    clf.add(Flatten())
    clf.add(Dense(256))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.5))
    clf.add(BatchNormalization())
    clf.add(Dense(10))
    clf.add(Activation('softmax'))

    print()
    clf.summary(name="ConvNet")

    train_err, val_err = clf.fit(X_train, y_train, n_epochs=50, batch_size=256)

    # Training and validation error plot
    n = len(train_err)
    training, = plt.plot(range(n), train_err, label="Training Error")
    validation, = plt.plot(range(n), val_err, label="Validation Error")
    plt.legend(handles=[training, validation])
    plt.title("Error Plot")
    plt.ylabel('Error')
    plt.xlabel('Iterations')
    plt.show()

    # Predict labels of the test data
    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)
    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    # Flatten data set
    X_test = X_test.reshape(-1, 8 * 8)

    # Reduce dimension to 2D using PCA and plot the results
    Plot().plot_in_2d(X_test, y_pred, title="Convolutional Neural Network", accuracy=accuracy, legend_labels=range(10))