class main():
    """Driver class: trains, tunes and tests a CNN classifier on MNIST,
    saving accuracy/loss plots and a CSV of test predictions.

    Relies on module-level names defined elsewhere in this file:
    ``load_data``, ``CNN``, ``plt`` (matplotlib.pyplot), ``np``, ``pd``.
    """

    def __init__(self):
        # load_data(1) -> training split; presumably (images, labels) — defined elsewhere.
        self.train_img, self.train_lbl = load_data(1)
        self.input_shape = (28, 28, 1)  # MNIST: 28x28 greyscale
        # Hyper-parameter values obtained after tuning using grid search
        self.eta = 1e-3
        self.epochs = 20
        self.batch_size = 64
        self.kernel_size = (4, 4)
        self.model = CNN()

    def train(self, activation):
        """Train one classifier per activation function and save plots.

        Parameters
        ----------
        activation : list of str or None
            Activation names to train with; ``None`` defaults to ``['relu']``.
        """
        history = {}
        if activation is None:
            activation = ['relu']
        for act in activation:
            print("\nTraining for {} activation".format(act))
            self.model.create_classifier(input_shape=self.input_shape,
                                         activation=act,
                                         eta=self.eta,
                                         kernel_size=self.kernel_size)
            history[act] = self.model.train_classifier(
                self.train_img, self.train_lbl,
                epochs=self.epochs,
                batch_size=self.batch_size,
                activation=act)
            self._save_activation_plots(act, history[act])

        # Combined figures: one curve per activation on a shared plot.
        self._save_combined_plot(history, 'acc', 'accuracy', 'accuracy',
                                 'lower right',
                                 'Accuracies for different activations',
                                 'combined train accuracy.png')
        self._save_combined_plot(history, 'val_acc', 'val accuracy', 'accuracy',
                                 'lower right',
                                 'Validation Accuracies for different activations',
                                 'combined val accuracy.png')
        self._save_combined_plot(history, 'loss', 'loss', 'loss',
                                 'upper right',
                                 'Loss for different activations',
                                 'combined train loss.png')
        self._save_combined_plot(history, 'val_loss', 'val loss', 'loss',
                                 'upper right',
                                 'Validation loss for different activations',
                                 'combined val loss.png')

    def _save_activation_plots(self, act, hist):
        """Save per-activation train/validation accuracy and loss figures."""
        # summarize history for accuracy
        plt.figure(str(act) + ' acc')
        plt.plot(hist.history['acc'])
        plt.plot(hist.history['val_acc'])
        plt.title(str(act) + ' model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='lower right')
        plt.savefig(str(act) + '_accuracy.png')
        # summarize history for loss
        plt.figure(str(act) + ' loss')
        plt.plot(hist.history['loss'])
        plt.plot(hist.history['val_loss'])
        plt.title(str(act) + ' model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper right')
        plt.savefig(str(act) + '_loss.png')

    def _save_combined_plot(self, history, metric, fig_name, ylabel,
                            loc, title, filename):
        """Plot ``metric`` for every trained activation on one figure.

        ``history`` maps activation name -> Keras-style History whose
        ``.history`` dict holds the per-epoch metric lists.
        """
        plt.figure(fig_name)
        for act in history:
            plt.plot(history[act].history[metric])
        plt.xlabel('epoch')
        plt.ylabel(ylabel)
        # dict preserves insertion order, so the legend matches plot order
        plt.legend(list(history), loc=loc)
        plt.title(title)
        plt.savefig(filename)

    def tune(self):
        """Run hyper-parameter tuning (grid search) on the training data.

        Returns the grid-search result object produced by the model.
        """
        self.model.create_classifier(input_shape=self.input_shape,
                                     eta=self.eta,
                                     kernel_size=self.kernel_size)
        return self.model.tune_hyperparameters(self.train_img, self.train_lbl)

    def test(self, activation):
        """Evaluate the classifier(s) on the test split and dump predictions.

        Parameters
        ----------
        activation : list of str or None
            Activation names to test; ``None`` defaults to ``['relu']``.
        """
        if activation is None:
            activation = ['relu']
        test_img, test_lbl = load_data(0)  # 0 -> test split
        for act in activation:
            print("Test for " + act + ":")
            test_accuracy, test_loss, predictions = self.model.test_classifier(
                test_img, test_lbl, act)
            print("\n")
            # Round soft predictions to integer class labels before saving.
            # NOTE(review): the CSV is rewritten on every iteration, so only
            # the last activation's predictions survive — confirm intended.
            predictions = np.round(predictions).astype(int)
            df = pd.DataFrame(predictions)
            df.to_csv("mnist.csv", header=None, index=None)