class ConvWrapper(object):
    """
    Example wrapper object for grid_search().

    Wraps an existing ConvolutionalNeuralNetwork so it gains the optional
    setup() hook and the required eval() hook without modifying the model
    class itself.  The alternative to a wrapper is to attach an eval()
    method directly to the existing model and hand that object (either
    the type or an instance) to grid_search().

    Hyperparameters (dropout_conv_prob, dropout_hidden_prob,
    learning_rate, epochs, batch_size) are assigned onto the instance by
    the caller before setup() runs — presumably by grid_search(); confirm
    against that caller.
    """

    def __init__(self):
        super(ConvWrapper, self).__init__()
        self.net = ConvolutionalNeuralNetwork()
        self.net.initialize_mnist()

    def setup(self):
        # Training happens here rather than in eval() so that eval()
        # stays a pure accuracy measurement.
        self.net.create_model_functions(self.dropout_conv_prob,
                                        self.dropout_hidden_prob,
                                        self.learning_rate)
        n_train = len(self.net.trX)
        for _ in range(self.epochs):
            # Walk the training set in fixed-size minibatches; any batch
            # that would run past n_train is never trained on.
            for hi in range(self.batch_size, n_train, self.batch_size):
                lo = hi - self.batch_size
                self.net.cost = self.net.train(self.net.trX[lo:hi],
                                               self.net.trY[lo:hi])

    def eval(self):
        # Fraction of test samples whose predicted class matches the
        # one-hot test label.
        truth = np.argmax(self.net.teY, axis = 1)
        guesses = self.net.predict(self.net.teX)
        return np.mean(truth == guesses)
class ConvWrapper(object):
    """
    Demonstration wrapper for grid_search().

    Shows how to wrap an existing model when you would rather not add a
    setup() method (optional) and an eval() method (required) to the
    model class itself.  The alternative is to attach eval() directly to
    the existing model and give that object (type or instance) to
    grid_search().

    NOTE(review): dropout_conv_prob, dropout_hidden_prob, learning_rate,
    epochs, and batch_size are read from self but never set here — they
    are expected to be assigned externally before setup() is called.
    """

    def __init__(self):
        super(ConvWrapper, self).__init__()
        self.net = ConvolutionalNeuralNetwork()
        self.net.initialize_mnist()

    def setup(self):
        # Train here so eval() remains nothing but the accuracy check.
        net = self.net
        net.create_model_functions(self.dropout_conv_prob,
                                   self.dropout_hidden_prob,
                                   self.learning_rate)
        starts = range(0, len(net.trX), self.batch_size)
        ends = range(self.batch_size, len(net.trX), self.batch_size)
        for _epoch in range(self.epochs):
            # zip() pairs each batch start with its end; a trailing
            # incomplete batch has no end and is skipped.
            for lo, hi in zip(starts, ends):
                net.cost = net.train(net.trX[lo:hi], net.trY[lo:hi])

    def eval(self):
        # Mean test-set accuracy against the one-hot labels.
        predicted = self.net.predict(self.net.teX)
        actual = np.argmax(self.net.teY, axis=1)
        return np.mean(actual == predicted)
def calculate_catastrophic_interference(num_tasks, exclude_start, exclude_end,
                                        top_layer = "cnn", save_figs = False,
                                        verbose = False, epochs = 20,
                                        batch_size = 100):
    """
    Measure catastrophic interference on MNIST-style tasks.

    A convnet is first trained with tasks [exclude_start, exclude_end)
    held out, then the held-out tasks are reintroduced through one or
    more top-layer models and the resulting accuracy change is reported.

    num_tasks:     total number of tasks
    exclude_start: first task id to exclude (inclusive)
    exclude_end:   end of the excluded task range (exclusive)
    top_layer:     substring-matched selector; may mention "cnn", "ella",
                   "lr", and/or "svc" to choose which top layers to run
    save_figs:     save accuracy plots under figures/
    verbose:       print per-task accuracy breakdowns
    epochs:        training epochs for the convnet phases
    batch_size:    minibatch size for the convnet phases
    """
    # list(...) so task-id membership tests and "{0}".format(excluded)
    # behave the same on Python 3 (a bare range formats as "range(a, b)")
    excluded = list(range(exclude_start, exclude_end))
    task_nums = [i for i in range(num_tasks) if i not in excluded]
    start = time.time()
    cnn = ConvolutionalNeuralNetwork()
    cnn.initialize_mnist()
    cnn.trX, cnn.trY, trXE, trYE = split_dataset(excluded, cnn.trX, cnn.trY)
    cnn.teX, cnn.teY, teXE, teYE = split_dataset(excluded, cnn.teX, cnn.teY)
    cnn.create_model_functions()
    colors = ["#00FF00", "#0000FF", "#00FFFF", "#FFFF00", "#FF00FF",
              "#000000", "#888888", "#FF8800", "#88FF00", "#FF0088"]
    print("\nTraining on tasks {0}, excluding tasks {1}".format(task_nums, excluded))
    base_accuracies = train_per_task(cnn, num_tasks, verbose, epochs, batch_size)
    end = time.time()
    print("Initial training: {0:0.02f}sec".format(end-start))
    # base model, trained without excluded tasks
    # (which are then added back in one of the three top-layer models)

    total_trX = np.concatenate((cnn.trX, trXE), axis = 0)
    total_trY = np.concatenate((cnn.trY, trYE), axis = 0)
    total_teX = np.concatenate((cnn.teX, teXE), axis = 0)
    total_teY = np.concatenate((cnn.teY, teYE), axis = 0)

    # activate the training set in chunks to bound memory use; floor
    # division keeps the slice bounds ints on Python 3 (true division
    # yields floats, and slicing with floats raises TypeError)
    num_chunks = 20
    chunk = len(total_trX) // num_chunks
    # NOTE(review): rows beyond chunk*num_chunks are dropped from trA,
    # matching the original chunking behavior
    trA = np.concatenate([cnn.activate(total_trX[chunk*i:chunk*(i+1)])
                          for i in range(num_chunks)])
    teA = cnn.activate(total_teX)
    trC = np.argmax(total_trY, axis = 1)
    teC = np.argmax(total_teY, axis = 1)

    # convolutional neural network
    if "cnn" in top_layer:
        print("\nRetraining convolutional neural network on all tasks after excluding {0} from initial training".format(excluded))
        start = time.time()
        # fit model with data
        cnn_accs = train_new_tasks(cnn, total_trX, total_trY, total_teX, total_teY, num_tasks, verbose, epochs, batch_size)
        end = time.time()
        print("ConvNet Retraining: {0:0.02f}sec".format(end-start))
        # show accuracy improvement from additional model layer
        print("[ConvNet(exclusion)] Testing data accuracy: {0:0.04f}".format(base_accuracies["total"][-1]))
        print("[ConvNet(exclusion)+ConvNet(all)] Testing data accuracy: {0:0.04f}".format(cnn_accs["total"][-1]))
        print("[(CN(E)+CN(A))-CN(E)] Accuracy improvement: {0:0.04f}".format(cnn_accs["total"][-1]-base_accuracies["total"][-1]))
        # generate and save accuracy figures
        if save_figs:
            for t in range(num_tasks):
                plt.plot(np.arange(0, epochs), cnn_accs[t], color = colors[t])
            plt.plot(np.arange(0, epochs), cnn_accs["total"], color = "#FF0000", marker = "o")
            plt.legend(["Task {0}".format(t) for t in task_nums]+["Total"], loc = "lower right")
            plt.axis([0, epochs-1, 0, 1])
            plt.xlabel("Epoch")
            plt.ylabel("Accuracy")
            plt.title("Model Accuracy")
            plt.savefig("figures/trained on {0}, excluded {1}, then retrained on all.png".format(task_nums, excluded), bbox_inches = "tight")
            plt.close()

    # efficient lifelong learning algorithm
    if "ella" in top_layer:
        print("\nTraining efficient lifelong learning algorithm on all tasks after excluding {0} from convnet training".format(excluded))
        start = time.time()
        # fit model with data, one binarized one-vs-rest problem per task
        ella = ELLA(d = 625, k = 5, base_learner = LogisticRegression, base_learner_kwargs = {"tol": 10**-2}, mu = 10**-3)
        for task in range(num_tasks):
            ella.fit(trA, binarize(trC, task), task)
        # predicted class = task whose model gives the highest log-probability
        predictions = np.argmax(np.asarray([ella.predict_logprobs(teA, i) for i in range(ella.T)]), axis = 0)
        ella_acc = np.mean(predictions == teC)
        end = time.time()
        print("ELLA: {0:0.02f}sec".format(end-start))
        # show accuracy improvement from additional model layer
        print("[ConvNet] Testing data accuracy: {0:0.04f}".format(base_accuracies["total"][-1]))
        print("[ConvNet+ELLA] Testing data accuracy: {0:0.04f}".format(ella_acc))
        print("[(CN+ELLA)-CN] Accuracy improvement: {0:0.04f}".format(ella_acc-base_accuracies["total"][-1]))
        # generate and save accuracy figures
        if save_figs:
            # TODO: need per-task or per-epoch accuracies for a useful
            # ELLA visualization
            pass

    # logistic regression model
    if "lr" in top_layer:
        print("\nTraining logistic regression model on all tasks after excluding {0} from convnet training".format(excluded))
        start = time.time()
        # fit model with data
        lr = LogisticRegression()
        lr.fit(trA, trC)
        logreg_accs = find_model_task_accuracies(lr, num_tasks, teA, teC)
        end = time.time()
        print("Logistic Regression: {0:0.02f}sec".format(end-start))
        # show accuracy improvement from additional model layer
        print("[ConvNet] Testing data accuracy: {0:0.04f}".format(base_accuracies["total"][-1]))
        print("[ConvNet+LogReg] Testing data accuracy: {0:0.04f}".format(logreg_accs["total"]))
        print("[(CN+LR)-CN] Accuracy improvement: {0:0.04f}".format(logreg_accs["total"]-base_accuracies["total"][-1]))
        if verbose:
            print("\nLogistic regression model accuracies after exclusion training:")
            for key, value in logreg_accs.items():
                print("Task: {0}, accuracy: {1:0.04f}".format(key, value))
        # generate and save accuracy figures
        if save_figs:
            plotX = ["Task {0}".format(t) for t in range(num_tasks)]+["Total", "Average"]
            # list(...) so np.mean works on Python 3 dict views;
            # NOTE(review): this mean also includes the "total" entry
            plotY = [logreg_accs[t] for t in range(num_tasks)]+[logreg_accs["total"], np.mean(list(logreg_accs.values()))]
            plt.bar(range(len(plotX)), plotY)
            plt.xticks(range(len(plotX)), plotX)
            plt.title("Model Accuracy")
            plt.savefig("figures/trained on {0}, excluded {1}, then logreg.png".format(task_nums, excluded), bbox_inches = "tight")
            plt.close()

    # support vector classifier
    if "svc" in top_layer:
        print("\nTraining linear support vector classifier on all tasks after excluding {0} from convnet training".format(excluded))
        start = time.time()
        # fit model with data
        svc = LinearSVC()
        svc.fit(trA, trC)
        svc_accs = find_model_task_accuracies(svc, num_tasks, teA, teC)
        end = time.time()
        print("Support Vector Classifier: {0:0.02f}sec".format(end-start))
        # show accuracy improvement from additional model layer
        print("[ConvNet] Testing data accuracy: {0:0.04f}".format(base_accuracies["total"][-1]))
        print("[ConvNet+SVC] Testing data accuracy: {0:0.04f}".format(svc_accs["total"]))
        print("[(CN+SVC)-CN] Accuracy improvement: {0:0.04f}".format(svc_accs["total"]-base_accuracies["total"][-1]))
        if verbose:
            print("\nSupport vector classifier accuracies after exclusion training:")
            for key, value in svc_accs.items():
                print("Task: {0}, accuracy: {1:0.04f}".format(key, value))
        # generate and save accuracy figures
        if save_figs:
            plotX = ["Task {0}".format(t) for t in range(num_tasks)]+["Total", "Average"]
            # list(...) so np.mean works on Python 3 dict views;
            # NOTE(review): this mean also includes the "total" entry
            plotY = [svc_accs[t] for t in range(num_tasks)]+[svc_accs["total"], np.mean(list(svc_accs.values()))]
            plt.bar(range(len(plotX)), plotY)
            plt.xticks(range(len(plotX)), plotX)
            plt.title("Model Accuracy")
            plt.savefig("figures/trained on {0}, excluded {1}, then svc.png".format(task_nums, excluded), bbox_inches = "tight")
            plt.close()

    print("")
def calculate_catastrophic_interference(num_tasks, exclude_start, exclude_end,
                                        top_layer="cnn", save_figs=False,
                                        verbose=False, epochs=20,
                                        batch_size=100):
    """
    Measure catastrophic interference on MNIST-style tasks.

    A convnet is trained with tasks [exclude_start, exclude_end) held
    out; the held-out tasks are then reintroduced through one or more
    top-layer models and the accuracy change is reported.

    num_tasks:     total number of tasks
    exclude_start: first task id to exclude (inclusive)
    exclude_end:   end of the excluded task range (exclusive)
    top_layer:     substring-matched; may mention "cnn", "ella", "lr",
                   and/or "svc" to select which top layers to run
    save_figs:     save accuracy plots under figures/
    verbose:       print per-task accuracy breakdowns
    epochs:        training epochs for the convnet phases
    batch_size:    minibatch size for the convnet phases
    """
    # materialize as a list so "{0}".format(excluded) prints task ids on
    # Python 3 instead of "range(a, b)"
    excluded = list(range(exclude_start, exclude_end))
    task_nums = [i for i in range(num_tasks) if i not in excluded]
    start = time.time()
    cnn = ConvolutionalNeuralNetwork()
    cnn.initialize_mnist()
    cnn.trX, cnn.trY, trXE, trYE = split_dataset(excluded, cnn.trX, cnn.trY)
    cnn.teX, cnn.teY, teXE, teYE = split_dataset(excluded, cnn.teX, cnn.teY)
    cnn.create_model_functions()
    colors = [
        "#00FF00", "#0000FF", "#00FFFF", "#FFFF00", "#FF00FF", "#000000",
        "#888888", "#FF8800", "#88FF00", "#FF0088"
    ]
    print("\nTraining on tasks {0}, excluding tasks {1}".format(
        task_nums, excluded))
    base_accuracies = train_per_task(cnn, num_tasks, verbose, epochs,
                                     batch_size)
    end = time.time()
    print("Initial training: {0:0.02f}sec".format(end - start))
    # base model, trained without excluded tasks
    # (which are then added back in one of the three top-layer models)

    total_trX = np.concatenate((cnn.trX, trXE), axis=0)
    total_trY = np.concatenate((cnn.trY, trYE), axis=0)
    total_teX = np.concatenate((cnn.teX, teXE), axis=0)
    total_teY = np.concatenate((cnn.teY, teYE), axis=0)

    # activate the training set in chunks to bound memory; floor division
    # keeps the slice bounds ints on Python 3 (float indices raise)
    num_chunks = 20
    chunk = len(total_trX) // num_chunks
    # NOTE(review): rows beyond chunk*num_chunks are dropped from trA,
    # matching the original chunking behavior
    trA = np.concatenate([
        cnn.activate(total_trX[chunk * i:chunk * (i + 1)])
        for i in range(num_chunks)
    ])
    teA = cnn.activate(total_teX)
    trC = np.argmax(total_trY, axis=1)
    teC = np.argmax(total_teY, axis=1)

    # convolutional neural network
    if "cnn" in top_layer:
        print(
            "\nRetraining convolutional neural network on all tasks after excluding {0} from initial training"
            .format(excluded))
        start = time.time()
        # fit model with data
        cnn_accs = train_new_tasks(cnn, total_trX, total_trY, total_teX,
                                   total_teY, num_tasks, verbose, epochs,
                                   batch_size)
        end = time.time()
        print("ConvNet Retraining: {0:0.02f}sec".format(end - start))
        # show accuracy improvement from additional model layer
        print("[ConvNet(exclusion)] Testing data accuracy: {0:0.04f}"
              .format(base_accuracies["total"][-1]))
        print("[ConvNet(exclusion)+ConvNet(all)] Testing data accuracy: {0:0.04f}"
              .format(cnn_accs["total"][-1]))
        print("[(CN(E)+CN(A))-CN(E)] Accuracy improvement: {0:0.04f}"
              .format(cnn_accs["total"][-1] - base_accuracies["total"][-1]))
        # generate and save accuracy figures
        if save_figs:
            for t in range(num_tasks):
                plt.plot(np.arange(0, epochs), cnn_accs[t], color=colors[t])
            plt.plot(np.arange(0, epochs), cnn_accs["total"],
                     color="#FF0000", marker="o")
            plt.legend(["Task {0}".format(t) for t in task_nums] + ["Total"],
                       loc="lower right")
            plt.axis([0, epochs - 1, 0, 1])
            plt.xlabel("Epoch")
            plt.ylabel("Accuracy")
            plt.title("Model Accuracy")
            plt.savefig(
                "figures/trained on {0}, excluded {1}, then retrained on all.png"
                .format(task_nums, excluded), bbox_inches="tight")
            plt.close()

    # efficient lifelong learning algorithm
    if "ella" in top_layer:
        print(
            "\nTraining efficient lifelong learning algorithm on all tasks after excluding {0} from convnet training"
            .format(excluded))
        start = time.time()
        # fit model with data, one binarized one-vs-rest problem per task
        ella = ELLA(d=625, k=5, base_learner=LogisticRegression,
                    base_learner_kwargs={"tol": 10**-2}, mu=10**-3)
        for task in range(num_tasks):
            ella.fit(trA, binarize(trC, task), task)
        # predicted class = task model with the highest log-probability
        predictions = np.argmax(np.asarray(
            [ella.predict_logprobs(teA, i) for i in range(ella.T)]), axis=0)
        ella_acc = np.mean(predictions == teC)
        end = time.time()
        print("ELLA: {0:0.02f}sec".format(end - start))
        # show accuracy improvement from additional model layer
        print("[ConvNet] Testing data accuracy: {0:0.04f}"
              .format(base_accuracies["total"][-1]))
        print("[ConvNet+ELLA] Testing data accuracy: {0:0.04f}"
              .format(ella_acc))
        print("[(CN+ELLA)-CN] Accuracy improvement: {0:0.04f}"
              .format(ella_acc - base_accuracies["total"][-1]))
        # generate and save accuracy figures
        if save_figs:
            # TODO: need per-task or per-epoch accuracies for a useful
            # ELLA visualization
            pass

    # logistic regression model
    if "lr" in top_layer:
        print(
            "\nTraining logistic regression model on all tasks after excluding {0} from convnet training"
            .format(excluded))
        start = time.time()
        # fit model with data
        lr = LogisticRegression()
        lr.fit(trA, trC)
        logreg_accs = find_model_task_accuracies(lr, num_tasks, teA, teC)
        end = time.time()
        print("Logistic Regression: {0:0.02f}sec".format(end - start))
        # show accuracy improvement from additional model layer
        print("[ConvNet] Testing data accuracy: {0:0.04f}"
              .format(base_accuracies["total"][-1]))
        print("[ConvNet+LogReg] Testing data accuracy: {0:0.04f}"
              .format(logreg_accs["total"]))
        print("[(CN+LR)-CN] Accuracy improvement: {0:0.04f}"
              .format(logreg_accs["total"] - base_accuracies["total"][-1]))
        if verbose:
            print(
                "\nLogistic regression model accuracies after exclusion training:"
            )
            for key, value in logreg_accs.items():
                print("Task: {0}, accuracy: {1:0.04f}".format(key, value))
        # generate and save accuracy figures
        if save_figs:
            plotX = ["Task {0}".format(t)
                     for t in range(num_tasks)] + ["Total", "Average"]
            # list(...) so np.mean works on Python 3 dict views;
            # NOTE(review): this mean also includes the "total" entry
            plotY = [logreg_accs[t] for t in range(num_tasks)] + [
                logreg_accs["total"], np.mean(list(logreg_accs.values()))
            ]
            plt.bar(range(len(plotX)), plotY)
            plt.xticks(range(len(plotX)), plotX)
            plt.title("Model Accuracy")
            plt.savefig(
                "figures/trained on {0}, excluded {1}, then logreg.png".format(
                    task_nums, excluded), bbox_inches="tight")
            plt.close()

    # support vector classifier
    if "svc" in top_layer:
        print(
            "\nTraining linear support vector classifier on all tasks after excluding {0} from convnet training"
            .format(excluded))
        start = time.time()
        # fit model with data
        svc = LinearSVC()
        svc.fit(trA, trC)
        svc_accs = find_model_task_accuracies(svc, num_tasks, teA, teC)
        end = time.time()
        print("Support Vector Classifier: {0:0.02f}sec".format(end - start))
        # show accuracy improvement from additional model layer
        print("[ConvNet] Testing data accuracy: {0:0.04f}"
              .format(base_accuracies["total"][-1]))
        print("[ConvNet+SVC] Testing data accuracy: {0:0.04f}"
              .format(svc_accs["total"]))
        print("[(CN+SVC)-CN] Accuracy improvement: {0:0.04f}"
              .format(svc_accs["total"] - base_accuracies["total"][-1]))
        if verbose:
            print(
                "\nSupport vector classifier accuracies after exclusion training:"
            )
            for key, value in svc_accs.items():
                print("Task: {0}, accuracy: {1:0.04f}".format(key, value))
        # generate and save accuracy figures
        if save_figs:
            plotX = ["Task {0}".format(t)
                     for t in range(num_tasks)] + ["Total", "Average"]
            # list(...) so np.mean works on Python 3 dict views;
            # NOTE(review): this mean also includes the "total" entry
            plotY = [svc_accs[t] for t in range(num_tasks)] + [
                svc_accs["total"], np.mean(list(svc_accs.values()))
            ]
            plt.bar(range(len(plotX)), plotY)
            plt.xticks(range(len(plotX)), plotX)
            plt.title("Model Accuracy")
            plt.savefig(
                "figures/trained on {0}, excluded {1}, then svc.png".format(
                    task_nums, excluded), bbox_inches="tight")
            plt.close()

    print("")