Code Example #1
    def test(self, teX, teY, task, batch_size=100):
        # vround maps probability outputs to binary 0/1 predictions
        vround = np.vectorize(lambda x: int(round(x)))

        if self.mode == "frozen" or self.mode == "unfrozen":
            # one net per task: gather its positive-class probabilities in batches
            cnn = self.nets[task]
            probabilities = np.asarray([])
            for start in range(0, len(teX), batch_size):
                probabilities = np.append(
                    probabilities,
                    cnn.predict_probs(teX[start:start + batch_size])[:, 1])
            predictions = vround(probabilities)

        elif self.mode == "stacking":
            # several nets per task: collect each net's probabilities, then average
            predictions = []
            for cnn in self.nets[task]:
                probabilities = np.asarray([])
                for start in range(0, len(teX), batch_size):
                    probabilities = np.append(
                        probabilities,
                        cnn.predict_probs(teX[start:start + batch_size])[:, 1])
                predictions.append(probabilities)
            # combine predictions from each of the task's nets
            predictions = vround(np.mean(predictions, axis=0))

        # element-wise accuracy: the binarized labels and the predictions are
        # both 1-D 0/1 vectors, so no reshaping is needed (adding
        # [:, np.newaxis] to the labels would broadcast the comparison
        # to an (N, N) matrix and skew the reported accuracy)
        return np.mean(binarize(teY, task) == predictions)
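Both test and the train method in the next example lean on a binarize helper that these snippets do not include. Judging from the call sites (a 0/1 vector marking which labels belong to the current task), a minimal sketch might look like the following; the body is a hypothetical reconstruction, not the project's actual helper.

import numpy as np

def binarize(labels, task):
    # hypothetical reconstruction: 1 where the label equals task, else 0
    return (np.asarray(labels) == task).astype(np.int64)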
Code Example #2
    def train(self, trX, trY, epochs=10, verbose=False, batch_size=100):
        if self.mode == "frozen":
            # only train nets for tasks we don't already have a net for
            tasks = np.setdiff1d(np.unique(trY), np.asarray(self.tasks))
        elif self.mode == "unfrozen" or self.mode == "stacking":
            # retrain on every task present in the data
            tasks = np.unique(trY)

        # For each task, train on a binarized random sampling: keep every
        # positive example of the current task plus a fraction of the other
        # tasks as negatives, since a net needs examples of both classes.
        for task in tasks:
            if verbose:
                print("Training new net for task {0}".format(task))

            trXr, trYr = random_sampling(data_set=trX,
                                         data_labels=trY,
                                         p_kept=0.2,
                                         to_keep=task)
            # two-column targets: [not this task, this task]
            trB = binarize(trYr, task)[:, np.newaxis]
            trB = np.concatenate((np.logical_not(trB).astype(np.int64), trB),
                                 axis=1)

            # the most recently trained net seeds the next one
            if self.mode == "frozen" or self.mode == "unfrozen":
                prev = None if len(self.nets) == 0 else self.nets[self.newest]
            elif self.mode == "stacking":
                prev = None if len(
                    self.nets) == 0 else self.nets[self.newest][-1]

            if self.mode == "unfrozen" and task in np.asarray(self.tasks):
                # continue training the existing net for this task
                cnn = self.nets[task]
                cnn.train_net(epochs, batch_size, trX=trXr, trY=trB)
            else:
                # build a fresh net, seeded from prev when one exists
                cnn = self.nnet(trXr, trB, prev, epochs)

            self.tasks.append(task)
            self.newest = task
            if self.mode == "frozen" or self.mode == "unfrozen":
                self.nets[task] = cnn
            elif self.mode == "stacking":
                # stacking keeps every net ever trained for a task
                if task not in self.nets:
                    self.nets[task] = []
                self.nets[task].append(cnn)

        return self
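The comment in train describes what random_sampling does: keep every example of the current task and a p_kept fraction of everything else as negatives. A minimal sketch under that reading follows; the body is an assumption matched to the call site, not the original function.

import numpy as np

def random_sampling(data_set, data_labels, p_kept=0.2, to_keep=0):
    # hypothetical reconstruction of the helper described in train():
    # keep all examples of to_keep, plus a random p_kept fraction of the rest
    labels = np.asarray(data_labels)
    keep = (labels == to_keep) | (np.random.rand(len(labels)) < p_kept)
    return data_set[keep], labels[keep]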
Code Example #3

import numpy as np
import caffe
from collections import Counter
# open_dataset and binarize are project-local helpers defined elsewhere

if __name__ == "__main__":
    caffe.set_device(0)
    caffe.set_mode_gpu()

    net = caffe.Net("examples/mnist/lenet_auto_test.prototxt",
                    "examples/mnist/lenet_iter_10000.caffemodel", caffe.TEST)
    trX, trY = open_dataset("examples/mnist/mnist_train_lmdb")
    teX, teY = open_dataset("examples/mnist/mnist_test_lmdb")
    predictions = net.predict(teX)
    print("\nNet Predictions:")
    print("Predicted: {0}".format(Counter(predictions)))
    print("Actual:    {0}".format(Counter(teY)))
    print("Accuracy:  {0:0.04f}".format(np.mean(predictions == teY)))

    # TODO: figure out how to extract activations for the top-layer model
    # (these seem to be the weights, not activations)
    print("\nNet Blobs:")
    for key, val in net.blobs.items():
        print("  {0}, {1}".format(key, val.data.shape))

    # make binary nets for the multi-net model; label values can be changed
    # here, but it must happen before the nets are trained and loaded
    print("\nBinarized Labels:")
    print(trY[:20].tolist())
    for c in range(10):
        print(binarize(trY[:20], c).tolist())
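open_dataset is another helper that is not shown. Since the paths point at Caffe's MNIST LMDBs, a plausible sketch reads the database with the lmdb package and decodes Caffe's Datum protobufs; treat the body as an assumption rather than the repository's actual loader.

import lmdb
import numpy as np
import caffe

def open_dataset(db_path):
    # hypothetical reconstruction: iterate a Caffe LMDB, decoding each Datum
    X, y = [], []
    with lmdb.open(db_path, readonly=True) as env:
        with env.begin() as txn:
            for _, raw in txn.cursor():
                datum = caffe.proto.caffe_pb2.Datum()
                datum.ParseFromString(raw)
                X.append(caffe.io.datum_to_array(datum))  # (channels, h, w)
                y.append(datum.label)
    return np.asarray(X), np.asarray(y)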
Code Example #4
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression  # assumed: scikit-learn
from sklearn.svm import LinearSVC
# ConvolutionalNeuralNetwork, ELLA, split_dataset, binarize, train_per_task,
# train_new_tasks, and find_model_task_accuracies are project-local imports

def calculate_catastrophic_interference(num_tasks,
                                        exclude_start,
                                        exclude_end,
                                        top_layer="cnn",
                                        save_figs=False,
                                        verbose=False,
                                        epochs=20,
                                        batch_size=100):
    # materialize the range so membership tests and printed messages show a list
    excluded = list(range(exclude_start, exclude_end))
    task_nums = [i for i in range(num_tasks) if i not in excluded]

    start = time.time()
    cnn = ConvolutionalNeuralNetwork()
    cnn.initialize_mnist()

    #	cnn.trX = cnn.trX[:int(len(cnn.trX)*.2)]
    #	cnn.trY = cnn.trY[:int(len(cnn.trY)*.2)]
    #	cnn.teX = cnn.teX[:int(len(cnn.teX)*.2)]
    #	cnn.teY = cnn.teY[:int(len(cnn.teY)*.2)]

    cnn.trX, cnn.trY, trXE, trYE = split_dataset(excluded, cnn.trX, cnn.trY)
    cnn.teX, cnn.teY, teXE, teYE = split_dataset(excluded, cnn.teX, cnn.teY)

    cnn.create_model_functions()

    colors = [
        "#00FF00", "#0000FF", "#00FFFF", "#FFFF00", "#FF00FF", "#000000",
        "#888888", "#FF8800", "#88FF00", "#FF0088"
    ]

    print("\nTraining on tasks {0}, excluding tasks {1}".format(
        task_nums, excluded))
    base_accuracies = train_per_task(cnn, num_tasks, verbose, epochs,
                                     batch_size)
    end = time.time()
    print("Initial training: {0:0.02f}sec".format(end - start))

    #	base model, trained without excluded tasks
    #	(which are then added back in one of the three top-layer models)
    #	if save_figs:
    #		for t in task_nums:
    #			plt.plot(np.arange(0, epochs), accuracies[t], color = colors[t])
    #		plt.plot(np.arange(0, epochs), accuracies["total"], color = "#FF0000", marker = "o")
    #		plt.axis([0, epochs-1, 0, 1])
    #		plt.xlabel("Epoch")
    #		plt.ylabel("Accuracy")
    #		plt.title("Model Accuracy")
    #		plt.legend(["Task {0}".format(t) for t in task_nums]+["Total"], loc = "lower right")
    #		plt.savefig("figures/trained on {0}, excluded {1}.png".format(task_nums, excluded), bbox_inches = "tight")
    #		plt.close()

    total_trX = np.concatenate((cnn.trX, trXE), axis=0)
    total_trY = np.concatenate((cnn.trY, trYE), axis=0)
    total_teX = np.concatenate((cnn.teX, teXE), axis=0)
    total_teY = np.concatenate((cnn.teY, teYE), axis=0)

    num_chunks = 20
    # integer division keeps the slice bounds ints (len/num_chunks would be
    # a float under Python 3 and fail as an index)
    chunk_size = len(total_trX) // num_chunks
    trA = np.concatenate([
        cnn.activate(total_trX[chunk_size * i:chunk_size * (i + 1)])
        for i in range(num_chunks)
    ])
    teA = cnn.activate(total_teX)
    trC = np.argmax(total_trY, axis=1)
    teC = np.argmax(total_teY, axis=1)

    # convolutional neural network
    if "cnn" in top_layer:
        print(
            "\nRetraining convolutional neural network on all tasks after excluding {0} from initial training"
            .format(excluded))
        start = time.time()

        # fit model with data
        cnn_accs = train_new_tasks(cnn, total_trX, total_trY, total_teX,
                                   total_teY, num_tasks, verbose, epochs,
                                   batch_size)

        end = time.time()
        print("ConvNet Retraining: {0:0.02f}sec".format(end - start))

        # show accuracy improvement from additional model layer
        print(
            "[ConvNet(exclusion)]              Testing data accuracy: {0:0.04f}"
            .format(base_accuracies["total"][-1]))
        print(
            "[ConvNet(exclusion)+ConvNet(all)] Testing data accuracy: {0:0.04f}"
            .format(cnn_accs["total"][-1]))
        print(
            "[(CN(E)+CN(A))-CN(E)]             Accuracy improvement:  {0:0.04f}"
            .format(cnn_accs["total"][-1] - base_accuracies["total"][-1]))

        # generate and save accuracy figures
        if save_figs:
            for t in range(num_tasks):
                plt.plot(np.arange(0, epochs), cnn_accs[t], color=colors[t])
            plt.plot(np.arange(0, epochs),
                     cnn_accs["total"],
                     color="#FF0000",
                     marker="o")
            plt.legend(["Task {0}".format(t) for t in task_nums] + ["Total"],
                       loc="lower right")
            plt.axis([0, epochs - 1, 0, 1])
            plt.xlabel("Epoch")
            plt.ylabel("Accuracy")
            plt.title("Model Accuracy")
            plt.savefig(
                "figures/trained on {0}, excluded {1}, then retrained on all.png"
                .format(task_nums, excluded),
                bbox_inches="tight")
            plt.close()

    # efficient lifelong learning algorithm
    if "ella" in top_layer:
        print(
            "\nTraining efficient lifelong learning algorithm on all tasks after excluding {0} from convnet training"
            .format(excluded))
        start = time.time()

        # fit model with data
        ella = ELLA(d=625,
                    k=5,
                    base_learner=LogisticRegression,
                    base_learner_kwargs={"tol": 10**-2},
                    mu=10**-3)
        for task in range(num_tasks):
            ella.fit(trA, binarize(trC, task), task)
        predictions = np.argmax(np.asarray(
            [ella.predict_logprobs(teA, i) for i in range(ella.T)]),
                                axis=0)
        ella_acc = np.mean(predictions == teC)

        end = time.time()
        print("ELLA: {0:0.02f}sec".format(end - start))

        # show accuracy improvement from additional model layer
        print(
            "[ConvNet]                         Testing data accuracy: {0:0.04f}"
            .format(base_accuracies["total"][-1]))
        print(
            "[ConvNet+ELLA]                    Testing data accuracy: {0:0.04f}"
            .format(ella_acc))
        print(
            "[(CN+ELLA)-CN]                    Accuracy improvement:  {0:0.04f}"
            .format(ella_acc - base_accuracies["total"][-1]))

        # generate and save accuracy figures
        if save_figs:
            pass  # need to generate per-task or per-epoch accuracies to have a good visualization

    # logistic regression model
    if "lr" in top_layer:
        print(
            "\nTraining logistic regression model on all tasks after excluding {0} from convnet training"
            .format(excluded))
        start = time.time()

        # fit model with data
        lr = LogisticRegression()
        lr.fit(trA, trC)
        logreg_accs = find_model_task_accuracies(lr, num_tasks, teA, teC)

        end = time.time()
        print("Logistic Regression: {0:0.02f}sec".format(end - start))

        # show accuracy improvement from additional model layer
        print(
            "[ConvNet]                         Testing data accuracy: {0:0.04f}"
            .format(base_accuracies["total"][-1]))
        print(
            "[ConvNet+LogReg]                  Testing data accuracy: {0:0.04f}"
            .format(logreg_accs["total"]))
        print(
            "[(CN+LR)-CN]                      Accuracy improvement:  {0:0.04f}"
            .format(logreg_accs["total"] - base_accuracies["total"][-1]))

        if verbose:
            print(
                "\nLogistic regression model accuracies after exclusion training:"
            )
            for key, value in logreg_accs.items():
                print("Task: {0}, accuracy: {1:0.04f}".format(key, value))

        # generate and save accuracy figures
        if save_figs:
            plotX = ["Task {0}".format(t)
                     for t in range(num_tasks)] + ["Total", "Average"]
            plotY = [logreg_accs[t] for t in range(num_tasks)] + [
                logreg_accs["total"],
                # list() so np.mean sees a sequence, not a dict view
                np.mean(list(logreg_accs.values()))
            ]
            plt.bar(range(len(plotX)), plotY)
            plt.xticks(range(len(plotX)), plotX)
            plt.title("Model Accuracy")
            plt.savefig(
                "figures/trained on {0}, excluded {1}, then logreg.png".format(
                    task_nums, excluded),
                bbox_inches="tight")
            plt.close()

    # support vector classifier
    if "svc" in top_layer:
        print(
            "\nTraining linear support vector classifier on all tasks after excluding {0} from convnet training"
            .format(excluded))
        start = time.time()

        # fit model with data
        svc = LinearSVC()
        svc.fit(trA, trC)
        svc_accs = find_model_task_accuracies(svc, num_tasks, teA, teC)

        end = time.time()
        print("Support Vector Classifier: {0:0.02f}sec".format(end - start))

        # show accuracy improvement from additional model layer
        print(
            "[ConvNet]                         Testing data accuracy: {0:0.04f}"
            .format(base_accuracies["total"][-1]))
        print(
            "[ConvNet+SVC]                     Testing data accuracy: {0:0.04f}"
            .format(svc_accs["total"]))
        print(
            "[(CN+SVC)-CN]                     Accuracy improvement:  {0:0.04f}"
            .format(svc_accs["total"] - base_accuracies["total"][-1]))

        if verbose:
            print(
                "\nSupport vector classifier accuracies after exclusion training:"
            )
            for key, value in svc_accs.items():
                print("Task: {0}, accuracy: {1:0.04f}".format(key, value))

        # generate and save accuracy figures
        if save_figs:
            plotX = ["Task {0}".format(t)
                     for t in range(num_tasks)] + ["Total", "Average"]
            plotY = [svc_accs[t] for t in range(num_tasks)] + [
                svc_accs["total"],
                # list() so np.mean sees a sequence, not a dict view
                np.mean(list(svc_accs.values()))
            ]
            plt.bar(range(len(plotX)), plotY)
            plt.xticks(range(len(plotX)), plotX)
            plt.title("Model Accuracy")
            plt.savefig(
                "figures/trained on {0}, excluded {1}, then svc.png".format(
                    task_nums, excluded),
                bbox_inches="tight")
            plt.close()

    print("")
Code Example #5
    teAs = [
        np.asarray(
            load_activations("saved/teA{0:02d}.txt".format(i),
                             (10000 // num_chunks, 625)).eval())  # int dims
        for i in range(num_chunks)
    ]
    teA = np.concatenate(teAs)
    print("teA.shape: {0}".format(teA.shape))
    trX, teX, trY, teY = load.mnist(onehot=True)
    trC = np.argmax(trY, axis=1)
    print("trC.shape: {0}".format(trC.shape))
    teC = np.argmax(teY, axis=1)
    print("teC.shape: {0}".format(teC.shape))
    print("Done.")

    print("\nCreating ELLA Model...")
    num_params = 625
    num_latent = 20
    ella = ELLA.ELLA(num_params, num_latent, LogisticRegression, mu=10**-3)
    for task in range(10):
        result_vector = binarize(trC, task)
        ella.fit(trA, result_vector, task)
        print("Trained task {0}".format(task))
    print("Sparsity coefficients: {0}".format(ella.S))
    print("Done.")

    print("\nAnalyzing Training Data...")
    predictions = np.argmax(np.asarray(
        [ella.predict_logprobs(trA, i) for i in range(ella.T)]),
                            axis=0)
    print("predictions.shape: {0}".format(predictions.shape))
    accuracy = np.mean(predictions == trC)
    print("accuracy: {0:0.04f}".format(accuracy))
    print("per-task binary accuracy:")
    for task_id in range(ella.T):
        print("  {0} - {1:0.04f}".format(
            task_id,
            np.mean(binarize(predictions, task_id) == binarize(trC, task_id))))
    print("Done.")