def mnist(parameters, loss_function):
    """Load MNIST cases as [normalized flat image, one-hot label] pairs.

    Args:
        parameters: iterable of strings. The string "testing" selects the
            test set (training set otherwise); any digit strings (e.g. "3")
            restrict loading to those digits.
        loss_function: unused here; kept so the call interface matches the
            other dataset loaders.

    Returns:
        List of [image_vector, one_hot_label] cases.
    """
    use_testing = False
    digits = []
    for p in parameters:
        if p == "testing":
            use_testing = True
        if p.isdigit():
            digits.append(int(p))

    # Hoist the dataset-name choice so it is computed once, not per branch.
    dataset = "testing" if use_testing else "training"
    output_size = 10

    # Only pass `digits` when the caller restricted them, so mb.load_mnist
    # keeps its own default digit selection otherwise.
    if digits:
        images, labels = mb.load_mnist(dataset=dataset, digits=digits)
    else:
        images, labels = mb.load_mnist(dataset=dataset)

    # Each case: image flattened and scaled by its Euclidean norm, label as
    # a one-hot vector of length `output_size` (sparse losses need the int
    # label converted explicitly).
    cases = [[mb.flatten_image(i) / la.norm(i),
              TFT.int_to_one_hot(int(l[0]), output_size)]
             for (i, l) in zip(images, labels)]
    print("Total cases collected: ", len(cases))
    return cases
def run(self, delta, epochs):
    """Train the network with mini-batches and plot per-epoch test accuracy.

    MNIST data are loaded, scaled to [0, 1], flattened to 784-vectors, and
    labels one-hot encoded. After each epoch the test-set accuracy (percent
    correct) is printed and recorded; a plot of accuracy vs. epoch is shown
    at the end.

    Args:
        delta: mini-batch size.
        epochs: number of passes over the training data.
    """
    trX, trY = mnist_b.load_mnist()
    trX = self.floatX(trX)
    trX = trX / 255.
    # -1 lets numpy infer the sample count instead of hard-coding 60000.
    trX = trX.reshape((-1, 28 * 28)).astype(float)
    trY = one_hot(trY, 10)

    teX, teY = mnist_b.load_mnist("testing")
    teX = self.floatX(teX)
    teX = teX / 255.
    teX = teX.reshape((-1, 28 * 28)).astype(float)
    teY = one_hot(teY, 10)

    result_list = [self.neuronsInHiddenLayers, self.listOfFunctions,
                   delta, epochs, []]
    print("Starting...")
    self.printSetUp()
    for i in range(epochs):
        # End-range extended by delta so the final batch is trained on too;
        # the original zip(range(0, n, d), range(d, n, d)) dropped it.
        for start, end in zip(range(0, len(trX), delta),
                              range(delta, len(trX) + delta, delta)):
            self.cost = self.train(trX[start:end], trY[start:end])
        predicted = np.mean(
            np.argmax(teY, axis=1) == self.predict(teX)) * 100
        result_list[4].append(predicted)
        print("epoch: " + str(i + 1))
        print("Epoch number " + str(i + 1) + " predicted : " +
              str(predicted) + str(" % correct"))
    print(result_list)
    plt.plot(result_list[4])
    plt.ylabel('correctness rate')
    plt.xlabel('epochs')
    plt.show()
def main():
    """Train an ANN on flattened MNIST training cases; return its test score."""
    all_digits = np.arange(10)
    raw_cases = MNIST.load_mnist(dataset="training", digits=all_digits,
                                 path="datasets/")
    images, labels = MNIST.gen_flat_cases(digits=all_digits,
                                          type='training',
                                          cases=raw_cases)
    # Scale pixel intensities from [0, 255] down to [0, 1].
    images = np.divide(images, 255)
    ann = ANN(images, [10, 10], 0.1)
    ann.do_training(10, test_interval=10)
    return ann.do_testing()
def mnist(self):
    """Load the MNIST set and return [flat_image, one_hot_label] cases.

    Returns:
        List of two-element lists: the flattened image and a 10-way
        one-hot encoding of its integer label.
    """
    data_set = MNIST.load_mnist()
    # gen_flat_cases yields (images, labels) as two parallel sequences.
    flat_images, labels = MNIST.gen_flat_cases(cases=data_set)
    # zip replaces the original index loop over range(len(...)).
    return [[img, TFT.int_to_one_hot(lbl, 10)]
            for img, lbl in zip(flat_images, labels)]
def main():
    """Build and train an ANN on all ten MNIST digits; return the test result."""
    digit_range = np.arange(10)
    images, labels = MNIST.gen_flat_cases(
        digits=digit_range,
        type='training',
        cases=MNIST.load_mnist(dataset="training",
                               digits=digit_range,
                               path="datasets/"))
    # Normalize pixel values into [0, 1].
    images = np.divide(images, 255)
    network = ANN(images, [10, 10], 0.1)
    network.do_training(10, test_interval=10)
    return network.do_testing()
def run(self, delta, epochs):
    """Run mini-batch training for `epochs` epochs, tracking test accuracy.

    Loads MNIST train/test data, scales pixels to [0, 1], flattens each
    image to a 784-vector, and one-hot encodes the labels. Test-set
    accuracy (percent correct) is computed after every epoch, printed,
    appended to the result list, and plotted at the end.

    Args:
        delta: mini-batch size.
        epochs: number of training epochs.
    """
    trX, trY = mnist_b.load_mnist()
    trX = self.floatX(trX)
    trX = trX / 255.
    # Infer the sample count with -1 rather than hard-coding 60000.
    trX = trX.reshape((-1, 28 * 28)).astype(float)
    trY = one_hot(trY, 10)

    teX, teY = mnist_b.load_mnist("testing")
    teX = self.floatX(teX)
    teX = teX / 255.
    teX = teX.reshape((-1, 28 * 28)).astype(float)
    teY = one_hot(teY, 10)

    result_list = [
        self.neuronsInHiddenLayers, self.listOfFunctions, delta, epochs, []
    ]
    print("Starting...")
    self.printSetUp()
    for i in range(epochs):
        # The end-range runs to len(trX) + delta so the last batch is not
        # silently skipped (the original paired ranges dropped it).
        for start, end in zip(range(0, len(trX), delta),
                              range(delta, len(trX) + delta, delta)):
            self.cost = self.train(trX[start:end], trY[start:end])
        predicted = np.mean(
            np.argmax(teY, axis=1) == self.predict(teX)) * 100
        result_list[4].append(predicted)
        print("epoch: " + str(i + 1))
        print("Epoch number " + str(i + 1) + " predicted : " +
              str(predicted) + str(" % correct"))
    print(result_list)
    plt.plot(result_list[4])
    plt.ylabel('correctness rate')
    plt.xlabel('epochs')
    plt.show()