Example #1
def fnn(hidden_neurons, epochs, mini_batch_size, learning_rate,
        training_feedback, training_dataset_type: Training_Dataset):
    """Feedforward neural network using stochastic gradient descent."""

    # Load training data
    training_data, validation_data, test_data = mnist_loader.load_data()

    # Select the dataset used for training feedback
    if training_dataset_type == Training_Dataset.Validation_Data:
        training_dataset = validation_data
    elif training_dataset_type == Training_Dataset.Test_Data:
        training_dataset = test_data
    else:
        training_dataset = None  # no feedback dataset selected

    # Basic three-layer neural network: 784 neurons in the input layer,
    # 'hidden_neurons' neurons in the hidden layer, and 10 in the output layer.
    net = neural_network.Network([784, hidden_neurons, 10])

    # Train the network for 'epochs' epochs, with 'mini_batch_size' digits per mini-batch
    # and 'learning_rate' as the learning rate; show test feedback if 'training_feedback' is true.
    print_information("FNN - Quadratic Cost", hidden_neurons, epochs,
                      mini_batch_size, learning_rate)
    net.stochastic_gradient_descent(
        training_data,
        epochs,
        mini_batch_size,
        learning_rate,
        test_data=training_dataset if training_feedback else None)
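A minimal usage sketch for the function above; the Training_Dataset enum is only referenced in the snippet, so the definition below is an assumption for illustration.

from enum import Enum

class Training_Dataset(Enum):  # assumed definition; only the member names appear above
    Validation_Data = 1
    Test_Data = 2

fnn(hidden_neurons=30, epochs=30, mini_batch_size=10, learning_rate=3.0,
    training_feedback=True, training_dataset_type=Training_Dataset.Validation_Data)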
Example #2
def average_validation_data():
		training_data, validation_data, test_data = np.asarray(mnist_loader.load_data())
		d1 = defaultdict(int)
		d2 = defaultdict(float)
		# Build two dictionaries: per-digit frequencies and summed mean grayscale values
		for image_data, digit_frequency_data in zip(training_data[0], training_data[1]):
			d1[digit_frequency_data] += 1
			d2[digit_frequency_data] += image_data.mean(axis=0)

		print(d1)
		print(d2)
		# Average grayscale value per digit
		average = defaultdict(float)
		for key, value_frequency in d1.items():
			average[key] = round(d2[key] / value_frequency, 4)
		print(average) 

		# Testing above algorithm on test_data
		count = 0
		for test_image_data, test_label_data in zip(test_data[0], test_data[1]):
			average_grayscale_test_value = round(test_image_data.mean(axis=0),4)
			tolerance = abs(average_grayscale_test_value - average[test_label_data])
			#print(tolerance)
			if tolerance <= 0.0005:
				#print(tolerance)
				#print(average[test_label_data])
				count += 1
		print(count)
Example #3
def raport4():
    print("raport4")
    training_data, validation_data, test_data = ml.load_data()  # inputs - 784, outputs - 10
    neural_network = cnv.Conv(784, 1, 10, alpha=0.005, max_epochs=20, acc_freeze=14,
                              default_hlayer_neuron_numbers=50, batch_size=100,
                              winit=cnv.XAVIER, activation_function=cnv.SIG,
                              optimalization=cnv.ADAM)
    start = datetime.now()
    start_time = start.strftime("%H:%M:%S")
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
    print("start_time")
    print(start_time)
    training_data = training_data[0][0: 1000], training_data[1][0:1000]
    validation_data = validation_data[0][0: 100], validation_data[1][0:100]
    training_errors, val_errors, val_accuracy, stop_reason = neural_network.train(training_data[0], training_data[1], validation_data[0], validation_data[1])
    end = datetime.now()
    end_time = end.strftime("%H:%M:%S")
    t1 = time.perf_counter()
    print("end_time")
    print(end_time)
    elapsed = t1 - t0
    print("elapsed time")
    print(str(timedelta(seconds=elapsed)))

    print("celnosc: ")
    # accuracy = neural_network.accuracy(test_data[0], test_data[1])
    accuracy = 0
    print(accuracy)

    printresults2("conv-test", training_errors, val_errors, val_accuracy, stop_reason, accuracy)
Example #4
def main2():
    dnn = DNN(input=28 * 28,
              layers=[DropoutLayer(160, LQ),
                      Layer(10, LCE)],
              eta=0.05,
              lmbda=1)  # 98%
    dnn.initialize_rand()
    train, test, validation = load_mnist_simple()

    f_names = [f'mnist_expaned_k0{i}.pkl.gz' for i in range(50)]
    shuffle(f_names)
    for f_name in f_names:
        print(f_name)
        with timing("load"):
            raw_data = load_data(f_name)
        with timing("shuffle"):
            shuffle(raw_data)
        with timing("reshape"):
            data = [(x.reshape((784, 1)), y)
                    for x, y in islice(raw_data, 100000)]
            del raw_data
        with timing("learn"):
            dnn.learn(data)
        del data
        print('TEST:', dnn.test(test))
Example #5
def raport2_1():
    repeat_times = 3
    training_data, validation_data, test_data = ml.load_data()  # inputs - 784, outputs - 10

    start = datetime.now()
    start_time = start.strftime("%H:%M:%S")
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
    print("start_time")
    print(start_time)

    neural_network = mlp.Mlperceptron(784, 1, 10, alpha=0.1, weight_random=0.3, max_epochs=50, acc_freeze=14,
                                      default_hlayer_neuron_numbers=300, batch_size=100, default_act=mlp.SIG)
    hidden_layer_weights_r, bias_layer_r = neural_network.get_weights()  # remember the best weights; here, the initially randomized ones
    hidden_layer_weights = copy.deepcopy(hidden_layer_weights_r)
    bias_layer = copy.deepcopy(bias_layer_r)


    # first parameter
    for i in range(repeat_times):
        neural_network = mlp.Mlperceptron(784, 1, 10, alpha=0.1, weight_random=0.3, max_epochs=50, acc_freeze=14,
                                          default_hlayer_neuron_numbers=1200, batch_size=100, default_act=mlp.SIG)
        neural_network.set_weights(copy.deepcopy(hidden_layer_weights_r), copy.deepcopy(bias_layer_r))
        training_errors, val_errors, val_accuracy, stop_reason = neural_network.train(training_data[0], training_data[1], validation_data[0], validation_data[1])
        accuracy = neural_network.accuracy(test_data[0], test_data[1])
        tname = "300neurons-" + str(i+1)
        printresults2(tname, training_errors, val_errors, val_accuracy, stop_reason, accuracy)

    end = datetime.now()
    end_time = end.strftime("%H:%M:%S")
    t1 = time.perf_counter()
    print("end_time")
    print(end_time)
    elapsed = t1 - t0
    print("elapsed time")
    print(str(timedelta(seconds=elapsed)))
Example #6
 def __init__(self):
     self.training_data, self.validation_data, self.test_data = mnist_loader.load_data(
     )
     #print training_data[0][0].shape, training_data[0][0].shape
     # train
     self.clf = svm.SVC()
     self.clf.fit(self.training_data[0][:1000],
                  self.training_data[1][:1000])
     # test
     self.predictions = [
         int(a) for a in self.clf.predict(self.test_data[0])
     ]
     self.true_positives = [
         (self.test_data[0][idx], p[0], p[1])
         for idx, p in enumerate(zip(self.predictions, self.test_data[1]))
         if p[0] == p[1]
     ]
     num_correct = sum(
         int(a == y) for a, y in zip(self.predictions, self.test_data[1]))
     #print 'simple black box classifier using an SVM'
     logger.info('black box svm accuracy: %f' %
                 (float(num_correct) / float(len(self.test_data[1]))))
     joblib.dump(
         self.clf,
         '/home/ubuntu/machinelearning/src/nicefolk/simple_svm.pkl')
Example #7
def main():
    training_set, validation_set, test_set = mnist_loader.load_data()
    images = get_images(test_set)
    # plot_rotated_image(images[0])
    image_number = 1503
    plot_mnist_digit(images[image_number])
    plot_images_together(images[101:200])
Example #8
def mnist_load_data(dirn='../data'):
    '''
      Load MNIST data from files
    '''
    X_train, y_train = load_data(dirn, subset='train')
    X_test, y_test = load_data(dirn, subset='test')

    # reshape image data to flat
    X_train = X_train.reshape([-1, 28*28])
    X_test = X_test.reshape([-1, 28*28])

    # scaling image data from [0 .. 255] to [0.0 .. 1.0]
    X_train = X_train / 255.
    X_test = X_test / 255.

    return X_train, X_test, y_train, y_test
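A quick sanity check for the loader above, assuming the MNIST files are present under '../data':

if __name__ == '__main__':
    X_train, X_test, y_train, y_test = mnist_load_data()
    assert X_train.shape[1] == 28 * 28
    assert 0.0 <= X_train.min() <= X_train.max() <= 1.0
    print('train:', X_train.shape, 'test:', X_test.shape)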
Example #9
def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()
    clf = svm.SVC()
    clf.fit(training_data[0], training_data[1])
    predictions = [int(a) for a in clf.predict(test_data[0])]
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    print "Baseline classifier using an SVM."
    print "%s of %s values correct." % (num_correct, len(test_data[1]))
Example #10
def main():
    training_data, validation_data, test_data = mnist_loader.load_data()
    avgs = avg_darknesses(training_data)
    num_correct = sum(
        int(guess_digit(image, avgs) == digit)
        for image, digit in zip(test_data[0], test_data[1]))
    print("Baseline classifier using average darkness of images.")
    print("{0} of {1} values correct.".format(num_correct, len(test_data[1])))
Example #11
def main():
    training_data, validation_data, test_data = mnist_loader.load_data()
    # training phase: compute the average darknesses for each digit, based on the training data
    avgs = avg_darknesses(training_data)
    # testing phase: see how many of the test images are classified correctly
    num_correct = sum(
        int(guess_digit(image, avgs) == digit)
        for image, digit in zip(test_data[0], test_data[1]))
    print("Baseline classifier using average darkness of image.")
    print("{0} of {1} values correct.".format(num_correct, len(test_data[1])))
Example #12
def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()
    # train
    clf = svm.SVC()
    clf.fit(training_data[0], training_data[1])
    # test
    predictions = [int(a) for a in clf.predict(test_data[0])]
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    print('The evaluation results:')
    print('%s of %s values correct.' % (num_correct, len(test_data[1])))
Example #13
def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()
    # train
    clf = svm.SVC()
    clf.fit(training_data[0], training_data[1])
    # test
    predictions = [int(a) for a in clf.predict(test_data[0])]
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    print "Baseline classifier using an SVM."
    print "%s of %s values correct." % (num_correct, len(test_data[1]))
Example #14
def main():
	"""The main script to build and train the model."""
	# load the data
	training_data, validation_data, test_data = mnist_loader.load_data()
	# training phase: compute the average darkness of each digit
	avgs = avg_darknesses(training_data)
	# testing phase: evaluate the model
	num_correct = sum(int(guess_digit(image, avgs) == digit)
		for image, digit in zip(test_data[0], test_data[1]))
	print('The evaluation results:')
	print('%s of %s values correct.' % (num_correct, len(test_data[1])))
Example #15
def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()
    # train
    clf = svm.SVC()
    # training_data[0] is 50000 images, training_data[1] is 50000 labels
    clf.fit(training_data[0], training_data[1])
    # test
    predictions = [int(a) for a in clf.predict(test_data[0])]
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    print("Baseline classifier using an SVM.")
    print(str(num_correct) + " of " + str(len(test_data[1])) + " values correct.")
Example #16
def main():
    training_data, validation_data, test_data = mnist_loader.load_data()
    # training phase: compute the average darknesses for each digit,
    # based on the training data
    avgs = avg_darknesses(training_data)
    # testing phase: see how many of the test images are classified
    # correctly
    num_correct = sum(int(guess_digit(image, avgs) == digit)
                      for image, digit in zip(test_data[0], test_data[1]))
    print("Baseline classifier using average darkness of image.")
    print("{0} of {1} values correct.".format(num_correct, len(test_data[1])))
Example #17
def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()
    #print training_data[0][0].shape, training_data[0][0].shape
    # train
    clf = svm.SVC()
    clf.fit(training_data[0], training_data[1])
    # test
    predictions = [int(a) for a in clf.predict(test_data[0])]
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    print "Baseline classifier using an SVM."
    print "%s of %s values correct." % (num_correct, len(test_data[1]))
    joblib.dump(clf, '/home/ubuntu/machinelearning/src/nicefolk/simple_svm.pkl')
Example #18
def main():
    training_data, validation_data, test_data = mnist_loader.load_data(
    )  # each row of data is a tuple (x data, y label)
    # training phase: compute the average darknesses for each digit,
    # based on the training data
    avgs = avg_darknesses(training_data)
    # testing phase: see how many of the test images are classified
    # correctly
    num_correct = sum(
        int(guess_digit(image, avgs) == digit)
        for image, digit in zip(test_data[0], test_data[1]))
    print "Baseline classifier using average darkness of image."
    print "%s of %s values correct." % (num_correct, len(test_data[1]))
Example #19
def main(k):
	training_data, validation_data, test_data = mnist_loader.load_data()
	k = int(k[0])
	predictions = []
	for i in range(len(test_data[0])):
		neighbors = getNeighbors(training_data[0], test_data[0][i], training_data[1], k)
		result = getResponse(neighbors)
		predictions.append(result)
	confusionMatrix = CreateConfusionMatrix(predictions,test_data[1])
	avgPrecision, avgRecall, avgSpecificity, precision, recall, specificity = CalculatePrecisionAndRecall(confusionMatrix, 10, len(test_data[1]))
	PrintResults(confusionMatrix, avgPrecision, avgRecall, avgSpecificity, precision, recall, specificity)
	accuracy = GetAccuracy(test_data[1], predictions)
	print "Accuracy :", accuracy
Example #20
def deep_net_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()

    X = training_data[0]
    Y = training_data[1]

    # One-hot encode the labels, then transpose both arrays so that examples
    # become columns (m = 50,000 columns). Transposing is what flips
    # rows/columns; reshape(784, 50000) would scramble the pixel data.
    X_reshaped = X.T
    Y_reshaped = one_hot(Y, 10).T

    # train
    layer_dims = [784, 20, 25, 15, 10]  # 5-layer model
    parameters = network.L_layer_model(X_reshaped, Y_reshaped, layer_dims,
                                       learning_rate=0.0075, num_iterations=3000,
                                       print_cost=False)
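one_hot is called above but not defined in the snippet. A minimal sketch under the assumption that the labels are integers 0..9 and the encoding has one row per example (the caller transposes it):

import numpy as np

def one_hot(labels, num_classes):
    """Return an array of shape (len(labels), num_classes) with 1.0 at each label position."""
    labels = np.asarray(labels, dtype=int)
    encoded = np.zeros((labels.size, num_classes))
    encoded[np.arange(labels.size), labels] = 1.0
    return encoded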
Example #21
def run_svms():
    svm_training_data, svm_validation_data, svm_test_data = mnist_loader.load_data()
    accuracies = []
    for size in SIZES:
        print("\n\nTraining SVM with data set size %s" %size)
        clf = svm.SVC()
        clf.fit(svm_training_data[0][:size],svm_training_data[1][:size])
        predictions = [int(a) for a in clf.predict(svm_validation_data[0])]
        accuracy = sum(int(a==y) for a,y in zip(predictions, svm_validation_data[1])) / 100.0
        print("Accuracy was %s percent" %accuracy)
        accuracies.append(accuracy)
    f = open("more_data_svm.json","w")
    json.dump(accuracies,f)
    f.close()
Example #22
def load_data_wrapper():
    """wrapping data for network input"""
    tr_d, va_d, te_d = load_data()
    tr_d = (shift_data(tr_d[0]), tr_d[1])
    va_d = (shift_data(va_d[0]), va_d[1])
    te_d = (shift_data(te_d[0]), te_d[1])
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = list(zip(training_inputs, training_results))
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = list(zip(validation_inputs, va_d[1]))
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = list(zip(test_inputs, te_d[1]))
    return (training_data, validation_data, test_data)
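shift_data and vectorized_result are project helpers not shown here. In the classic mnist_loader, vectorized_result builds a 10x1 one-hot column vector; a sketch under that assumption:

import numpy as np

def vectorized_result(y):
    """Return a 10x1 unit vector with 1.0 in the y-th position."""
    e = np.zeros((10, 1))
    e[y] = 1.0
    return e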
Example #23
def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()
    
    # train
    clf = svm.SVC()
    clf.fit(training_data[0], training_data[1])
    # test
    predictions = [int(a) for a in clf.predict(test_data[0])]
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    confusionMatrix = CreateConfusionMatrix(predictions, test_data[1])
    avgPrecision, avgRecall, avgSpecificity, precision, recall, specificity = CalculatePrecisionAndRecall(confusionMatrix, 10, len(test_data[1]))
    PrintResults(confusionMatrix, avgPrecision, avgRecall, avgSpecificity, precision, recall, specificity)
    print "Baseline classifier using an SVM."
    print "%s of %s values correct." % (num_correct, len(test_data[1]))
Example #24
def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()
    # train
    #http://peekaboo-vision.blogspot.de/2010/09/mnist-for-ever.html
    gamma = 0.00728932024638   # kernel coefficient
    C = 2.82842712475          # penalty parameter of the error term

    clf = svm.SVC(C=C, gamma=gamma)
    clf.fit(training_data[0], training_data[1])
    # test
    predictions = [int(a) for a in clf.predict(test_data[0])]
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    print "Baseline classifier using an SVM."
    print "%s of %s values correct." % (num_correct, len(test_data[1]))
Example #25
def run_svms():
    svm_training_data, svm_validation_data, svm_test_data = mnist_loader.load_data()
    accuracies = []
    for size in SIZES:
        print "\n\nTraining SVM with data set size %s" % size
        clf = svm.SVC()
        clf.fit(svm_training_data[0][:size], svm_training_data[1][:size])
        predictions = [int(a) for a in clf.predict(svm_validation_data[0])]
        accuracy = sum(int(a == y) for a, y in zip(predictions, svm_validation_data[1])) / 100.0
        print "Accuracy was %s percent" % accuracy
        accuracies.append(accuracy)
    f = open("more_data_svm.json", "w")
    json.dump(accuracies, f)
    f.close()
Example #26
def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()
    # train
    clf = svm.SVC()

    trainingSize = 2000
    clf.fit(training_data[0][:trainingSize], training_data[1][:trainingSize])
    # test ('predictionSize' must be defined at module level, e.g. predictionSize = 150)
    predictions = [int(a) for a in clf.predict(test_data[0][:predictionSize])]
    num_correct = sum(int(a == y) for a, y in zip(predictions[:predictionSize], test_data[1][:predictionSize]))
    print "%s of %s values correct." % (num_correct, predictionSize)
    fig = plt.figure()
    pageIndex = 0
    drawFigure(fig, test_data, predictions, pageIndex)
    plt.show()
Example #27
def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()

    # train the model
    print('Training SVM classifier on the mnist dataset.')
    clf = svm.SVC()
    clf.fit(training_data[0], training_data[1])

    # test the model
    predictions = [int(a) for a in clf.predict(test_data[0])]
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    print('Results:')
    print('Accuracy: ', num_correct / len(test_data[1]))
    print('%d out of %d classified correctly' %
          (num_correct, len(test_data[1])))
Example #28
def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()

    # Initialize a support vector classifier
    clf = svm.SVC()

    # Train the classifier
    clf.fit(X=training_data[0], y=training_data[1])

    # Get predictions
    predictions = [int(a) for a in clf.predict(X=test_data[0])]

    num_correct = sum(a == y for a, y in zip(predictions, test_data[1]))

    print("Baseline classifier using an SVM")
    print("%s of %s values correct" % (num_correct, len(test_data[1])))
Example #29
def main(k):
    training_data, validation_data, test_data = mnist_loader.load_data()
    k = int(k[0])
    predictions = []
    for i in range(len(test_data[0])):
        neighbors = getNeighbors(training_data[0], test_data[0][i],
                                 training_data[1], k)
        result = getResponse(neighbors)
        predictions.append(result)
    confusionMatrix = CreateConfusionMatrix(predictions, test_data[1])
    avgPrecision, avgRecall, avgSpecificity, precision, recall, specificity = CalculatePrecisionAndRecall(
        confusionMatrix, 10, len(test_data[1]))
    PrintResults(confusionMatrix, avgPrecision, avgRecall, avgSpecificity,
                 precision, recall, specificity)
    accuracy = GetAccuracy(test_data[1], predictions)
    print "Accuracy :", accuracy
Example #30
def svm_baseline():
    print('LOADING DATA...')
    training_data, validation_data, test_data = mnist_loader.load_data(
        r'G:\Pycharmworkspace\MachineLearning_python\MaiZi-Course\data\mnist.pkl.gz'
    )
    print(training_data[1])
    print('DONE.')
    # train
    print('TRAINING...')
    clf = svm.SVC()
    clf.fit(training_data[0], training_data[1])
    # test
    print('TESTING...')
    predictions = [int(a) for a in clf.predict(test_data[0])]
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    print("Baseline classifier using an SVM.")
    print("%s of %s values correct." % (num_correct, len(test_data[1])))
Example #31
def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()
    # train
    clf = svm.SVC()

    trainingSize = 2000
    clf.fit(training_data[0][:trainingSize], training_data[1][:trainingSize])
    # test ('predictionSize' must be defined at module level, e.g. predictionSize = 150)
    predictions = [int(a) for a in clf.predict(test_data[0][:predictionSize])]
    num_correct = sum(
        int(a == y) for a, y in zip(predictions[:predictionSize], test_data[1]
                                    [:predictionSize]))
    print "%s of %s values correct." % (num_correct, predictionSize)
    fig = plt.figure()
    pageIndex = 0
    drawFigure(fig, test_data, predictions, pageIndex)
    plt.show()
Example #32
def svm_baseline():
    training_data, validation_data, test_data = mnist.load_data()
    # train
    t0 = time.time()
    # 'degree' is only used by the poly kernel, so it is omitted for rbf
    clf = svm.SVC(kernel='rbf')
    from sklearn import preprocessing
    # Fit one scaler on the training data and reuse it for the test set below;
    # this matches preprocessing.scale on the training set, but makes the
    # train/test consistency explicit.
    scaler = preprocessing.StandardScaler().fit(training_data[0])
    X_train = scaler.transform(training_data[0])
    clf.fit(X_train, training_data[1])
    print("Training time: ", time.time() - t0)
    # test
    t0 = time.time()
    X_test = scaler.transform(test_data[0])
    predictions = [int(a) for a in clf.predict(X_test)]
    print("testing time:", time.time() - t0)
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    print("Baseline classifier using an SVM.")
    print("%s of %s values correct." % (num_correct, len(test_data[1])))
Example #33
def raport4_2():
    print("raport4_2")
    repeat_times = 3
    training_data, validation_data, test_data = ml.load_data()  # inputs - 784, outputs - 10

    neural_network = mlp.Mlperceptron(784, 1, 10, alpha=0.005, max_epochs=20, acc_freeze=14,
                                      default_hlayer_neuron_numbers=50, batch_size=100, winit=mlp.XAVIER,
                                      activation_function=mlp.SIG, optimalization=mlp.ADAM)
    hidden_layer_weights_r, bias_layer_r = neural_network.get_weights()  # remember the best weights; here, the initially randomized ones
    hidden_layer_weights = copy.deepcopy(hidden_layer_weights_r)
    bias_layer = copy.deepcopy(bias_layer_r)

    start = datetime.now()
    start_time = start.strftime("%H:%M:%S")
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
    print("start_time")
    print(start_time)

    training_data = training_data[0][0: 1000], training_data[1][0:1000]
    validation_data = validation_data[0][0: 100], validation_data[1][0:100]

    for i in range(repeat_times):
        neural_network = mlp.Mlperceptron(784, 1, 10, alpha=0.005, max_epochs=20, acc_freeze=14,
                                          default_hlayer_neuron_numbers=50, batch_size=100, winit=mlp.XAVIER,
                                          activation_function=mlp.SIG, optimalization=mlp.ADAM)
        neural_network.set_weights(copy.deepcopy(hidden_layer_weights_r), copy.deepcopy(bias_layer_r))
        training_errors, val_errors, val_accuracy, stop_reason = neural_network.train(training_data[0],
                                                                                      training_data[1],
                                                                                      validation_data[0],
                                                                                      validation_data[1])
        # accuracy = neural_network.accuracy(test_data[0], test_data[1])
        accuracy = 0
        tname = "conv-mlp-" + str(i + 1)
        printresults2(tname, training_errors, val_errors, val_accuracy, stop_reason, accuracy)


    end = datetime.now()
    end_time = end.strftime("%H:%M:%S")
    t1 = time.perf_counter()
    print("end_time")
    print(end_time)
    elapsed = t1 - t0
    print("elapsed time")
    print(str(timedelta(seconds=elapsed)))
Example #34
def hyper_parameters():
    """Defining hyper-parameters can be a hard task. To do so, there are some heuristics that can help."""

    # Load the training datasets
    training_data, validation_data, test_data = mnist_loader.load_data()
    training_data = list(training_data)
    validation_data = list(validation_data)
    test_data = list(test_data)

    # Broad Strategy
    # board_strategy(training_data, validation_data, test_data)

    # Learning Rate Strategy
    # learning_rate_strategy(training_data, validation_data, test_data)

    # Early Stopping Strategy
    # early_stopping_strategy(training_data, validation_data, test_data)

    # Learning Rate Scheduler Strategy
    learning_rate_scheduler_strategy(training_data, validation_data, test_data)
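The strategies referenced above are defined elsewhere in the project. As a framework-free illustration of the learning-rate scheduling heuristic ("halve the rate when validation accuracy stops improving"), a sketch with illustrative names:

def next_learning_rate(eta, val_accuracies, patience=10, factor=0.5):
    """Halve eta when the best validation accuracy is older than `patience` epochs."""
    no_recent_best = (len(val_accuracies) > patience
                      and max(val_accuracies) not in val_accuracies[-patience:])
    return eta * factor if no_recent_best else eta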
Example #35
def main():
    import mnist_loader
    training_data, validation_data, test_data = mnist_loader.load_data()
    import network
    import utils
    net = network.Network([784, 30, 30, 30, 10],
                          init=utils.NormalWeightInitializer,
                          cost=utils.CrossEntropyCost,
                          norm=utils.L2Regularizer(lmbda=0.0001))
    _, evaluation_accuracy, _, _ = net.SGD(
        30,
        10,
        .14,
        training_data,
        test_data,
        # early_stop=utils.NoImprovementInN(10),
        # learning_rate_adjustment=utils.NoImprovementInN(10),
        monitor_evaluation_accuracy=True)

    from fig import plot
    plot(evaluation_accuracy)
Example #36
def singletest():
    print("single_test")
    training_data, validation_data, test_data = ml.load_data()  # inputs - 784, outputs - 10
    neural_network = mlp.Mlperceptron(784, 1, 10, alpha=0.1, max_epochs=50, acc_freeze=9, default_hlayer_neuron_numbers=50, batch_size=100, optimalization=mlp.ADAM, opteta=0.9, default_act=mlp.SIG, winit=mlp.XAVIER)
    #  neural_network.train(training_data[0], training_data[1], validation_data[0], validation_data[1])
    start = datetime.now()
    start_time = start.strftime("%H:%M:%S")
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
    print("start_time")
    print(start_time)
    neural_network.train(training_data[0], training_data[1], validation_data[0], validation_data[1])
    end = datetime.now()
    end_time = end.strftime("%H:%M:%S")
    t1 = time.perf_counter()
    print("end_time")
    print(end_time)
    elapsed = t1 - t0
    print("elapsed time")
    print(str(timedelta(seconds=elapsed)))

    print("celnosc: ")
    print(neural_network.accuracy(test_data[0], test_data[1]))
Example #37
def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()
    # train
    clf = svm.SVC()
    clf.fit(training_data[0], training_data[1])
    # test
    predictions = [int(a) for a in clf.predict(test_data[0])]
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    print "Baseline classifier using an SVM."
    print "%s of %s values correct." % (num_correct, len(test_data[1]))
    # finally, plot the first ten images where the classifier fails
    failure_indices = [j for (j, z) in enumerate(zip(predictions, test_data[1]))
                       if z[0] != z[1]]
    failed_images = [np.reshape(test_data[0][failure_indices[j]], (-1, 28))
                     for j in xrange(10)]
    fig = plt.figure()
    for j in xrange(1, 11):
        ax = fig.add_subplot(1, 10, j)
        ax.matshow(failed_images[j-1], cmap = matplotlib.cm.binary)
        plt.xticks(np.array([]))
        plt.yticks(np.array([]))
    plt.show()
Example #38
from tempfile import TemporaryFile
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import mnist_loader
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
tr_d, va_d,te_d = mnist_loader.load_data()
import network
net = network.Network([784, 64, 32, 10])  # network architecture design
np.savez('weightsinitial', weights=net.weights)
net.SGD(training_data, 1, 10, .1)
np.savez('weightsafter1', weights=net.weights)
# Train in 10-epoch chunks, checkpointing the weights after each chunk
for checkpoint in range(10, 100, 10):
    net.SGD(training_data, 10, 10, 0.1)
    np.savez('weightsafter%d' % checkpoint, weights=net.weights)
net.SGD(training_data, 10, 10, 0.1)
Example #39
"""Plot how the size of the training data
set affects the classification accuracy of an SVM and a neural network
classifier.  The training and test data is drawn from the MNIST data
set.
"""

#### Libraries
# My libraries
import mnist_nn
import mnist_loader 

# Third-party libraries
import matplotlib
import matplotlib.pyplot as plt
from sklearn import svm

svm_training_data, _, svm_test_data = mnist_loader.load_data()
nn_training_data, nn_test_inputs, nn_actual_test_results = mnist_nn.load_data()
sizes = [100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000] 
svm_results, nn_results = [], []
for size in sizes:
    print "\nData set size: %s" % size
    # SVM results
    clf = svm.SVC()
    clf.fit(svm_training_data[0][:size], svm_training_data[1][:size])
    predictions = [int(a) for a in clf.predict(svm_test_data[0])]
    svm_results.append(
        sum(int(a == y) for a, y in zip(predictions, svm_test_data[1])))
    print "SVM result: %s /  10000" % svm_results[-1]
    # Neural network results
    net = mnist_nn.Network([784, 20, 10])
    epochs = 1500000/size
Example #40
def find_centroid(veclist):
    '''
    Input:
        - veclist: a list of Vecs
    Output:
        - a Vec, the centroid of veclist
    Example:
        >>> from vecutil import list2vec
        >>> vs = [list2vec(l) for l in [[1,2,3],[2,3,4],[9,10,11]]]
        >>> find_centroid(vs)
        Vec({0, 1, 2},{0: 4.0, 1: 5.0, 2: 6.0})
    '''
    pass
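A sketch of what the stub above asks for, assuming the course's Vec class supports vector addition (+) and scalar multiplication (*), and Python 3 true division:

def find_centroid_sketch(veclist):
    total = veclist[0]
    for v in veclist[1:]:
        total = total + v
    return (1 / len(veclist)) * total  # centroid = (sum of Vecs) / count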


# Load training and testing data
n_train, n_test = 3000, 100
images, labels = load_data(n_train+n_test)
train_images, test_images = images[:n_train], images[n_train:]
train_labels, test_labels = labels[:n_train], labels[n_train:]



## 6: Raw Error Rate
raw_nn_error_rate = ...



## 7: Training Centroid
centroid = ...


def main():
    training_set, validation_set, test_set = mnist_loader.load_data()
    images = get_images(training_set)
    plot_rotated_image(images[0])
import matplotlib.pyplot as plt
import numpy as np
import math, random
import threading
from multiprocessing import Process, Queue
import time
from sklearn import svm
import mnist_loader  # needed for the load_data() call below

numRepetitions = 12 # Repeat experiment for the sake of precision
predictionSize = 150  # number of test samples to score; an int so it can be used as a slice index
numProcessors = 4
plotInterval = 1

# Putting data sets in memory
training_data, validation_data, test_data = mnist_loader.load_data()
# Arrays containing results of proportion correct vs sample size given
q = Queue()
q.put([[],[]])
def iterate(times):
	for t in xrange(0,times):
		a = 10
		b = .01
		c = 0
		exp = lambda x: int(a ** ((x * b) + c))
		log = lambda x: int(math.log(x - c, a) / b)
		# sizes = map(exp, xrange(log(5), log(len(training_data[0]) - 1),20))
		sizes = map(exp, xrange(log(5), log(5000),plotInterval))
		for trainingSize in sizes:
			clf = svm.SVC()
			# Array to contain sampled images and answers