예제 #1
0
def main(): 
    """Entry point: pick a demo mode from argv and run the matplotlib
    event loop until a GUI callback sets exit_clicked.

    Visible modes: -1/0 build and display a small toy network image,
    1 trains to a hard-coded path, 2 opens a drawable 28x28 canvas,
    3 (default) steps through MNIST test images.

    NOTE(review): relies on module-level names (plt, np, sys, network2,
    NNIMG, drawableIMG, testIMGs, mnist_loader, train) defined elsewhere
    in this file.
    """
    
    # Flags shared with GUI callbacks defined elsewhere in the module.
    global exit_clicked
    global keyPressed
    keyPressed = False
    exit_clicked = False
    # Build the figure with interactive mode off; re-enabled below.
    plt.ioff()
    plt.show()
    global mode 
    mode = 3
    if len(sys.argv) > 1:
        mode = int(sys.argv[1])

    fig = plt.plot()
    # Tk backend is required for the window.state('zoomed') maximize call.
    plt.switch_backend('TKAgg')
    figManager = plt.get_current_fig_manager()
    figManager.window.state('zoomed')
    if mode == 0:
        print "Net 2, 10, 10, 2"
        img = 0.5*np.ones((140,140))
        net = network2.Network([2, 10, 10, 2], cost=network2.CrossEntropyCost)
        net.large_weight_initializer()

        # Visualize the freshly initialized network on the image canvas.
        myobj = NNIMG(fig, img, net)
        plt.gca().invert_yaxis()  
        plt.draw()
    if mode == -1:
        print "Net 2, 2, 2"
        img = 0.5*np.ones((140,140))
        net = network2.Network([2, 2, 2], cost=network2.CrossEntropyCost)
        net.large_weight_initializer()

        myobj = NNIMG(fig, img, net)
        plt.gca().invert_yaxis()  
        plt.draw()
    
    if mode == 1:       
        # Train and save a network to the hard-coded project path.
        train ('C:/Users/ThomasReichert/source/repos/TheLIFO/NeuralNetMINT/trainednet2')

    if mode == 2:           
        img = np.zeros((28,28))
    
        # Load the trained network and let the user draw digits to classify.
        net = network2.load('C:/Users/ThomasReichert/source/repos/TheLIFO/NeuralNetMINT/trainednet2')
        myobj = drawableIMG(fig, img, net)
       

    if mode == 3:
        # Default: classify images from the MNIST test split.
        training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
        img = np.zeros((28,28))
        net = network2.load('C:/Users/ThomasReichert/source/repos/TheLIFO/NeuralNetMINT/trainednet2')

        myObj = testIMGs(fig, img, net, test_data)
       
    # Hand control to the GUI: poll until an exit click is registered.
    plt.ion()
    
    while not(exit_clicked):       
        plt.pause(0.001)

          
    print "exit"
예제 #2
0
    def run(self):
        print 'Worker thread run!'
        self.calcFinish = False
        
        self.net = network2.load('init.net')
        self.net.regType = Config.regType
        self.net.WEParaQ = Config.WEParaQ
        self.net.SmFun = Config.SmFun
        self.net.SmPa = Config.SmPa
        self.net.iter = Config.iter
        self.net.eta = Config.eta
        self.net.lmbda = Config.lmbda

        evaluation_cost, evaluation_accuracy, \
        training_cost, training_accuracy = \
        self.net.SGD(training_data, Config.iter, 50, Config.eta,
                lmbda = Config.lmbda,
                evaluation_data=test_data,
                monitor_evaluation_cost=True,
                monitor_evaluation_accuracy=True,
                monitor_training_cost=True,
                monitor_training_accuracy=True)
        self.net.evaluation_cost = evaluation_cost
        self.net.evaluation_accuracy = evaluation_accuracy
        self.net.training_cost = training_cost
        self.net.training_accuracy = training_accuracy
        self.calcFinish = True
        print 'Worker thread finish!'
예제 #3
0
def getNeuralNetFromUser():
	neural_net_file = "resources/neural_net" #JSON in a text file used to load the neural network
	net = None
	print "Load Neural Network from file?"
	value = getInput("-1 for training a new network, other key to load a trained one: ")
	if (value == '-1'):
		net_layers = [TOTAL_SIZE] #List of neurons, input layer == N pixels
		i =  1
		print "For each layer, insert the number of neurons\nInsert -1 to finish: "
		while(True):
			s_layer = "Layer {}: ".format(i)
			layer = int(getInput(s_layer))
			if(layer == -1):
				break
			net_layers.append(layer)
			i += 1
		net_layers.append(N_ELEMENTS) #Output layer == N possible output values
		net = network2.Network(net_layers, cost=network2.CrossEntropyCost)
		net.large_weight_initializer()
	else:
		value = getInput("-1 for specifying the neural network file. Other to load the default '{}': ".format(neural_net_file))
		if(value == '-1'):
			neural_net_file = getInput("Insert file name of the neural net to be loaded: ")
			while(True):
				if (isfile(neural_net_file)):
					break
				neural_net_file = getInput("Insert file name of the neural net to be loaded: ")
				print "Name invalid, please try again"
		net = network2.load(neural_net_file) #Loads an existing neural network from a file
	return net
예제 #4
0
파일: mlp.py 프로젝트: lkshay/NN_work
def main():
    """Resume training a saved model with SGD, plot the four monitored
    metric curves (one figure each) and save the updated model back to
    the same JSON file."""
    epochs = 1000
    # load train_data, valid_data, test_data
    train_data, valid_data, test_data = load_data()

    # Resume from a previously saved model rather than training from scratch.
    # model = network2.Network([784, 60, 10])
    model = network2.load("mymodel2.json")

    # Train the network using SGD, monitoring all four metric streams.
    [evaluation_cost, evaluation_accuracy, training_cost, training_accuracy] = model.SGD(
        training_data=train_data,
        epochs=epochs,
        mini_batch_size=500,
        eta=1.9e-3,
        lmbda = 0.00001,
        evaluation_data=test_data,
        monitor_evaluation_cost=True,
        monitor_evaluation_accuracy=True,
        monitor_training_cost=True,
        monitor_training_accuracy=True)

    # IDIOM: build the x-axis directly instead of an append loop.
    epoch_list = list(range(epochs))

    def _plot_metric(values, style, ylabel):
        # One figure per metric, epochs on the x-axis (replaces four
        # copy-pasted figure blocks).
        plt.figure()
        plt.plot(epoch_list, values, style)
        plt.ylabel(ylabel)
        plt.xlabel("Epochs")

    _plot_metric(training_cost, "r-", "Training Cost")
    _plot_metric(training_accuracy, "r-", "Training Accuracy")
    _plot_metric(evaluation_cost, "b-", "Evaluation Cost")
    _plot_metric(evaluation_accuracy, "b-", "Evaluation Accuracy")

    model.save("mymodel2.json")

    plt.show()
예제 #5
0
def main():
    """Entry point: choose a demo mode from argv and poll the matplotlib
    event loop until a GUI callback sets exit_clicked.

    Mode 1 opens a drawable 28x28 digit canvas backed by a trained net;
    mode 2 displays a toy 2-10-10-2 network image.
    """
    # Flag toggled by GUI callbacks defined elsewhere in the module.
    global exit_clicked
    exit_clicked = False
    # Build the figure with interactive mode off; re-enabled below.
    plt.ioff()
    plt.show()
    global mode
    mode = 0
    if len(sys.argv) > 1:
        if sys.argv[1] == '1': mode = 1
        if sys.argv[1] == '2': mode = 2

    fig, axs = plt.subplots(2, 2)

    if mode == 1:
        training_data, validation_data, test_data = mnist_loader.load_data_wrapper(
        )
        #train ('trainednet2')

        fig.canvas.draw()
        img = np.zeros((28, 28))

        # Load the trained network and let the user draw digits to classify.
        net = network2.load(
            'C:/Users/ThomasReichert/source/repos/TheLIFO/NeuralNetMINT/trainednet2'
        )
        myobj = drawableIMG(fig, img, net)
    if mode == 2:
        # Visualize a freshly initialized toy network on the image canvas.
        img = 0.5 * np.ones((140, 140))
        net = network2.Network([2, 10, 10, 2], cost=network2.CrossEntropyCost)
        net.large_weight_initializer()

        myobj = NNIMG(fig, img, net)

        plt.draw()

    # Hand control to the GUI: poll until an exit click is registered.
    plt.ion()

    while not (exit_clicked):
        plt.pause(0.001)

    #obj = axs[1, 1].imshow(np.reshape(test_data[0][0],(28,28)),vmin=0, vmax=1)

    #for i in range(1,10):
    #    obj.set_data(np.reshape(test_data[i][0],(28,28)))
    #    x = net1.feedforward(test_data[i][0])
    #    x = np.argmax(x)
    #    result ="result: " + str(x)
    #    print 'result:', x
    #    plt.suptitle(result)

    #    fig.canvas.draw()
    # wait()

    print "exit"
예제 #6
0
def y_testdata_y_predict():
    """Run the saved network over the MNIST test split.

    Returns two parallel lists: the one-hot ground-truth vectors and the
    one-hot predicted vectors (both as nested Python lists).
    """
    net = network2.load("../data/Network2.bin")
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper(
    )
    y_test = []
    y_predict = []

    for sample, label in test_data:
        # One-hot encode both the true label and the predicted digit.
        y_test.append(network2.vectorized_result(label).tolist())
        predicted_digit = np.argmax(net.feedforward(sample))
        y_predict.append(network2.vectorized_result(predicted_digit).tolist())
    return y_test, y_predict
def y_testdata_y_predict(input_File_Network, input_File_Test_Data):
    """Load a saved network and a pickled test set; return parallel lists
    of one-hot ground-truth vectors and one-hot predictions.

    :param input_File_Network: path of the saved network2 network.
    :param input_File_Test_Data: path of the pickled iterable of (x, y) pairs.
    """
    Net = network2.load(input_File_Network)
    # BUGFIX/idiom: use a context manager so the file is closed even if
    # cp.load raises (the original open/close pair could leak the handle).
    with open(input_File_Test_Data, 'r') as data_file:
        data = cp.load(data_file)
    y_test = []
    y_predict = []
    for x, y in data:
        y = vectorization(y).tolist()
        # Inputs are stored flat; the network expects a (347, 1) column.
        x = np.reshape(x, (347, 1))
        y_test.append(y)
        y_predict.append(vectorization(np.argmax(Net.feedforward(x))).tolist())
    return y_test, y_predict
예제 #8
0
    latexstr = gen_report.generate_latex(args.instructor, args.course,
                                         args.term, histogram, forms)
    current_path = os.getcwd()
    os.chdir(project_path)
    outfile = open("report.tex", 'w')
    outfile.write(latexstr)
    outfile.close()
    run(["pdflatex", "report.tex"])
    run(["pdflatex", "report.tex"])
    os.chdir(current_path)


if __name__ == "__main__":
    # CLI driver: create a project from a scanned evaluation PDF, run the
    # forms through the trained network, then build the LaTeX/PDF report.
    parser = argparse.ArgumentParser()
    parser.add_argument("project_name", help="Name of the project")
    parser.add_argument("eval_scan", help="Path to scanned pdf")
    parser.add_argument("instructor", help="Name of instructor")
    parser.add_argument("course", help="Course name")
    parser.add_argument("term", help="The eval term")
    parser.add_argument(
        "--dest",
        help="Directory to create project in (default is \'.\')",
        default=".")
    args = parser.parse_args()
    path = make_project(args.dest, args.project_name, args.eval_scan)
    # header.network: presumably the saved-model path; passed to network2.load.
    net = network2.load(header.network)
    histogram, forms = process_project(path, net)
    create_report(path, args.instructor, args.course, args.term, histogram,
                  forms)
    print "Report is located at %s/report.pdf" % (path)
예제 #9
0
파일: ANN2.py 프로젝트: ztq09290929/AnnTest
        # Training branch: build a 400-100-65 cross-entropy network on the
        # lpr dataset (presumably license-plate characters — TODO confirm)
        # and save its parameters to disk.
        trainingdata, validationdata= mnist_loader.load_data_wrapper_lpr()
        print 'The sum of trainingdata is: ',len(trainingdata)
        print 'The sum of validationdata is: ',len(validationdata)
        #net = network2.Network([784,30,10],cost = network2.CrossEntropyCost)
        net = network2.Network([400,100,65],cost = network2.CrossEntropyCost)
        print 'The num of neural :',net.sizes
        print 'Start training...'
        # Positional args: epochs=80, mini-batch=20, eta=0.01, lmbda=5.0,
        # then evaluation data and the four monitor flags.
        ec,ea,tc,ta = net.SGD(trainingdata, 80, 20, 0.01, \
                        5.0,validationdata ,\
                        True,True,True,True)
        print 'Training complete!'
        #net.save('AnnTrainedParas2.txt')
        net.save('AnnTrainedParas3.txt')
        print 'Saving paras complete! \nThe form is dict.'   
        # Convert raw accuracy hit counts into ratios before plotting.
        ea = [item/float(len(validationdata)) for item in ea]
        ta = [item/float(len(trainingdata)) for item in ta]
        network2.show_result(ec,ea,tc,ta)
    
    else:
        # Inspection branch: reload saved parameters and dump the shapes
        # of each layer's biases and weights.
        net2 = network2.load('AnnTrainedParas2.txt')
        print 'after:',net2.biases
        # "各层偏置的维数" == "dimensions of each layer's biases"
        print "各层偏置的维数:"
        for a in net2.biases:
            print a.shape
        # "各层权值的维数" == "dimensions of each layer's weights"
        print "各层权值的维数:"
        for b in net2.weights:
            print b.shape    
     
    
    
예제 #10
0
# SH-I

import loader
import network2
import numpy as np

# Interactive classifier: read a glycan in IUPAC notation, encode it and
# report the predicted DC-SIGN binding class.
glycan = input('Enter Glycan IUPAC: ')

net = network2.load('net.json')
gly_enc = loader.encoder(glycan)

# classify() maps the network's output activations to a binding label.
binding = loader.classify(net.feedforward(gly_enc))

print('This glycan has a ' + binding + ' binding to DC_SIGN.')
예제 #11
0
main.py

main method to run the neural network calculation
"""
import stock_loader
import network
import network2


# Network / training hyper-parameters for the stock model.
n_input = 366
n_neutral_neuron = 100
n_output = 12
n_epoch = 50
n_batch_size = 500
coe_learn = 0.01
lmbda = 0.1
size = [n_input, n_neutral_neuron, n_output]
n_cycle = 50

# Each cycle reloads data, resumes training from the saved parameters and
# writes them back, so progress accumulates across cycles.
for x in range(n_cycle):
	print "cycle number : {0:03d}".format(x+1)
	training_data, validation_data, test_data = stock_loader.load_data()
	"""network.py"""
	#net = network.Network(size)
	#net.SGD(training_data, n_epoch, n_batch_size, coe_learn, test_data=test_data)
	"""network2.py"""
	net = network2.load('properties.txt')
	#net = network2.Network(size, cost=network2.CrossEntropyCost())
	net.SGD(training_data, n_epoch, n_batch_size, coe_learn, lmbda = lmbda, evaluation_data=validation_data, monitor_evaluation_accuracy=True, monitor_evaluation_cost=True, monitor_training_accuracy=True, monitor_training_cost=True)
	net.save('properties.txt')
예제 #12
0
#### cv2.CascadeClassifier ####

# http://docs.opencv.org/3.1.0/d7/d8b/tutorial_py_face_detection.html#gsc.tab=0

print("\033[93mLoading CascadeClassifier files..\033[0m")
# Haar cascades: one for whole cars ("coches"), one for plates ("matriculas").
xml_carClassifier = "resources/coches.xml"
xml_plateClassifier = "resources/matriculas.xml"
carClassifier = cv2.CascadeClassifier(xml_carClassifier)
print(
    "\033[32mFile '{}' successfully loaded!\033[0m".format(xml_carClassifier))
plateCassifier = cv2.CascadeClassifier(xml_plateClassifier)
print("\033[32mFile '{}' successfully loaded!\033[0m".format(
    xml_plateClassifier))
print("\033[93mLoading Neural Network File..\033[0m")
neural_net_file = "resources/neural_net"
net = network2.load(neural_net_file)
print("\033[32mFile '{}' successfully loaded!\033[0m".format(neural_net_file))

print("\033[93mProcessing images..\033[0m")

# For every image: detect car regions, then read each car's plate text
# with the network and attach the result back onto the image record.
for img in vImages:
    l_carsR = getCarsFromImage(img.img, carClassifier)
    for carR in l_carsR:
        car = Car(img.img, carR, plateCassifier)
        car.setPlateText(processPlateText(car, net))
        img.addCar(car)

#file = "testing_ocr.txt"
#f = open(file, 'w')

for img in vImages:
예제 #13
0
    def js_click(self):
        """Collect the questionnaire answers from the form widgets, run
        them through the trained network and one-hot the predicted risk
        level into the three result boxes (gao=high, zhong=medium, di=low).
        """
        self.textEdit_gao.setPlainText('')
        self.textEdit_zhong.setPlainText('')
        self.textEdit_di.setPlainText('')

        def _flag(combo):
            # Map a yes/no combo box ('有' == "has/yes") to 1.0 / 0.0.
            # Replaces five copy-pasted if/else blocks.
            return 1.0 if combo.currentText() == '有' else 0.0

        zh = _flag(self.comboBox_zh)
        jy = _flag(self.comboBox_jy)
        jw = _flag(self.comboBox_jw)
        dk = _flag(self.comboBox_dk)
        gl = _flag(self.comboBox_gl)
        # Feature vector order must match what the network was trained on.
        inputdata = [
            float(self.textEdit_cm.toPlainText()), zh, jy,
            float(self.textEdit_nx.toPlainText()),
            float(self.textEdit_pl.toPlainText()), jw, dk, gl,
            float(self.textEdit_gz.toPlainText()),
            float(self.textEdit_lh.toPlainText()),
            float(self.textEdit_ts.toPlainText()),
            float(self.textEdit_kr.toPlainText()),
            float(self.textEdit_zj.toPlainText()),
            float(self.textEdit_kzg.toPlainText()),
            float(self.textEdit_aq.toPlainText()),
            float(self.textEdit_ms.toPlainText()),
            float(self.textEdit_qt.toPlainText())
        ]
        # FIX: the explicit close() inside the with-block was redundant —
        # the context manager already closes the file on exit.
        with open('data.csv', 'w', newline='') as data2csv:
            csv_write = csv.writer(data2csv, dialect='excel')
            csv_write.writerow(inputdata)

        evaluation_data = list(qt5_data_loader.load_data("data.csv"))
        net = network2.load('dtbp_net.json')
        out = net.feedforward(evaluation_data[0]).tolist()
        # The predicted class is the index of the strongest output unit.
        risk = out.index(max(out))
        # One-hot the three result boxes; dispatch table replaces the
        # if/elif chain of nine setText calls.
        one_hot = {0: ('1', '0', '0'), 1: ('0', '1', '0'), 2: ('0', '0', '1')}
        if risk in one_hot:
            gao, zhong, di = one_hot[risk]
            self.textEdit_gao.setText(gao)
            self.textEdit_zhong.setText(zhong)
            self.textEdit_di.setText(di)
예제 #14
0
def load_2(filename):
    """Thin wrapper: deserialize and return a network2 network from *filename*."""
    return network2.load(filename)
예제 #15
0
import network2
import notmnist_loader
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt

# Load the test data
training_data,test_data = notmnist_loader.add_noise2_loader()
#training_data,test_data = notmnist_loader.load_data_wrapper()

#Load the trained NN model
# Its first-layer weights are visualized by getFeaturesData below.
net = network2.load("models/nets_model_Adenoise")

def getFeaturesData(weight):
    """Get the feature data (normalized weight vector) for each hidden unit.

    Each row of *weight* — one hidden unit's incoming weights — is divided
    by its own L2 norm, so every returned vector has unit length.

    :param weight: 2-D array of weights, e.g. net.weights[0]
    :return: list of 1-D numpy arrays, one per hidden unit
    """
    # One norm per row; replaces the original py2 xrange/append loop with
    # a comprehension (same values, same list-of-rows return type).
    divisors = np.sqrt(np.sum(weight**2, 1))
    return [row / d for row, d in zip(weight, divisors)]

def reconstructImage(data, fileName, dimensions=[28, 28]):
    """
예제 #16
0
vImages = loadImgs(vPath)

#### cv2.CascadeClassifier ####

# http://docs.opencv.org/3.1.0/d7/d8b/tutorial_py_face_detection.html#gsc.tab=0

print ("\033[93mLoading CascadeClassifier files..\033[0m")
# Haar cascades: one for whole cars ("coches"), one for plates ("matriculas").
xml_carClassifier = "resources/coches.xml"
xml_plateClassifier = "resources/matriculas.xml"
carClassifier = cv2.CascadeClassifier(xml_carClassifier)
print ("\033[32mFile '{}' successfully loaded!\033[0m".format(xml_carClassifier))
plateCassifier = cv2.CascadeClassifier(xml_plateClassifier)
print ("\033[32mFile '{}' successfully loaded!\033[0m".format(xml_plateClassifier))
print ("\033[93mLoading Neural Network File..\033[0m")
neural_net_file = "resources/neural_net"
net = network2.load(neural_net_file)
print ("\033[32mFile '{}' successfully loaded!\033[0m".format(neural_net_file))

print ("\033[93mProcessing images..\033[0m")

# For every image: detect car regions, then read each car's plate text
# with the network and attach the result back onto the image record.
for img in vImages:
	l_carsR = getCarsFromImage(img.img, carClassifier)
	for carR in l_carsR:
		car = Car(img.img, carR, plateCassifier)
		car.setPlateText(processPlateText(car, net))
		img.addCar(car)

#file = "testing_ocr.txt"
#f = open(file, 'w')

for img in vImages:
예제 #17
0
'''
isn't working
https://github.com/skvark/opencv-python/issues/46
goto above link to see unresolved issue

cv2.namedWindow(filename[-1], cv2.WINDOW_NORMAL)
cv2.imshow(filename[-1], image)
key = cv2.waitKey(0) & 0xFF #passing zero makes it wait undefinitely for a key stroke
if key == 113: #ASCII decimal value for key 'q', on press, exit from image
    cv2.destroyAllWindows()
'''

#cv2.imwrite('../data/7-resized.png', image)
# Downscale to the 28x28 network input size and flatten to a column vector.
image = cv2.resize(image, (28, 28))
image = image.reshape(784, 1)
net_trained = network2.load('network2_params.json')
output = net_trained.feedforward(image)
# The predicted digit is the index of the strongest output activation.
output_digit = np.argmax(output)
'''
text = 'File name-\n' + filepath + '\n\nOutput layer activations-\n' + \
        str(output) + '\n\nRecognized digit-\n' + str(output_digit)
'''

filepath_new = filepath[:-4] + '_resized.jpg'

# Blow the 28x28 image back up for display and save it next to the input.
image = image.reshape(28, 28)
image = cv2.resize(image, (512, 512))
cv2.imwrite(filepath_new, image)

window = Gtk.Window(title=filename[-1])
예제 #18
0
파일: testRun_2_1.py 프로젝트: laputian/dml
import numpy as np
import network2
import mnist_loader

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

net = network2.load('net2')
imgIndex = 45
testImg = mnist_loader.load_data_wrapper()[2][imgIndex]

prediction = np.argmax(net.feedforward(testImg[0]))
value = testImg[1]

print "The predicted value is " + str(prediction) +" . The actual value is " + str(value) +"."

예제 #19
0
# training_data,test_data = notmnist_loader.load_data_wrapper()

### load original data
# train_data,valid_data, test_data, \
# train_labels,valid_labels, test_labels = notmnist_loader.add_noise2_loader()

### load_new_data
### load_new_data
train_data, valid_data, train_labels, valid_labels = notmnist_loader.load_new_dataset()

### the first step. after you finished training this step, you can comment the folliwing line out
# testAutoEncoder(train_data, valid_data,save_fileName= "models/SAE_step1", sizes= [784, 500, 784],
#                 epochs=20, mini_batch_size=10, eta=0.1, lmbda=0.0)


# Stacked-autoencoder pipeline: each stage's hidden representation is the
# input of the next stage.
net1 = network2.load("models/SAE_step1")
train_data1, valid_data1 = getIntermediateDataset(train_data, valid_data, net1)

### the second step, after you finished training this step, you can comment the folliwing line out
# testAutoEncoder(train_data1, valid_data1,save_fileName= "models/SAE_step2", sizes= [500, 100, 500],
#                 epochs=20, mini_batch_size=10, eta=0.1, lmbda=0.0)

net2 = network2.load("models/SAE_step2")
train_data2, valid_data2 = getLastDataset(train_data1, valid_data1, train_labels, valid_labels, net2)

### classification model
# Final supervised classifier trained on the stacked 100-dim features.
net3 = network2.Network([100, 10],cost=network2.CrossEntropyCost)
net3.SGD(train_data2, epochs=20, mini_batch_size=10, eta=0.1, lmbda=0.0,
         evaluation_data=valid_data2,
         monitor_training_cost=True, monitor_evaluation_cost=True,
         monitor_training_accuracy=True, monitor_evaluation_accuracy=True, confusionMatrix=True)
예제 #20
0
import mnist_loader
import network2
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
# net = network2.Network([784, 70, 784], cost=network2.CsrossEntropyCost)

net = network2.load("../trained_networks/mnist_network.txt")
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
# net.large_weight_initializer()
# net.SGD(training_data, 30, 10, 0.03, evaluation_data=validation_data, lmbda = 5.0)
#
# print net.total_cost(training_data, 0)
# net.save("demo_network.txt")
# net = network2.load("demo_network.txt")
#
# # # img = net.feedforward(training_data[0][0])
# Save the two source digits used as interpolation endpoints.
plt.imsave("demo_99.png", training_data[53][0].reshape(28, 28), cmap=cm.gray)
plt.imsave("demo_.png", training_data[15][0].reshape(28, 28), cmap=cm.gray)

#
# print net.total_cost(training_data, 0)

# Interpolate between the two digits via network2.transform and save each
# frame (range(1) currently emits only frame 0 of the intended 45).
for i in range(1):
    img = network2.transform(training_data[53][0], training_data[15][0],
                             (1.0 * i) / 45, net)
    string = "demo_" + str(i) + ".png"
    plt.imsave(string, img.reshape(28, 28), cmap=cm.gray)
예제 #21
0
import network2
import notmnist_loader
import numpy as np

# Trained denoising network whose first layer produces the hidden features.
net = network2.load("models/DenoiseModelAJ")

# Sanity check: dump the shapes of the first-layer weights and biases.
print np.shape(net.weights[0]), np.shape(net.weights[1]), np.shape(net.biases[0])

train_data, test_data, train_labels, test_labels = notmnist_loader.add_noise2_loader()

def getOutputOfHiddenUnits(data):
    """Compute the first-layer response W0·x + b0 for every sample.

    :param data: iterable of (x, label) pairs; only x is used.
    :return: list of vectors, one per sample.

    NOTE(review): no activation function is applied here — presumably the
    raw affine output is intended; confirm against the caller.
    """
    # IDIOM: iterate the pairs directly instead of indexing with
    # xrange(len(data)); the label was read but never used originally.
    return [np.dot(net.weights[0], x) + net.biases[0] for x, _ in data]

def getNewDataset(train_data, test_data, train_labels, test_labels):
    """Build (training, testing) datasets of hidden-unit activations.

    Training targets are one-hot vectors; test targets keep the raw labels.
    """
    hidden_train = getOutputOfHiddenUnits(train_data)
    hidden_test = getOutputOfHiddenUnits(test_data)

    # One-hot encode only the training labels, mirroring the usual
    # network2 train/test dataset convention.
    onehot_train = [network2.vectorized_result(label) for label in train_labels]

    return (zip(hidden_train, onehot_train),
            zip(hidden_test, test_labels))

# Classifier that consumes the 500-dim hidden activations produced above.
newNet = network2.Network([500, 100, 10])
new_train_data, new_test_data = getNewDataset(train_data, test_data, train_labels, test_labels)