def skelton(self):
    # Ask the user how many layers the network should have
    # (counting the input layer but not the output layer).
    layers = QtGui.QInputDialog.getInteger(
        self, '# of layers',
        "Enter your # of layers\nincluding input but not output")
    layers = layers[0]
    # Collect the size of every layer from the user.
    size_layers = [0] * layers
    for i in range(len(size_layers)):
        size_layers[i] = QtGui.QInputDialog.getInteger(
            self, 'Size of layer', "Enter the size of each layer")[0]
    # Fixed output layer of size 1 and fixed input layer of size 500.
    size_layers.append(1)
    size_layers.insert(0, 500)
    # Rebuild the network and regenerate the training data to match.
    self.n = nt.neuralnet(*size_layers)
    self.d = ds.dataset(size_layers[0], 1)
    self.d.generateDataSet()
    self.n.loadTrainingData(self.d.getTrainingDataset())
def __init__(self, parent=None):
    super(ExampleApp, self).__init__(parent)
    self.setupUi(self)
    # palette = QtGui.QPalette()
    # palette.setBrush(palette.Background, QtGui.QBrush(QtGui.QPixmap("waves.jpg")))
    # self.setPalette(palette)
    self.lineEdit.setDragEnabled(True)
    self.lineEdit.setAcceptDrops(True)
    # Wire up the buttons.
    self.Teach.clicked.connect(self.generations)
    self.Open_file.clicked.connect(self.file_open)
    self.Quit.clicked.connect(self.close_app)
    # Default network: 500 input units, two hidden layers, one output unit.
    self.n = nt.neuralnet(500, 300, 100, 1)
    self.d = ds.dataset(500, 1)
    self.d.generateDataSet()
    self.n.loadTrainingData(self.d.getTrainingDataset())
    self.setWindowIcon(QtGui.QIcon('wolf.png'))
    self.setWindowTitle("OCR GUI by Bryan Moore")
    self.construct.clicked.connect(self.skelton)
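# A minimal sketch (an assumption, not part of the original source) of what the
# `generations` slot connected to the Teach button above might look like. It
# uses only calls that appear elsewhere in this code (n.teach) plus the
# standard QProgressBar.setValue; the `epochs` count is hypothetical.
def generations(self):
    epochs = 100  # hypothetical number of training passes
    for i in range(epochs):
        self.n.teach(1)  # one training pass per iteration
        # Report progress as a percentage on the GUI's progress bar.
        self.progressBar.setValue(int((i + 1) * 100.0 / epochs))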
from ocrn import dataset as ds
from ocrn import feature as ft
from ocrn import neuralnet as nt
import numpy as np

print "Ocrn: Optical Character Recognition using Neural Network\nLatest version available at http://github.com/swvist\n"

# 100 input units, one hidden layer of 80 units, one output unit.
n = nt.neuralnet(100, 80, 1)
print "Neural Network Initialized"

d = ds.dataset(100, 1)
print "Training Data Set Initialized"

if d.generateDataSet():
    print "Training Data Set Generated"
if n.loadTrainingData(d.getTrainingDataset()):
    print "Training Data Set loaded"

while True:
    x = raw_input("q: quit \t t: teach \t e: test \nWhat?\t:\t")
    if x == "q":
        break
    elif x == "t":
        t = int(raw_input("How many times?\t:\t"))
        # n.teach(t)
        n.teachUntilConvergence(max=t)
    elif x == "e":
        e = raw_input("Enter input file\t:\t")
        x = n.activate(ft.feature.getImageFeatureVector(e))
        print "\nThere is a high probability that the image is '" + str(chr(x)) + "'\n"
from ocrn import dataset as ds
from ocrn import feature as ft
from ocrn import neuralnet as nt
import numpy as np

print "Ocrn: Optical Character Recognition using Neural Network\nLatest version available at http://github.com/swvist\n"

# 100 input units, one hidden layer of 80 units, one output unit.
n = nt.neuralnet(100, 80, 1)
print "Neural Network Initialized"

d = ds.dataset(100, 1)
print "Training Data Set Initialized"

if d.generateDataSet():
    print "Training Data Set Generated"
if n.loadTrainingData(d.getTrainingDataset()):
    print "Training Data Set loaded"

while True:
    x = raw_input("q: quit \t t: teach \t e: test \nWhat?\t:\t")
    if x == "q":
        break
    elif x == "t":
        t = int(raw_input("How many times?\t:\t"))
        n.teach(t)
    elif x == "e":
        e = raw_input("Enter input file\t:\t")
        x = n.activate(ft.feature.getImageFeatureVector(e))
        print "\nThere is a high probability that the image is '" + str(unichr(x)) + "'\n"
    else:
        print "Invalid option"
from ocrn import dataset as ds
from ocrn import feature as ft
from ocrn import neuralnet as nt
import numpy as np

print "\n \nOCR Prototype: Neural Networks w/ training data and test data \n \n"

# 500 input units, one hidden layer of 100 units, one output unit.
n = nt.neuralnet(500, 100, 1)
print "Neural Network Initialized"

d = ds.dataset(500, 1)
print "Training Data Set Initialized"

if d.generateDataSet():
    print "Training Data Set Generated"
if n.loadTrainingData(d.getTrainingDataset()):
    print "Training Data Set loaded"

while True:
    x = raw_input("q: quit \t t: teach \t e: test \nWhat?\t:\t")
    if x == "q":
        break
    elif x == "t":
        t = int(raw_input("How many times do you want to train your data?\t:\t"))
        n.teach(t)
    elif x == "e":
        e = raw_input("Enter the absolute path to the input file (unquoted)\t:\t")
        x = n.activate(ft.feature.getImageFeatureVector(e))
        print "\nThe most probable character for that image is '" + str(unichr(x)) + "'\n"
    else:
        print "Invalid option"
def __init__(self):
    """Create a new Grinder object."""
    self.neural_network = nn.neuralnet(100, 80, 1)
    self.data_set = ds.dataset(100, 1)
def reset(self):
    """Discard the current network and data set and start fresh."""
    self.neural_network = nn.neuralnet(100, 80, 1)
    self.data_set = ds.dataset(100, 1)
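# A minimal usage sketch (an assumption, not in the original source) showing how
# a Grinder instance would drive the same ocrn pipeline as the CLI scripts
# above: generate and load training data, train, then classify an image file.
# `image_path` and the 50-pass training count are hypothetical.
from ocrn import feature as ft

g = Grinder()
if g.data_set.generateDataSet():
    g.neural_network.loadTrainingData(g.data_set.getTrainingDataset())
g.neural_network.teach(50)  # 50 training passes, chosen arbitrarily
image_path = "sample.png"  # hypothetical input image
code = g.neural_network.activate(ft.feature.getImageFeatureVector(image_path))
print "Recognized character: '" + str(chr(code)) + "'"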