def train_and_test(training, testing):
    # create ann object
    ann = cv2.ANN_MLP()
    ann.create(np.array([len(training[0][0]), 8, 8, 1]))

    # reshape the outputs into an Nx1 column vector
    output_col = training[1][np.newaxis].T
    # make sure inputs and outputs are same size
    assert len(training[1]) == len(training[0])
    # train the ann
    ann.train(training[0], output_col,
              np.array([1] * len(training[0]), dtype=np.float32))
    # test the ann
    retval, outputs = ann.predict(testing[0])
    print "results:", outputs
    print "actual:", testing[1]
    assert len(outputs) == len(testing[1])
    correct = 0
    error = 0
    # calculate error
    for i in range(len(outputs)):
        if round(outputs[i], 0) == testing[1][i]:
            correct += 1
        error += abs(testing[1][i] - outputs[i])
    print "correct:", correct
    return correct, error
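A usage sketch for train_and_test, assuming features and labels are NumPy float32 arrays (the toy data below is purely illustrative):

import numpy as np
import cv2

# hypothetical toy data: four 2-feature samples with binary labels
features = np.array([[0, 1], [1, 0], [0, 0], [1, 1]], dtype=np.float32)
labels = np.array([1, 1, 0, 0], dtype=np.float32)
training = (features, labels)
testing = (features, labels)  # reuse the training set, just to illustrate
correct, error = train_and_test(training, testing)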
Example #2
    def __init__(self, layer_sizes, class_labels, params=None):
        """Constructor

            The constructor initializes the MLP.

            :param layer_sizes:   array of layer sizes [input, hidden,
                                  output]
            :param class_labels:  vector of human-readable (string) class 
                                  labels
            :param params:        MLP training parameters.
                                  For now, default values are used.
                                  Hyperparameter exploration can be achieved
                                  by embedding the MLP process flow in a
                                  for-loop that classifies the data with
                                  different parameter values, then picking
                                  the values that yield the best accuracy
                                  (see the sketch after this example).
                                  Default: None
        """
        self.num_features = layer_sizes[0]
        self.num_classes = layer_sizes[-1]
        self.class_labels = class_labels
        self.params = params or dict()

        # initialize MLP
        self.model = cv2.ANN_MLP()
        self.model.create(layer_sizes)
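A minimal sketch of the hyperparameter exploration described in the docstring, assuming a hypothetical evaluate() helper that trains the MLP with the given parameters and returns validation accuracy:

best_acc, best_params = 0.0, None
for dw_scale in [0.001, 0.01, 0.1]:
    for moment_scale in [0.0, 0.1, 0.5]:
        params = dict(term_crit=(cv2.TERM_CRITERIA_COUNT
                                 | cv2.TERM_CRITERIA_EPS, 1000, 0.0001),
                      train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
                      bp_dw_scale=dw_scale,
                      bp_moment_scale=moment_scale)
        acc = evaluate(params)  # hypothetical: train, then score held-out data
        if acc > best_acc:
            best_acc, best_params = acc, params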
Example #3
 def __init__(self, settings):
     self.revclassdict = {
         "0": 0,
         "1": 1,
         "2": 2,
         "3": 3,
         "4": 4,
         "5": 5,
         "6": 6,
         "7": 7,
         "8": 8,
         "9": 9,
         ",": 10,
         "-": 11
     }
     self.keys = len(self.revclassdict)
     layers = np.array([400, 71, self.keys])
     self.nnetwork = cv2.ANN_MLP(layers, 1, 0.65, 1)
     datapath = (settings.storage_path + os.sep +
                 "user_numbers.xml").encode(sys.getfilesystemencoding())
     if isfile(datapath):
         self.nnetwork.load(datapath, "OCRMLP")
     else:
         datapath = (settings.app_path + os.sep + "numbers.xml").encode(
             sys.getfilesystemencoding())
         self.nnetwork.load(datapath, "OCRMLP")
     self.classdict = dict(
         (v, k.decode("utf-8")) for k, v in self.revclassdict.iteritems())
Example #4
    def __init__(self):
        self.imagenes = np.zeros((1, 320 * 120), 'float')
        self.etiquetas = np.zeros((1, 3), 'float')
        self.capas = np.int32([320 * 120, 34, 3])
        self.red_neuronal = cv2.ANN_MLP()

        self.cargar_datos()
Example #5
def neuronal_network():

    inp = [[0, 1], [1, 0], [0, 0], [1, 1]]
    out = [1, 1, 0, 0]
    # augment the data set with points near each input
    for i in range(0, len(inp)):
        nears = get_nearest_points(inp[i])
        #print(nears)
        inp.extend(nears)

        outs = [out[i]] * len(nears)
        #print ("outs: ", outs)
        out.extend(outs)

    inputs = np.array(inp, dtype="float32")
    targets = np.array(out, dtype="float32")
    layer_sizes = np.array([2, 1000, 1])

    nnxor = cv2.ANN_MLP(layer_sizes)

    step_size = 0.01
    momentum = 0.0
    nsteps = 10000
    max_err = 0.0001
    condition = cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS

    print("Inputs: ")
    print(inputs)
    print("Targets: ")
    print(targets)

    criteria = (condition, nsteps, max_err)

    # params is a dictionary with relevant things for NNet training.
    params = dict(term_crit=criteria,
                  train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
                  bp_dw_scale=step_size,
                  bp_moment_scale=momentum)

    num_iters = nnxor.train(inputs, targets, None, params=params)

    print("Num iterations {}".format(num_iters))

    retval, predictions = nnxor.predict(inputs)
    sols = []
    accuracy = 0
    for p in range(0, len(predictions)):
        val = int(round(predictions[p]))
        sols.append(val)
        if val == out[p]:
            accuracy += 1

    print("Solutions: ")
    print(np.array(sols))

    accuracy = 100.0 * accuracy / len(out)
    print("Accuracy: {}%".format(accuracy))
Example #6
 def __init__(self, parent, image, ocr_data, path):
     self.ocr_data = ocr_data
     
     layers = np.array([400, 32, 36])
     self.nnetwork = cv2.ANN_MLP(layers, 1, 0.6, 1)
     self.nnetwork.load(path + os.sep + "mlp.xml", "OCRMLP")
     self.classdict = {
         0: "A", 1: "B", 2: "C", 3: "D", 4: "E", 5: "F", 6: "G", 7: "H",
         8: "I", 9: "J", 10: "K", 11: "L", 12: "M", 13: "N", 14: "O", 15: "P",
         16: "Q", 17: "R", 18: "S", 19: "T", 20: "U", 21: "V", 22: "W",
         23: "X", 24: "Y", 25: "Z", 26: "Ä", 27: "Ö", 28: "Ü", 29: "À",
         30: "É", 31: "È", 32: "Ê", 33: "'", 34: "-", 35: ".",
     }
     self.ocrSnippets(parent, self.ocr_data, image)
Example #7
    def trainProcess(self, classdict):
        KEYS = len(classdict)
        revclassdict = dict(
            (v, k.decode("utf-8")) for k, v in classdict.iteritems())
        dictlength = 0
        for key in classdict:
            if self.base is not None and key in self.base:
                dictlength += len(self.base[key]) // 400
            if self.user is not None and key in self.user:
                dictlength += len(self.user[key]) // 400
        if dictlength == 0:
            return None
        data = np.empty((dictlength, 400), dtype='float32')
        classes = -1 * np.ones((dictlength, KEYS), dtype='float32')

        counter = 0
        for key in classdict:
            # base data
            if self.base is not None and key in self.base:
                for i in range(len(self.base[key]) // 400):
                    for j in range(400):
                        data[counter][j] = 1.0 if self.base[key][i * 400 + j] else 0.0
                    classes[counter][classdict[key]] = 1.0
                    counter += 1
            # user data
            if self.user is not None and key in self.user:
                for i in range(len(self.user[key]) // 400):
                    for j in range(400):
                        data[counter][j] = 1.0 if self.user[key][i * 400 + j] else 0.0
                    classes[counter][classdict[key]] = 1.0
                    counter += 1
        # parameter setup
        layers = np.array([400, 71, KEYS])
        nnetwork = cv2.ANN_MLP(layers, 1, 0.65, 1)
        params = dict(term_crit=(cv2.TERM_CRITERIA_COUNT
                                 | cv2.TERM_CRITERIA_EPS, 1000, 0.00001),
                      train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
                      bp_dw_scale=0.01,
                      bp_moment_scale=0.01)
        # training
        iterations = nnetwork.train(data, classes, None, params=params)
        self.message += "Iterations: " + str(iterations) + "\n"
        return nnetwork
Example #8
 def __init__(self, settings):
     self.revclassdict = {
         "A": 0,
         "B": 1,
         "C": 2,
         "D": 3,
         "E": 4,
         "F": 5,
         "G": 6,
         "H": 7,
         "I": 8,
         "J": 9,
         "K": 10,
         "L": 11,
         "M": 12,
         "N": 13,
         "O": 14,
         "P": 15,
         "Q": 16,
         "R": 17,
         "S": 18,
         "T": 19,
         "U": 20,
         "V": 21,
         "W": 22,
         "X": 23,
         "Y": 24,
         "Z": 25,
         "1": 26,
         "2": 27,
         "3": 28,
         "4": 29,
         "5": 30,
         "6": 31,
         "7": 32,
         "8": 33,
         "9": 34,
         "-": 35,
         ".": 36,
         "'": 37,
         "&": 38,
         "[": 39,
         "]": 40
     }
     self.keys = len(self.revclassdict)
     layers = np.array([400, 71, self.keys])
     self.nnetwork = cv2.ANN_MLP(layers, 1, 0.65, 1)
     datapath = (settings.storage_path + os.sep +
                 "user_station.xml").encode(sys.getfilesystemencoding())
     if isfile(datapath):
         self.nnetwork.load(datapath, "OCRMLP")
     else:
         datapath = (settings.app_path + os.sep + "station.xml").encode(
             sys.getfilesystemencoding())
         self.nnetwork.load(datapath, "OCRMLP")
     self.classdict = dict(
         (v, k.decode("utf-8")) for k, v in self.revclassdict.iteritems())
Example #9
 def __init__(self, layerSizes, classLabels, params=None):
     self.numFeatures = layerSizes[0]
     self.numClasses = layerSizes[-1]
     self.classLabels = classLabels
     self.params = params or dict()
     
     # initialize MLP
     self.model = cv2.ANN_MLP()
     self.model.create(layerSizes)
Example #10
 def __init__(self, image, ocr_data, path):
     self.ocr_data = ocr_data
     layers = np.array([400, 32, 46])
     self.nnetwork = cv2.ANN_MLP(layers, 1, 0.6, 1)
     self.nnetwork.load(path + os.sep + "text.xml", "OCRMLP")
     self.classdict = {
         0: "A",
         1: "B",
         2: "C",
         3: "D",
         4: "E",
         5: "F",
         6: "G",
         7: "H",
         8: "I",
         9: "J",
         10: "K",
         11: "L",
         12: "M",
         13: "N",
         14: "O",
         15: "P",
         16: "Q",
         17: "R",
         18: "S",
         19: "T",
         20: "U",
         21: "V",
         22: "W",
         23: "X",
         24: "Y",
         25: "Z",
         26: "Ä",
         27: "Ö",
         28: "Ü",
         29: "À",
         30: "É",
         31: "È",
         32: "Ê",
         33: "'",
         34: "-",
         35: ".",
         36: "0",
         37: "1",
         38: "2",
         39: "3",
         40: "4",
         41: "5",
         42: "6",
         43: "7",
         44: "8",
         45: "9",
     }
     self.ocrSnippets(self.ocr_data, image)
Example #11
 def create(self):
     self.numberOutput = NN_OUTPUT_NUMBER
     self.numberInput  = NN_INPUT_LAYER
     self.hiddenLayer  = NN_HIDDEN_LAYER
     self.imgNNoutput = np.zeros((50, self.numberOutput * 50), dtype=np.uint8)
     self.stepReplay = (MAX_ANGLE - MIN_ANGLE) / (self.numberOutput - 1)
     # Create model
     layer_sizes = np.int32([self.numberInput, self.hiddenLayer, self.numberOutput])
     print 'build model ', self.name, ' layer size = ', layer_sizes
     self.model = cv2.ANN_MLP()
     self.model.create(layer_sizes)
Example #12
    def __init__(self, layer_sizes, class_labels, params=None, 
                 class_mode="one-vs-all"):
        self.num_features = layer_sizes[0]
        self.num_classes = layer_sizes[-1]
        self.class_labels = class_labels
        self.params = params or dict()
        self.mode = class_mode

        # initialize MLP
        self.model = cv2.ANN_MLP()
        self.model.create(layer_sizes)
Example #13
 def __init__(self, settings):
     self.revclassdict = {"*": 0, "+": 1, "#": 2}
     self.keys = len(self.revclassdict)
     layers = np.array([400, 71, self.keys])
     self.nnetwork = cv2.ANN_MLP(layers, 1, 0.65, 1)
     datapath = (settings.storage_path + os.sep + "user_level.xml").encode(
         sys.getfilesystemencoding())
     if isfile(datapath):
         self.nnetwork.load(datapath, "OCRMLP")
     else:
         datapath = (settings.app_path + os.sep + "level.xml").encode(
             sys.getfilesystemencoding())
         self.nnetwork.load(datapath, "OCRMLP")
     self.classdict = dict(
         (v, k.decode("utf-8")) for k, v in self.revclassdict.iteritems())
Example #14
    def __init__(self, direccion):

        self.imagen_real = None
        self.red_neuronal = cv2.ANN_MLP()
        self.capas = np.int32([320 * 120, 34, 3])

        self.red_neuronal.create(self.capas)
        self.red_neuronal.load(
            'C:/Users/lluis/Desktop/XXII VICTP 2017/GENERADO/CODIGO/Algoritmo de Entrenamiento/red.xml'
        )

        # Create a server and bind it to the desired address.
        self.crear_servidor(direccion)

        # Declare a Serial object to communicate with the Arduino.
        self.arduino = serial.Serial('COM5', 115200)
Example #15
def trainNet(netParams, maxIter, maxError, scale, cleanLabels, cleanFeatures):
    model = cv2.ANN_MLP()
    model.create(netParams)
    criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, maxIter,
                maxError)
    params = dict(term_crit=criteria,
                  train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
                  bp_dw_scale=scale,
                  bp_moment_scale=0.0)
    print "Training started..."
    num_iter = model.train(cleanFeatures, cleanLabels, None, params=params)
    print "Training completed in %d iterations..." % num_iter
    m = raw_input("Enter name of xml to save model (no extension required): ")
    model.save(m + ".xml")
    print "File saved successfully..."
    return m + ".xml"
    def __init__(self, capa_oculta):
        # The constructor defines the following attributes:
        #  * perceptron: an OpenCV ANN_MLP network model.
        #  * imagenes: a matrix containing the images used to train
        #    the neural network.
        #  * etiquetas: the direction patterns that correspond to each
        #    row of the image matrix.
        #  * img_prueba: test images for the neural network.
        #  * etq_prueba: labels associated with the test images.
        self.perceptron = cv2.ANN_MLP(np.array([200 * 100, capa_oculta, 3]))
        self.imagenes = np.zeros((1, 200 * 100))
        self.etiquetas = np.zeros((1, 3), 'float')
        self.img_prueba = np.zeros((1, 200 * 100))
        self.etq_prueba = np.zeros((1, 3), 'float')

        # Load the training and test data for the neural network.
        self.cargar_datos()
Example #17
def trainNetwork(input_, response_):
	logger.info('...training neural network')
	# networkParams = dict(train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP, term_crit=(cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 10000, 0.0000000001), bp_dw_scale=0.1, bp_moment_scale=0.1, flags=(cv2.ANN_MLP_UPDATE_WEIGHTS | cv2.ANN_MLP_NO_INPUT_SCALE))
	
	networkParams = dict(train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
	                     term_crit=(cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS,
	                                5000, 0.000001),
	                     bp_dw_scale=0.1,
	                     bp_moment_scale=0.1)


	# networkParams = dict(train_method=cv2.ANN_MLP_TRAIN_PARAMS_RPROP, term_crit=(cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 10000, 0.0000000001), rp_dw0=0.1, rp_dw_plus=1.2, rp_dw_minus=0.5, rp_dw_min=0.1, rp_dw_max=50, flags=(cv2.ANN_MLP_UPDATE_WEIGHTS | cv2.ANN_MLP_NO_INPUT_SCALE))

	# networkParams = dict(train_method=cv2.ANN_MLP_TRAIN_PARAMS_RPROP, term_crit=(cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 10000, 0.0000000001), rp_dw0=0.1, rp_dw_plus=1.2, rp_dw_minus=0.5, rp_dw_min=0.1, rp_dw_max=50)


	network = cv2.ANN_MLP()
	network.create(layerSizes=numpy.array([len(input_[0]),5,1], dtype=numpy.int32), activateFunc=cv2.ANN_MLP_SIGMOID_SYM, fparam1=1, fparam2=1)

	# map {0, 1} responses to {-1, +1} for the symmetric sigmoid activation
	rr = 2 * response_ - 1
	network.train(inputs=input_, outputs=rr, sampleWeights=numpy.ones(len(response_), dtype = numpy.float32), params=networkParams)

	return network
Example #18
    def __init__(self,
                 layer_sizes,
                 class_labels,
                 params=None,
                 class_mode="one-vs-all"):
        """Constructor

            The constructor initializes the MLP.

            :param layer_sizes:   array of layer sizes [input, hidden,
                                  output]
            :param class_labels:  vector of human-readable (string) class
                                  labels
            :param class_mode:    Classification mode:
                                  - "one-vs-all": The one-vs-all strategy 
                                    involves training a single classifier per 
                                    class, with the samples of that class as 
                                    positive samples and all other samples as 
                                    negatives.
                                  - "one-vs-one": The one-vs-one strategy 
                                    involves training a single classifier per
                                    class pair, with the samples of the first 
                                    class as positive samples and the samples 
                                    of the second class as negative samples
                                    (a target-encoding sketch follows this
                                    example).
            :param params:        MLP training parameters.
                                  For now, default values are used.
                                  Hyperparameter exploration can be achieved
                                  by embedding the MLP process flow in a
                                  for-loop that classifies the data with
                                  different parameter values, then picking
                                  the values that yield the best accuracy.
                                  Default: None
        """
        self.num_features = layer_sizes[0]
        self.num_classes = layer_sizes[-1]
        self.class_labels = class_labels
        self.params = params or dict()
        self.mode = class_mode

        # initialize MLP
        self.model = cv2.ANN_MLP()
        self.model.create(layer_sizes)
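For the "one-vs-all" mode described in the docstring, N-class labels are commonly unrolled into an N-column target matrix with +1 in the true-class column and -1 elsewhere (the same convention Examples #7 and #20 use); a sketch of that encoding, with illustrative names:

import numpy as np

def one_vs_all_targets(labels, num_classes):
    # one row per sample: +1 for the true class, -1 everywhere else
    targets = -1.0 * np.ones((len(labels), num_classes), dtype=np.float32)
    targets[np.arange(len(labels)), labels] = 1.0
    return targets

# one_vs_all_targets([0, 2, 1], 3) ->
# [[ 1., -1., -1.], [-1., -1.,  1.], [-1.,  1., -1.]]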
Example #19
def predict(fileName, layers):
    nnet = cv2.ANN_MLP()
    nnet.create(layers)
    nnet.load(fileName)
    correctLabels = loadCorrectLabels()
    trainingFeatures = loadTrainingFeatures()
    i = 0
    count = 0
    for feature in trainingFeatures:
        prediction = np.zeros((1, 4), 'float')
        test = feature.reshape((1, 76800)).astype('float')
        nnet.predict(test, prediction)
        prediction = prediction.argmax(-1) + 1
        correct = correctLabels[i].argmax(-1) + 1
        if prediction[0] == correct:
            count = count + 1
        i = i + 1
    print "Correct Predictions: %d" % count
    a = 100.0 * count / i
    print "Accuracy: %f" % a
Example #20
def initANN(filename, species, nhidden, step_size, momentum, nsteps, max_err):
    trainingdata = []
    with open(filename, 'rb') as f:
        reader = csv.reader(f)
        for row in reader:
            trainingdata.append(row[:-1])
    inputs = np.empty((len(trainingdata), len(trainingdata[0])), 'float')

    for i in range(len(trainingdata)):
        a = np.array(list(trainingdata[i]))
        f = a.astype('float')
        inputs[i, :] = f[:]

    targets = -1 * np.ones((len(inputs), len(species)), 'float')

    i = 0
    with open(filename, 'rb') as f:
        reader = csv.reader(f)
        for row in reader:
            targets[i][species.index(row[-1])] = 1
            i = i + 1

    ninputs = len(trainingdata[0])  #number of features
    noutput = len(species)  #number of classes
    layers = np.array([ninputs, nhidden, noutput])

    nnet = cv2.ANN_MLP(layers)

    condition = cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS
    criteria = (condition, nsteps, max_err)

    params = dict(term_crit=criteria,
                  train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
                  bp_dw_scale=step_size,
                  bp_moment_scale=momentum)
    num_iter = nnet.train(inputs, targets, None, params=params)

    return nnet
Example #21
    def train(self, dataset):
        all_samples, all_labels = dataset
        nn_config = np.array((all_samples.shape[1], 300, len(set(all_labels))),
                             dtype=np.int32)
        nn = cv2.ANN_MLP(nn_config)

        trainX = dataset[0]
        self.lb = preprocessing.LabelBinarizer()
        trainY = self.lb.fit_transform(dataset[1]).astype(np.float32)

        # Have to split into batches. See http://code.opencv.org/issues/4407.
        batch = 0
        while batch * self.MAX_TRAINING_CHUNK < trainX.shape[0]:
            print 'training batch %d of %d' % (
                batch + 1, 1 + trainX.shape[0] / self.MAX_TRAINING_CHUNK)
            batch_slice = (slice(self.MAX_TRAINING_CHUNK * batch,
                                 self.MAX_TRAINING_CHUNK * (batch + 1)),
                           slice(None, None))
            trainXbatch = trainX[batch_slice]
            trainYbatch = trainY[batch_slice]
            sampleWeights = np.ones((len(trainYbatch), 1))
            nn.train(trainXbatch, trainYbatch, sampleWeights=sampleWeights)
            batch += 1
        self.nn = nn
Example #22
import numpy
import cv2

# ninputs: number of inputs
noutputs = 3  # or 4, depending on whether it controls movement or a motor
nhidden = 1
# inputs
# targets

# layers
nnet = cv2.ANN_MLP(layers)

# Some parameters for learning. Step size is the gradient step size for backpropagation
step_size = 0.01

# Max steps of training
nsteps = 10000

# Error threshold for halting training
max_err = 0.0001

# When to stop: whichever comes first, count or error
condition = cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS

# Tuple of termination criteria: the condition first, then the number of
# steps, then the error tolerance. The second and third entries are
# ignored if the condition does not imply them.
criteria = (condition, nsteps, max_err)

# params is a dictionary with relevant things for NNet training.
params = dict(term_crit=criteria,
              train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
              bp_dw_scale=step_size,
              bp_moment_scale=0.0)
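The template stops at the params dict; a hypothetical completion with XOR-style toy data, following the flow of the other examples:

inputs = numpy.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=numpy.float32)
targets = numpy.array([[0], [1], [1], [0]], dtype=numpy.float32)
layers = numpy.array([2, 4, 1])
nnet = cv2.ANN_MLP(layers)
num_iter = nnet.train(inputs, targets, None, params=params)
retval, predictions = nnet.predict(inputs)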
Example #23
            inpt[index] = np.resize(rows, (1, r * c))
            # 1st column in target -> group1, 2nd column -> group2,
            # 3rd column -> group3
            target[index][itr] = 1.0
            index += 1
            countOfImages += 1

    print "Total Number of Images: ", countOfImages
    print "final_trained: ", inpt.shape
    print "final_groups: ", target.shape

    total_inputs = len(inpt[0])
    total_hidden = 5
    total_outputs = 3  #len(inpt)
    ann_layers = np.array([total_inputs, total_hidden, total_outputs])
    NN = cv2.ANN_MLP(ann_layers)

    #Now we train our neural network
    # NOTE: the training "params" also matter in determining the accuracy
    iter = NN.train(np.float32(inpt), np.float32(target), None,
                    params=p.params)

    # NOTE: in prediction, each feature descriptor of the test image is
    # categorized into one of the 3 groups
    print "Number of iterations: ", iter

    #Testing the trained system
    testPath = "E:\Aditya\Python_RIT\FCV\Project\Database_S\TestImages\Test_2" + "/"
    test_img = os.listdir(testPath)
    count = 0
Example #24
 def __init__(self):
     self.model = cv2.ANN_MLP()
     self.layer_sizes = np.int32([50400, 32, 3])
     self.model.create(self.layer_sizes)
     self.model.load('ann_param/ann.xml')
Example #25
xmax = np.amax(x_train, axis=0)
xmin = np.amin(x_train, axis=0)
x_train = (x_train - xmin) / (xmax - xmin)

ymax = np.amax(y_train, axis=0)
ymin = np.amin(y_train, axis=0)
y_train = (y_train - ymin) / (ymax - ymin)

print "x_train.shape = ", x_train.shape
sample_number = x_train.shape[0]

test_start = 1150
test_end = 1200
y_test = y_sample[test_start:test_end, :]
x_test = x_sample[test_start:test_end, :]

ann = cv2.ANN_MLP()

layer_sizes = np.int32([5, 100, 30, 1])
ann.create(layer_sizes)

params = dict(term_crit=(cv2.TERM_CRITERIA_COUNT, 5000, 0.001),
              train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
              bp_dw_scale=0.01,
              bp_moment_scale=0.0)

ann.train(x_train, y_train, None, params=params)

retval, y_predict = ann.predict(x_test)

y_test_predict = y_predict * (ymax - ymin) + ymin
from matplotlib import pyplot as plt
Example #26
 def load(self, fname, dataset):
     self.nn = cv2.ANN_MLP()
     self.nn.load(fname)
     self.lb = pickle.load(open(fname + '_lb'))
     return self
Example #27
number_of_training_set = 30000
train_number_of_images, train_images = readImage(trainimagepath, to_size,
                                                 number_of_training_set)
train_number_of_images, train_labels = readLabel(trainlabelpath,
                                                 number_of_training_set)
##train_images = train_images * 255
##train_images = cv2.normalize(train_images)

number_of_test_set = 10000
test_number_of_images, test_images = readImage(testimagepath, to_size,
                                               number_of_test_set)
test_number_of_images, test_labels = readLabel(testlabelpath,
                                               number_of_test_set)
print 'loaded images and labels.'
########ANN#########
modelnn = cv2.ANN_MLP()
sample_n, var_n = train_images.shape
new_train_labels = unroll_responses(train_labels).reshape(-1, class_n)
layer_sizes = numpy.int32([var_n, 100, class_n])
modelnn.create(layer_sizes)
params = dict(term_crit=(cv2.TERM_CRITERIA_COUNT, 300, 0.01),
              train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
              bp_dw_scale=0.001,
              bp_moment_scale=0.0)
modelnn.train(train_images,
              numpy.float32(new_train_labels),
              None,
              params=params)
ret, resp = modelnn.predict(test_images)
y_val_nn = resp.argmax(-1)
evalfun('nn', y_val_nn, test_labels, test_number_of_images)
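unroll_responses() is not defined in this snippet; a sketch modeled on the helper in OpenCV's letter_recog.py sample, which spreads integer labels into a flat one-hot array (reshaped to (-1, class_n) above):

def unroll_responses(responses, class_n=10):
    # one-hot encode integer labels as a flat float32 array
    sample_n = len(responses)
    new_responses = numpy.zeros(sample_n * class_n, numpy.float32)
    resp_idx = numpy.int32(responses + numpy.arange(sample_n) * class_n)
    new_responses[resp_idx] = 1
    return new_responses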
Example #28
 def __init__(self):
     self.model = cv2.ANN_MLP()
Example #29
        print data.files
        test_temp = data['train']
        test_labels_temp = data['train_labels']
        print test_temp.shape
        print test_labels_temp.shape
    image_array = np.vstack((image_array, test_temp))
    label_array = np.vstack((label_array, test_labels_temp))

test = image_array[1:, :]
test_labels = label_array[1:, :]
print test.shape
print test_labels.shape

# create mlp
layer_sizes = np.int32([38400, 32, 4])
model = cv2.ANN_MLP()
model.create(layer_sizes)
model.load('mlp_xml/mlp.xml')

# generate predictions
t0 = cv2.getTickCount()
retvals, outputs = model.predict(test)
prediction = outputs.argmax(-1)
t1 = cv2.getTickCount()
time = (t1 - t0)/cv2.getTickFrequency()

print 'Prediction:', prediction

true_labels = test_labels.argmax(-1)
print 'True labels:', true_labels
Example #30
 def create(self):
     layer_size = np.int32([38400, 32, 4])
     self.model = cv2.ANN_MLP(layer_size)
     self.model.load('mlp_xml/mlp.xml')