Code Example #1
File: helpers.py Project: veronikaKochugova/DropWeak
def xmlInvariance(n, forwardpasses=1):
    """ try writing a network to an xml file, reading it, rewrite it, reread it, and compare
    if the result looks the same (compare string representation, and forward processing 
    of some random inputs) """
    tmpfile = tempfile.NamedTemporaryFile()
    f = tmpfile.name
    NetworkWriter.writeToFile(n, f)
    tmpnet = NetworkReader.readFrom(f)
    NetworkWriter.writeToFile(tmpnet, f)
    endnet = NetworkReader.readFrom(f)
    netCompare(tmpnet, endnet, forwardpasses, True)
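
For context, a minimal usage sketch of this helper (assuming PyBrain's buildNetwork shortcut and the other names already imported in helpers.py, such as NetworkWriter, NetworkReader, tempfile and netCompare):

from pybrain.tools.shortcuts import buildNetwork

n = buildNetwork(2, 3, 1)           # tiny feed-forward net: 2 inputs, 3 hidden, 1 output
xmlInvariance(n, forwardpasses=5)   # serialize twice and compare the two reloaded copies
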
Code Example #2
File: helpers.py Project: pachkun/Machine_learning
def xmlInvariance(n, forwardpasses = 1):
    """ try writing a network to an xml file, reading it, rewrite it, reread it, and compare
    if the result looks the same (compare string representation, and forward processing 
    of some random inputs) """
    tmpfile = tempfile.NamedTemporaryFile()
    f = tmpfile.name
    NetworkWriter.writeToFile(n, f)
    tmpnet = NetworkReader.readFrom(f)
    NetworkWriter.writeToFile(tmpnet, f)
    endnet = NetworkReader.readFrom(f)
    netCompare(tmpnet, endnet, forwardpasses, True)
Code Example #3
def init_model():
    global model
    with open('./data/list_person.txt', 'r') as lpfile:
        row_count = sum(1 for row in lpfile)
    for i in range(1, row_count + 1):
        net = NetworkReader.readFrom('./model/net%d.xml' % i)
        model.append(net)
Code Example #4
def recog_tower():

    network = NetworkReader.readFrom("tower.xml")

    tokyo_image_files = glob.glob("r/t*")

    tokyo_correct_count = 0
    for image_path in tokyo_image_files:
        image = cv2.imread(image_path)

        data = [0] * 64
        for row_num in range(image.shape[1]):
            for col_num in range(image.shape[0]):
                blue = declease_color(image[col_num, row_num, 0])
                green = declease_color(image[col_num, row_num, 1])
                red = declease_color(image[col_num, row_num, 2])

                data[rgb2bin(red, green, blue)] += 1

        dataset = SupervisedDataSet(64, 2)
        dataset.addSample(data, [1, 0])

        out = network.activateOnDataset(dataset)
        if np.round(out)[0, 0] == 1:
            tokyo_correct_count += 1
    print "tokyo correct count: %d, tokyo correct rate: %.5f" % (
        tokyo_correct_count,
        tokyo_correct_count * 1.0 / len(tokyo_image_files),
    )

    eiffel_image_files = glob.glob("r/e*")

    eiffel_correct_count = 0
    for image_path in eiffel_image_files:
        image = cv2.imread(image_path)

        data = [0] * 64
        for row_num in range(image.shape[1]):
            for col_num in range(image.shape[0]):
                blue = declease_color(image[col_num, row_num, 0])
                green = declease_color(image[col_num, row_num, 1])
                red = declease_color(image[col_num, row_num, 2])

                data[rgb2bin(red, green, blue)] += 1

        dataset = SupervisedDataSet(64, 2)
        dataset.addSample(data, [1, 0])

        out = network.activateOnDataset(dataset)
        if np.round(out)[0, 0] == 1:
            eiffel_correct_count += 1
    print "eiffel correct count: %d, eiffel correct rate: %.5f" % (
        eiffel_correct_count,
        eiffel_correct_count * 1.0 / len(eiffel_image_files),
    )

    print "total correct count: %d, total correct rate: %.5f" % (
        tokyo_correct_count + eiffel_correct_count,
        (tokyo_correct_count + eiffel_correct_count) * 1.0 / (len(tokyo_image_files) + len(eiffel_image_files)),
    )
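
The two loops above build the same 64-bin color histogram, so the feature extraction could be factored out. A sketch only, assuming the project's own declease_color and rgb2bin helpers are in scope:

def color_histogram(image):
    """Count pixels per reduced-RGB bin (64 bins)."""
    data = [0] * 64
    for row_num in range(image.shape[1]):
        for col_num in range(image.shape[0]):
            blue = declease_color(image[col_num, row_num, 0])
            green = declease_color(image[col_num, row_num, 1])
            red = declease_color(image[col_num, row_num, 2])
            data[rgb2bin(red, green, blue)] += 1
    return data
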
Code Example #5
def main():
    filename = '15_05/100_405.xml'
    resize_files(STATIC_DIR, RESIZED_DIR)
    noise_files(RESIZED_DIR, RESIZED_DAMAGED_TEST_DIR)
    prepare_input_img(RESIZED_DAMAGED_TEST_DIR)
    cv2.imwrite('media/answers.png', np.ones((40, 30, 1)))

    net = NetworkReader.readFrom(os.path.join(NETWORKS_DIR, filename))
    slide_window_mark_numbers(net)
Code Example #6
File: helpers.py Project: bayerj/pybrain
def xmlInvariance(n, forwardpasses = 1):
    """ try writing a network to an xml file, reading it, rewrite it, reread it, and compare
    if the result looks the same (compare string representation, and forward processing 
    of some random inputs) """
    # We only use this for file creation.
    tmpfile = tempfile.NamedTemporaryFile(dir=".")
    f = tmpfile.name
    tmpfile.close()

    NetworkWriter.writeToFile(n, f)
    tmpnet = NetworkReader.readFrom(f)
    NetworkWriter.writeToFile(tmpnet, f)
    endnet = NetworkReader.readFrom(f)

    # Unlink temporary file.
    os.unlink(f)

    netCompare(tmpnet, endnet, forwardpasses, True)
Code Example #7
File: helpers.py Project: chenzhikuo1/OCR-Python
def xmlInvariance(n, forwardpasses=1):
    """ try writing a network to an xml file, reading it, rewrite it, reread it, and compare
    if the result looks the same (compare string representation, and forward processing 
    of some random inputs) """
    import os.path
    f = 'temp/xmlInvarianceTest.xml'
    if os.path.split(os.path.abspath(os.path.curdir))[1] == 'unittests':
        f = '../' + f
    NetworkWriter.writeToFile(n, f)
    tmpnet = NetworkReader.readFrom(f)
    NetworkWriter.writeToFile(tmpnet, f)
    endnet = NetworkReader.readFrom(f)
    if str(n) == str(endnet):
        print 'Same representation'
    else:
        print n
        print "-" * 80
        print endnet

    outN = zeros(n.outdim)
    outEnd = zeros(endnet.outdim)
    n.reset()
    endnet.reset()
    for dummy in range(forwardpasses):
        inp = randn(n.indim)
        outN += n.activate(inp)
        outEnd += endnet.activate(inp)

    if sum(map(abs, outN - outEnd)) < 1e-9:
        print 'Same function'
    else:
        print outN
        print outEnd

    if n.__class__ == endnet.__class__:
        print 'Same class'
    else:
        print n.__class__
        print endnet.__class__
Code Example #8
File: helpers.py Project: HKou/pybrain
def xmlInvariance(n, forwardpasses = 1):
    """ try writing a network to an xml file, reading it, rewrite it, reread it, and compare
    if the result looks the same (compare string representation, and forward processing 
    of some random inputs) """
    import os.path
    f = 'temp/xmlInvarianceTest.xml'
    if os.path.split(os.path.abspath(os.path.curdir))[1] == 'unittests':        
        f = '../'+f
    NetworkWriter.writeToFile(n, f)
    tmpnet = NetworkReader.readFrom(f)
    NetworkWriter.writeToFile(tmpnet, f)
    endnet = NetworkReader.readFrom(f)
    if str(n) == str(endnet):
        print 'Same representation'
    else:
        print n
        print "-" * 80
        print endnet
        
    outN = zeros(n.outdim)
    outEnd = zeros(endnet.outdim)
    n.reset()
    endnet.reset()
    for dummy in range(forwardpasses):
        inp = randn(n.indim)
        outN += n.activate(inp)
        outEnd += endnet.activate(inp)
        
    if sum(map(abs, outN - outEnd)) < 1e-9:
        print 'Same function'
    else:
        print outN
        print outEnd

    if n.__class__ == endnet.__class__:
        print 'Same class'
    else:        
        print n.__class__
        print endnet.__class__
Code Example #9
def build_net(self):
    if os.path.exists(self.NET_FILE):
        return NetworkReader.readFrom(self.NET_FILE)
    ds = ClassificationDataSet(len(feats), nb_classes=len(classes))
    for c in classes:
        print c
        with codecs.open(os.path.join(self.data_root, c+".txt"), 'r', 'utf8') as f:
            for line in f:
                r = Record("11", line, c, "")
                ds.appendLinked(r.features(), [r.class_idx()])
    ds._convertToOneOfMany([0, 1])
    net = buildNetwork(ds.indim, int((ds.indim + ds.outdim)/2), ds.outdim, bias=True, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
    trainer = BackpropTrainer(net, ds, momentum=0.75, verbose=True)
    trainer.trainUntilConvergence(maxEpochs=300)
    NetworkWriter.writeToFile(net, self.NET_FILE)
    return net
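
The same load-or-train-and-cache pattern, stripped of the project-specific Record/feats/classes objects, might look like the following sketch (toy XOR data and a hypothetical xor_net.xml cache path):

import os
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.xml import NetworkReader, NetworkWriter

NET_FILE = 'xor_net.xml'  # hypothetical cache location

def get_net():
    if os.path.exists(NET_FILE):
        return NetworkReader.readFrom(NET_FILE)   # reuse the previously trained net
    ds = SupervisedDataSet(2, 1)
    for sample, target in [((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 0)]:
        ds.addSample(sample, (target,))
    net = buildNetwork(2, 4, 1, bias=True)
    BackpropTrainer(net, ds).trainEpochs(100)     # train once
    NetworkWriter.writeToFile(net, NET_FILE)      # cache for the next call
    return net
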
Code Example #10
def recog():

    network = NetworkReader.readFrom('gotiusa.xml')

    image_files = glob.glob('gotiusa/*')
    cascade = cv2.CascadeClassifier('lbpcascade_animeface.xml')

    for i, image_path in enumerate(image_files):

        image = cv2.imread(image_path)

        face = cascade.detectMultiScale(image, 1.1, 3)
        for (x, y, w, h) in face:
            cut_image = image[y:y+h, x:x+w]
            cut_image = cv2.resize(cut_image, (80, 80))  # resize returns a new array; keep it
            train_data = [0] * 64
            for row_num in range(cut_image.shape[1]):
                for col_num in range(cut_image.shape[0]):
                    blue = declease_color(cut_image[col_num, row_num, 0])
                    green = declease_color(cut_image[col_num, row_num, 1])
                    red = declease_color(cut_image[col_num, row_num, 2])

                    train_data[rgb2bin(red, green, blue)] += 1
            dataset = SupervisedDataSet(64, 1)
            dataset.addSample(train_data, [0])

            out = network.activateOnDataset(dataset)
            output = np.round(out[0, 0])

            if output == 0:
                cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 3)
            if output == 1:
                cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 3)
            if output == 2:
                cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 255), 3)
            if output == 3:
                cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 3)
            if output == 4:
                cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 3)
        cv2.imwrite('result/%06d.jpg' % i, image)
Code Example #11
def recog_bokkuri_tower():

    network = NetworkReader.readFrom('tower.xml')

    image = cv2.imread('bokkuritower.jpg')
    image = cv2.resize(image, (IMAGE_SIZE, IMAGE_SIZE))

    tokyo_correct_count = 0
    data = [0] * 64
    for row_num in range(image.shape[1]):
        for col_num in range(image.shape[0]):
            blue = declease_color(image[col_num, row_num, 0])
            green = declease_color(image[col_num, row_num, 1])
            red = declease_color(image[col_num, row_num, 2])

            data[rgb2bin(red, green, blue)] += 1

    dataset = SupervisedDataSet(64, 2)
    dataset.addSample(data, [1, 0])

    out = network.activateOnDataset(dataset)
    if np.round(out)[0, 0] == 1:
        tokyo_correct_count += 1
    print "tokyo correct count: %d" % tokyo_correct_count
Code Example #12
File: main.py Project: DianaShatunova/NEUCOGAR
def importCatDogRNN(fileName = root.path()+"/res/recCatDogANN"):
    n = NetworkReader.readFrom(root.path()+"/res/cat_dog_nm_params.xml")
    return n
Code Example #13
File: main.py Project: magnastrazh/NEUCOGAR
def importCatDogRNN(fileName=root.path() + "/res/recCatDogANN"):
    n = NetworkReader.readFrom(root.path() + "/res/cat_dog_nm_params.xml")
    return n
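
Note that both variants above ignore their fileName argument and always read the hard-coded cat_dog_nm_params.xml path. A sketch of a version that actually honors the parameter (keeping the hard-coded file as the default) could be:

def importCatDogRNN(fileName=root.path() + "/res/cat_dog_nm_params.xml"):
    return NetworkReader.readFrom(fileName)
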
Code Example #14
File: imagerecognition.py Project: hardyce/datasci
def loadnet(net):
    net = NetworkReader.readFrom('C:\\Users\\hardy_000\\Documents\\datasci\\net.xml')
    return net
Code Example #15
File: core.py Project: dferens/shapesrecog
def import_network(file_path):
    return NetworkReader.readFrom(file_path)
Code Example #16
test_data._convertToOneOfMany()
data_split = int(num_of_examples * 0.7)

# setting the field names
train_data.setField('input', X[0:data_split, :])
train_data.setField('target', Y[0:data_split, :])

for i in range(data_split, num_of_examples):
    test_data.addSample(X[i, :], Y[i, :])

test_data.setField('input', X[data_split:num_of_examples, :])
test_data.setField('target', Y[data_split:num_of_examples, :])


if os.path.isfile('dig.xml'):
    net = NetworkReader.readFrom('dig.xml')
    net.sorted = False
    net.sortModules()
else:

    net = buildNetwork(size_of_example, size_of_example / 2, num_of_labels, bias=True, hiddenclass=SigmoidLayer,
                       outclass=SoftmaxLayer)
    net.sortModules()

test_index = randint(0, X.shape[0])
test_input = X[test_index, :]


real_train = train_data['target'].argmax(axis=1)
real_test = test_data['target'].argmax(axis=1)
Code Example #17
from pybrain.datasets import UnsupervisedDataSet
import numpy as np
from pybrain.tools.xml import NetworkReader


print 'read dataset'

text_file = open('doc/recog.txt')
lines = text_file.read().split('\n')
text_file.close()

text_file = open('doc/labels.txt')
labels = text_file.read().split('\n')
text_file.close()

network = NetworkReader.readFrom('NN.xml')

for line in lines:

    if not line:
        continue
    line = line.split(' ')
    datas = line[:-1]
    x = []
    for data in datas:
        x.append(float(data))

    data_set = UnsupervisedDataSet(13)
    data_set.addSample(x)

    out = network.activateOnDataset(data_set)
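
Since each line is activated individually, building a one-sample UnsupervisedDataSet per iteration is not strictly required; a sketch of the equivalent loop body using activate() on the same network and 13-element feature vector x:

    out = network.activate(x)          # activate a single 13-value sample directly
    predicted = int(np.argmax(out))    # index of the strongest output unit
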
Code Example #18
import matplotlib.pyplot as plt
from numpy import *
from pybrain.tools.xml import NetworkReader
from scipy import io

# load data
data = io.loadmat('data_mnist.mat')  # load the data

X = data['X']
Y = data['y']  # split to x and y

c = random.randint(0, X.shape[0])  # get random index

c2 = X[c, :]  #  get the data stored at that index

#  show the digit in a graph
m, n = shape(X)
image = array(X[c, 0:n])
plt.imshow((image.reshape(20, 20)).T, cmap='Greys')
plt.show()

# read the saved network from the file
net = NetworkReader.readFrom('test_temp.xml')

# pass the test image through the neural net
prediction = net.activate(c2)
# get the value with the highest probability
p = argmax(prediction, axis=0)
print(prediction)
print("predicted output is \t" + str(p))
Code Example #19
File: play_nn.py Project: rsiera/pybrain_examples
import os

from pybrain.tools.xml import NetworkReader

from game import play
from pybrain_examples.nn_pygame_bird.constants import NETWORKS_DIR

if __name__ == '__main__':
    filename = '2016_05_19/10_50.xml'
    net = NetworkReader.readFrom(os.path.join(NETWORKS_DIR, filename))
    play(net, False)
Code Example #20
File: trainer.py Project: ultrabots/pi-car
from pybrain.tools.xml import NetworkWriter,NetworkReader
j = 0
NPZ = 'data.npz'
XML = 'net.xml'
image_array = np.zeros((1, 9600))
label_array = np.zeros((1, 4), 'float')

#find NPZ file
if not os.path.exists(NPZ):
    print('NPZ file does not exist!')
    raw_input(">")
    sys.exit()

if os.path.exists(XML):
    # load neural network
    network = NetworkReader.readFrom(XML)
else:
    # build new neural network
    network = buildNetwork(9600, 32, 32, 4, bias = True)
    

training_data = glob.glob(NPZ)
target = SupervisedDataSet(9600, 4)
trainer = BackpropTrainer(network, target)


# load NPZ file
for single_npz in training_data:
    with np.load(single_npz) as data:
        print data.files
        train_temp = data['train']
Code Example #21
def load_net():
    from pybrain.tools.xml import NetworkReader
    open_filename = tkFileDialog.askopenfilename()
    global net
    net = NetworkReader.readFrom(open_filename)
Code Example #22
import numpy

from scipy.io.wavfile import read
from scipy.io.wavfile import write
from python_speech_features import fbank, dct, lifter
from python_speech_features import delta
from pybrain.tools.xml import NetworkReader
import numpy as np
from scipy import signal
import GUI_Builder
from FundamentaFreq import freq_from_autocorr
net_noise = NetworkReader.readFrom('./model/net_noise.xml')


def resample(y, orig_sr, target_sr):

    if orig_sr == target_sr:
        return y
    ratio = float(target_sr) / orig_sr
    n_samples = int(np.ceil(y.shape[-1] * ratio))
    y_hat = signal.resample(y, n_samples, axis=-1)
    #if fix:
    #    y_hat = util.fix_length(y_hat, n_samples, **kwargs)
    return np.ascontiguousarray(y_hat, dtype=y.dtype)


def reduce_noise(filename):
    namefile = filename.replace(".wav", "")
    lowpass = 21  # Remove lower frequencies.
    highpass = 9000  # Remove higher frequencies.
    (Frequency, array) = read(filename)
Code Example #23
import matplotlib.pyplot as plt
from numpy import *
from pybrain.tools.xml import NetworkReader
from scipy import io

# load data
data = io.loadmat('data_mnist.mat')  # load the data

X = data['X']  # store the parts of the data labeled X so that we can get its size and shape

c = random.randint(0, X.shape[0])  # get random index

c2 = X[c, :]  # get the data stored at that index

#  show the digit in a graph
m, n = shape(X)
image = array(X[c, 0:n])
plt.imshow((image.reshape(20, 20)).T, cmap='Greys')
plt.show()

# read the saved network from the file
net = NetworkReader.readFrom('good_net.xml')

# pass the test image through the neural net
prediction = net.activate(c2)
# get the value with the highest probability
p = argmax(prediction, axis=0)
print(prediction)
print("The digit should be : \t" + str(p))
Code Example #24
File: ann.py Project: CBITT/python_prac
train_data._convertToOneOfMany()
test_data._convertToOneOfMany()
data_split = int(num_of_examples * 0.7)

# setting the field names
train_data.setField('input', X[0:data_split, :])
train_data.setField('target', Y[0:data_split, :])

for i in range(data_split, num_of_examples):
    test_data.addSample(X[i, :], Y[i, :])

test_data.setField('input', X[data_split:num_of_examples, :])
test_data.setField('target', Y[data_split:num_of_examples, :])

if os.path.isfile('dig.xml') and os.path.isfile('digHB.xml'):
    net = NetworkReader.readFrom('dig.xml')
    netHB = NetworkReader.readFrom('digHB.xml')

else:
    net = buildNetwork(size_of_example,
                       size_of_example / 2,
                       num_of_labels,
                       bias=True,
                       hiddenclass=SigmoidLayer,
                       outclass=SoftmaxLayer)
    netHB = buildNetwork(size_of_example,
                         size_of_example / 2,
                         num_of_labels,
                         bias=True,
                         hiddenclass=TanhLayer,
                         outclass=SoftmaxLayer)