Example #1
# numpy is used below; nn and load_data are the project's own helpers and are
# assumed to be imported elsewhere in this file.
import numpy


def main(train_dir='train', test_dir='test'):

    print('... loading data')
    train_set, test_set, classes = load_data(train_dir, test_dir)

    print('... building the model')

    n = nn.Net(classes)

    n.add_hidden_layer(601, 300)
    n.add_hidden_layer(300, 150)
    n.add_hidden_layer(150, 75)
    n.add_hidden_layer(75, 25)

    print('... compiling the model')
    n.compile_model()

    current_error = 1
    error = 1
    # Train for a number of epochs; after each epoch we evaluate on the test
    # set, write out the error percentage, and save the best model so far
    for epoch in range(1, 20):

        if error < current_error:
            current_error = error
            n.save()

        # print '... training'
        for x in train_set:
            n.train_model(x[0], x[1])

        print('... calculating error')
        # compute zero-one loss on validation set
        error = numpy.mean([n.devtest_model(x[0], x[1]) for x in test_set])
        print('epoch %i, validation error %f %%' % (epoch, error * 100))
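The per-epoch figure printed above is the mean zero-one loss over the test set, i.e. the fraction of misclassified samples. A minimal, self-contained sketch of that metric (the function name and signature here are illustrative, not taken from the project):

import numpy


def zero_one_error(predictions, labels):
    # Fraction of samples whose predicted class differs from the true class.
    return numpy.mean([int(p != y) for p, y in zip(predictions, labels)])


# e.g. zero_one_error(['a', 'b', 'b'], ['a', 'b', 'c']) -> 0.333...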
# numpy and time are used below; nn, load_data, and load_train_data are the
# project's own helpers, assumed to be imported elsewhere in this file.
import time

import numpy


def main(train_dir='../train', test_dir='../test'):

    print('... loading data')
    train_set, test_set, classes = load_data(train_dir, test_dir)

    print('... building the model')
    n = nn.Net(learning_rate=0.00001, classes=classes, L2_reg=0.0001)
    n.add_hidden_layer(1024, 1800)
    n.add_hidden_layer(1800, 1200)
    n.add_hidden_layer(1200, 800)
    n.add_hidden_layer(800, 600)

    print('... compiling the model')
    n.compile_model()
    current_error = 1
    error = 1

    # We train the network 150 times
    # Each time we evaluate the results and write out the error percentage
    for epoch in range(1, 151):

        if error < current_error:
            current_error = error
            print('Saving matrix ...')
            n.save()
            print('Save completed ...')

        z = time.time()

        print('... training')
        for x in train_set:
            n.train_model(x[0], x[1])

        print('training took {}'.format(time.time() - z))
        train_set = load_train_data(train_dir, classes)

        # Calculating error
        print('... calculating error')
        error = numpy.mean(
            [n.devtest_model(x[0], classes.index(x[1])) for x in test_set])
        print('epoch %i, validation error %f %%' % (epoch, error * 100))
Example #3
import socket
import wave

import pyaudio


# neuralnetwork is the project's own module; it and the globals referenced
# below are assumed to be defined or imported elsewhere in this file.
def speaker_recognition():
    global clients
    global client_frame
    global data_for_deep_learning

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    nn = neuralnetwork.Net()
    nn.load()

    CHUNK = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 2
    RATE = 16000

    p = pyaudio.PyAudio()

    while True:
        for i, x in enumerate(data_for_deep_learning):
            if len(x) >= 20:
                y = x[:]

                WAVE_OUTPUT_FILENAME = "output" + str(i) + ".wav"

                wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
                wf.setnchannels(CHANNELS)
                wf.setsampwidth(p.get_sample_size(FORMAT))
                wf.setframerate(RATE)
                wf.writeframes(b''.join(y))
                wf.close()

                person = nn.evaluate(WAVE_OUTPUT_FILENAME)
                print "Evaluate..."

                for c in clients:
                    # encode to bytes so sendto works on Python 3
                    sock.sendto(str(person).encode(), (c[0], 8765))

                data_for_deep_learning[i] = []
import os
from os import path
from os.path import isfile, join


class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


print('Creating the network')
n = nn.Net()
n.load('matrix.txt')

train_input = [
    join("test", f) for f in os.listdir("test") if isfile(join("test", f))
]

print('Evaluation')
for elem in train_input:
    name = n.evaluate(elem)
    print "{}\t===>\t{}\t".format(elem.split('/')[-1], name),
    if name in elem:
        print bcolors.OKGREEN + "OK" + bcolors.ENDC
    else:
        print bcolors.FAIL + "FAIL" + bcolors.ENDC
    def testEvaluateInCaseOfAPersonItWasTrainedFor(self):
        n = neuralnetwork.Net()
        n.load('matrix.txt')
        test = n.evaluate('FMEV_Sr10.wav')

        self.assertEqual(test, 'FMEV')
    def testNeuralNetworkCanBeInstantiatedCorrectly(self):
        n = neuralnetwork.Net()
        self.assertIsInstance(n, neuralnetwork.Net)
Example #7
                                          batch_size=4,
                                          shuffle=False,
                                          num_workers=0)
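# The head of this call is cut off in the snippet above; judging by the
# arguments, it is most likely one of the torch DataLoaders used further down,
# e.g. (the dataset name here is an assumption):
#
# test_loader = torch.utils.data.DataLoader(test_dataset,
#                                           batch_size=4,
#                                           shuffle=False,
#                                           num_workers=0)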

numberOfClasses = 12
numberOfAttributes = 24

#0 is heartmonitor
#1-13 is imu1
#14-26 is imu2
#27-39 is imu3
imulist = [1, 13, 13, 13]
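# A sketch (an assumption, not part of the original script) of how a window of
# 40 channels could be split into the per-device blocks described above using
# imulist:
#
# offsets = [0, 1, 14, 27, 40]   # cumulative sums of [0] + imulist
# blocks = [window[:, offsets[j]:offsets[j + 1]] for j in range(len(imulist))]
# # blocks: heart monitor (1 channel), imu1, imu2, imu3 (13 channels each)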

net = neuralnetwork.Net(imulist,
                        sliding_window_size,
                        12,
                        gpudevice,
                        uncertaintyForwardPasses=100)

#training_data   = neuralnetwork.shapeinputs(imulist, training_data   ,gpudevice)
#validation_data = neuralnetwork.shapeinputs(imulist, validation_data ,gpudevice)
#test_data       = neuralnetwork.shapeinputs(imulist, test_data       ,gpudevice)

#
neuralnetwork.train(net,
                    training_loader,
                    validation_loader,
                    attr_representation,
                    "cosine",
                    epochs=10)
neuralnetwork.test(net, test_loader, "test", attr_representation)
Example #8
File: main.py  Project: Halloerik/HAR
# torch is used below; neuralnetwork, load_data, get_optimiser, and
# save_run_stats are the project's own helpers, assumed imported elsewhere.
import torch


def test_performance():
    config = {
        "pamap2" : {
            'batch_size' : 50,
            'sliding_window_size' : 100,
            'sliding_window_step' : 22,
    
            #Training Parameters
            'epochs' : 1,
            'learning_rate' : [0.0001],
            'weight_decay' : [0.0001],
            'momentum' : [0.7],
            'loss_critereon' : [torch.nn.BCELoss()],
            'optimizer' : ["RMSprop"],
    
            #Network parameters
            'kernelsize' : [(5,1)],    
    
            #Attribute Representation
            'n_attributes' : 24,
            'distance_metric' : ["braycurtis"],
    
            #Uncertainty
            'uncertainty_forward_passes' : 100
        },
        "gestures" : {
            'batch_size' : 100,
            'sliding_window_size' : 24,
            'sliding_window_step' : 12,
    
            #Training Parameters
            'epochs' : 10,
            'learning_rate' : [0.0001],
            'weight_decay' : [0.00001],
            'momentum' : [0.9],
            'loss_critereon' : [torch.nn.BCELoss()],
            'optimizer' : ["RMSprop"],
    
            #Network parameters
            'kernelsize' : [(5,1)],    
    
            #Attribute Representation
            'n_attributes' : 32,
            'distance_metric' : ["cosine"],
    
            #Uncertainty
            'uncertainty_forward_passes' : 100
        },
        "locomotion" : {
            'batch_size' : 100,
            'sliding_window_size' : 24,
            'sliding_window_step' : 12,
    
            #Training Parameters
            'epochs' : 10,
            'learning_rate' : [0.0001],
            'weight_decay' : [0.00001],
            'momentum' : [0.9],
            'loss_critereon' : [torch.nn.BCELoss()],
            'optimizer' : ["RMSprop"],
    
            #Network parameters
            'kernelsize' : [(5,1)],            
    
            #Attribute Representation
            'n_attributes' : 10,
            'distance_metric' : ["cosine"],
    
            #Uncertainty
            'uncertainty_forward_passes' : 100
        },
        
        
        'gpu_device' : torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
    }
    
    run_number = 0
    
    data_set = ["pamap2","locomotion","gestures"]
    for ds in data_set:
        batch_size = config[ds]['batch_size']
        
        training_loader,validation_loader,test_loader, imu_list, attr_rep = load_data(
                ds,config['gpu_device'],batch_size,config[ds]['sliding_window_size'],config[ds]['sliding_window_step'])
        
        for lr in config[ds]['learning_rate']:
            for wd in config[ds]['weight_decay']:
                for m in config[ds]['momentum']:
                    for ks in config[ds]['kernelsize']:
                        for opt in config[ds]['optimizer']:
                            for criterion in config[ds]['loss_critereon']:
                                for dist_metric in config[ds]['distance_metric']:
                                    for i in range(10):
                                        # .to() also works when the cpu fallback device is selected (unlike .cuda())
                                        network = neuralnetwork.Net(imu_list, config[ds]['sliding_window_size'], config[ds]['n_attributes'],
                                                                    config['gpu_device'], ks, config[ds]['uncertainty_forward_passes']).to(config['gpu_device'])
                                                                 
                                        optimizer = get_optimiser(network, opt, lr, wd, m)
                                    
                                        
                                        data = neuralnetwork.train(network, training_loader, validation_loader, criterion, optimizer, config[ds]['epochs'], config['gpu_device'],attr_rep,dist_metric)
                                    
                                        config_str = "Dataset; {}; batch_size; {}; window_size; {}; window_step; {}; epochs; {}; learning_rate; {}; weight_decay; {}; momentum; {}; optimizer; {}; distance_metric; {}; uncertainty; {}".format(
                                            ds, batch_size, config[ds]['sliding_window_size'],config[ds]['sliding_window_step'],config[ds]['epochs'],lr,wd,m,opt,dist_metric,config[ds]['uncertainty_forward_passes'])
                                        save_run_stats("train run {}, {}".format(run_number,ds),data,config_str)
                                        run_number += 1

                                        results = neuralnetwork.test(network, test_loader, criterion, config['gpu_device'], attr_rep, dist_metric, False)
                                        
                                        print(results)
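get_optimiser is not shown on this page; a plausible sketch of what it does, mapping the optimizer names used in the config to the corresponding torch.optim classes (the exact signature and defaults in the project may differ):

import torch.optim as optim


def get_optimiser(network, name, learning_rate, weight_decay, momentum):
    params = network.parameters()
    if name == "SGD":
        return optim.SGD(params, lr=learning_rate, momentum=momentum,
                         weight_decay=weight_decay)
    if name == "RMSprop":
        return optim.RMSprop(params, lr=learning_rate, momentum=momentum,
                             weight_decay=weight_decay)
    if name == "Adam":
        # Adam takes no momentum argument; its betas are left at the defaults.
        return optim.Adam(params, lr=learning_rate, weight_decay=weight_decay)
    raise ValueError("unknown optimizer: {}".format(name))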
Example #9
File: main.py  Project: Halloerik/HAR
def main():
    config = {
    'data_set' : ["pamap2","locomotion","gestures"],
    #'data_set' : ["locomotion","gestures","pamap2"],
    #'data_set' : ["gestures","pamap2","locomotion"],
    
    'batch_size' : {"pamap2" : 50, "locomotion" : 100, "gestures" : 100},
    'sliding_window_size' : {"pamap2" : 100, "locomotion" : 24, "gestures" : 24},
    'sliding_window_step' : {"pamap2" : 22, "locomotion" : 12, "gestures" : 12},
    
    #Training Parameters
    'epochs' : 10,
    'learning_rate' : [0.01,0.001,0.0001],
    'weight_decay' : [0.0001,0.00001,0.000001],
    'momentum' : [0.9,0.8,0.7],
    'loss_critereon' : [torch.nn.BCELoss()],
    'optimizer' : ["SGD","Adam","RMSprop"],
    
    #Network parameters
    'kernelsize' : [(5,1)],
    
    'gpu_device' : torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
    
    #Attribute Representation
    'n_attributes' : {"pamap2" : 24, "locomotion" : 10, "gestures" : 32},
    'distance_metric' : ["cosine","braycurtis","euclidean"],
    
    #Uncertainty
    'uncertainty_forward_passes' : 100
    
    }
    
    run_number = 0
    
    #for i in range(9):
    #    print(current_config_str(config, i))
    
    for ds in config['data_set']:
        #for b in config['batch_size']:
        batch_size = config['batch_size'][ds]
        training_loader,validation_loader,test_loader, imu_list, attr_rep = load_data(
                ds,config['gpu_device'],batch_size,config['sliding_window_size'][ds],config['sliding_window_step'][ds])
        for lr in config['learning_rate']:
            for wd in config['weight_decay']:
                for m in config['momentum']:
                    for ks in config['kernelsize']:
                        for opt in config['optimizer']:
                            for criterion in config['loss_critereon']:
                                for dist_metric in config['distance_metric']:
                                    
                                    # .to() also works when the cpu fallback device is selected (unlike .cuda())
                                    network = neuralnetwork.Net(imu_list, config['sliding_window_size'][ds], config['n_attributes'][ds],
                                                                 config['gpu_device'], ks, config['uncertainty_forward_passes']).to(config['gpu_device'])
                                                                 
                                    optimizer = get_optimiser(network, opt, lr, wd, m)
                                    
                                    
                                    print("Run number: {}".format(run_number))
                                        
                                    data = neuralnetwork.train(network, training_loader, validation_loader, criterion, optimizer, config['epochs'], config['gpu_device'],attr_rep,dist_metric)
                                           
                                    
                                    
                                    #plot_run_stats("train run {}, {}".format(run_number,ds), data, config['epochs'])
                                    save_run_stats("train run {}, {}".format(run_number,ds),data,current_config_str(config, run_number))
                                    
                                    run_number += 1
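Both training and testing above pass attr_rep and a distance metric into the network helpers. In attribute-based models of this kind, a predicted attribute vector is typically assigned to the class whose attribute vector is nearest under the chosen metric; a minimal sketch of that lookup (an assumption about how attr_rep and dist_metric are used, not code from the project):

import numpy
from scipy.spatial.distance import cdist


def nearest_class(predicted_attributes, attr_rep, dist_metric="cosine"):
    # attr_rep: (n_classes, n_attributes) matrix of per-class attribute vectors.
    # Returns the index of the class whose attribute vector is closest to the
    # prediction under the chosen metric ("cosine", "braycurtis", "euclidean").
    distances = cdist(numpy.atleast_2d(predicted_attributes), attr_rep,
                      metric=dist_metric)
    return int(numpy.argmin(distances))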