Code Example #1
import numpy as np
import pandas as pd
import tensorflow as tf

import dataset  # project-local module providing get_test()


def predict(model_path, gene_type, input_path, output_path):
    # Load the trained Keras model and print its architecture.
    model = tf.keras.models.load_model(model_path)
    model.summary()

    # get_test() returns the test features, allele frequencies, and
    # variant IDs for the requested gene type.
    x, af, var_id = dataset.get_test(input_path, gene_type)
    pred = model.predict(x)
    pred = np.squeeze(pred, axis=1)  # collapse the (n, 1) output to shape (n,)

    # Write predictions alongside variant IDs and gnomAD exome allele frequencies.
    df = pd.DataFrame({'var_id': var_id, 'pred': pred, 'gnomad_exome': af})
    df.to_csv(output_path, index=False)
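
For context, a minimal invocation might look like the following sketch; the model path, gene type, and CSV paths are placeholders, not values from the original project.

# Hypothetical invocation; all paths and the gene type are placeholders.
predict(
    model_path='models/brca1.h5',
    gene_type='BRCA1',
    input_path='data/test_variants.csv',
    output_path='predictions/brca1_pred.csv',
)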
Code Example #2
def evaluate(receptors, test_err, epoch):
    test_error_sum = 0
    # Compute the validation-set error.
    for i in range(net.len_test):
        inputs, expected = dataset.get_test(i)
        execute_net(inputs)

        # One-hot vector for the predicted class (highest output activation).
        tt = np.zeros(net.nodes_output)
        pos = np.argmax(receptors[net.depth])
        tt[pos] = 1

        test_error_sum += sum(abs(expected - tt))

    # A single misclassification creates an error sum of 2.
    test_err[epoch] = test_error_sum / (2 * net.len_test)
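
The division by 2 works because expected and tt are both one-hot vectors: a wrong prediction puts the 1 in a different position, so abs(expected - tt) contains exactly two ones. A minimal sketch:

import numpy as np

expected = np.array([0, 1, 0])  # true class is index 1
tt = np.array([0, 0, 1])        # predicted class is index 2
print(sum(abs(expected - tt)))  # 2 -> one misclassification counts as 2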
Code Example #3
def test_eval(synapse):
    test_error_sum = 0
    for i in range(len(test)):
        inputs, expected = dataset.get_test(i)
        result = execute_net(inputs, synapse)

        # One-hot vector for the predicted class (highest output score).
        tt = np.zeros(nodes_output)
        pos = np.argmax(result)
        tt[pos] = 1

        test_error_sum += sum(abs(expected - tt))

    # Fitness is inversely proportional to error.
    return get_fitness(test_error_sum / (2 * len(test)))
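
get_fitness is not shown in this snippet; a hypothetical definition consistent with the comment above (fitness inversely proportional to error) could be:

def get_fitness(error_rate, eps=1e-8):
    # Hypothetical helper, not from the original project:
    # higher fitness for lower validation error.
    return 1.0 / (error_rate + eps)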
Code Example #4
def train_nets():
    global err, test_err, deltas, synapses, prv_update, curr_update

    for epoch in range(iter_no):
        # Update based on each data point (stochastic gradient descent).
        error_sum = 0
        test_error_sum = 0

        for i in range(len(data)):
            inputs, expected = dataset.get_data(i)
            execute_net(inputs)
            error = expected - receptors[depth - 1]  # error vector for each output node
            error_sum += sum(abs(error))

            # Backpropagation using dynamic programming.
            deltas[depth - 1] = activate(receptors[depth - 1], True) * error
            for index in range(depth - 2, -1, -1):
                deltas[index] = (activate(receptors[index], True)
                                 * synapses[index + 1].transpose().dot(deltas[index + 1]))

            # Update all the weights (gradient step plus momentum).
            for index in range(depth - 1, 0, -1):
                curr_update[index] = deltas[index].reshape(topology[index + 1], 1) * receptors[index - 1]
                synapses[index] += learning_rate * curr_update[index] + momentum * prv_update[index]
                bias[index] += learning_rate * deltas[index]

            curr_update[0] = deltas[0].reshape(topology[1], 1) * inputs
            synapses[0] += learning_rate * curr_update[0] + momentum * prv_update[0]
            bias[0] += learning_rate * deltas[0]

            # Copy the updates so the momentum term uses the *previous* step;
            # a plain `prv_update = curr_update` would alias the two lists.
            prv_update = [u.copy() for u in curr_update]

        for i in range(len(test)):
            inputs, expected = dataset.get_test(i)
            execute_net(inputs)

            # One-hot vector for the predicted class (highest output activation).
            tt = np.zeros(nodes_output)
            pos = np.argmax(receptors[depth - 1])
            tt[pos] = 1

            test_error_sum += sum(abs(expected - tt))
            # Alternative: accumulate the raw output error instead of the
            # 0/1 classification error:
            # test_error_sum += sum(abs(expected - receptors[depth - 1]))

        err[epoch] = error_sum / len(data)
        # A single misclassification creates an error sum of 2.
        test_err[epoch] = test_error_sum / (2 * len(test))

        if epoch % 1 == 0:  # change the modulus to print less often
            print("Iteration no:", epoch, "   error:", err[epoch], " test error:", test_err[epoch])
Code Example #5
def train_nets():
    global err, test_err, deltas, synapses, prv_update, curr_update

    for epoch in range(iter_no):
        # Update based on each data point (stochastic gradient descent).
        error_sum = 0
        test_error_sum = 0

        for i in range(len(data)):                  # train on the training data
            inputs, expected = dataset.get_data(i)  # next training sample and label

            execute_net(inputs)                     # forward pass through the net
            error = expected - receptors[depth]     # error vector for each output node
            error_sum += sum(abs(error))            # absolute error summed over all classes

            # Backpropagation using dynamic programming.
            deltas[depth] = act.activate(receptors[depth], True, act_fn[depth]) * error
            for index in range(depth - 1, -1, -1):
                deltas[index] = (act.activate(receptors[index], True, act_fn[index])
                                 * synapses[index].transpose().dot(deltas[index + 1]))

            # Update all the weights (gradient step plus momentum).
            for index in range(depth - 1, -1, -1):
                curr_update[index] = deltas[index + 1].reshape(topology[index + 1], 1) * receptors[index]
                synapses[index] += learning_rate * curr_update[index] + momentum * prv_update[index]
                bias[index + 1] += learning_rate * deltas[index + 1]

            # Copy the updates so the momentum term uses the *previous* step;
            # a plain assignment would alias the two lists.
            prv_update = [u.copy() for u in curr_update]

        for i in range(len(test)):                  # evaluate on the validation set
            inputs, expected = dataset.get_test(i)  # next validation sample and label
            execute_net(inputs)                     # forward pass through the net

            tt = np.zeros(nodes_output)
            pos = np.argmax(receptors[depth])
            tt[pos] = 1                             # predicted class = highest score

            test_error_sum += sum(abs(expected - tt))  # count misclassifications

        err[epoch] = error_sum / len(data)
        # A single misclassification creates an error sum of 2.
        test_err[epoch] = test_error_sum / (2 * len(test))

        if epoch % 1 == 0:  # change the modulus to print less often
            print("Iteration no:", epoch, "   error:", err[epoch], " test error:", test_err[epoch])

        if np.argmin(err[:epoch + 1]) == epoch:     # should arguably track argmin of test_err
            save()                                  # checkpoint when this epoch is the best so far
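
The checkpoint condition saves whenever the current epoch has the lowest training error seen so far: np.argmin over the filled prefix of err returns the index of the minimum, which equals epoch only when this epoch set a new best. A toy check:

import numpy as np

err = np.array([0.9, 0.7, 0.8])
epoch = 1
print(np.argmin(err[:epoch + 1]) == epoch)  # True: epoch 1 is the best so far
epoch = 2
print(np.argmin(err[:epoch + 1]) == epoch)  # False: epoch 1 is still better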