Example 1
import numpy as np

def foward_error(n, m, ans, NN):
    # Train 5 independent conv + fully connected networks on sample n,
    # then rank the 8 answer candidates by forward-pass error.
    data = np.load('data.npy')
    w1 = np.zeros((5, 5, 1, 3, 5))             # conv layer 1 weights, 5 trials
    w2 = np.zeros((5, 5, 3, 6, 5))             # conv layer 2 weights, 5 trials
    wf1 = np.zeros((7 * 7 * 2 * 3, 1024, 5))   # FC weights (294 inputs, 5 trials)
    bf1 = np.zeros((1024, 5))                  # FC biases
    T = 0  # number of FC trainings that converged
    i = 0
    # Retrain the conv layers until 5 runs converge (Num == 1).
    while i < 5:
        w, ww, Num = train_cov(data[:, :, m, n], 200, 3, 8)
        if Num == 1:
            w1[:, :, :, :, i] = w
            w2[:, :, :, :, i] = ww
            i += 1
    # Train the fully connected layer once per conv ensemble member.
    for i in range(0, 5):
        wf1[:, :, i], bf1[:, i], Num = train_fcon(data[:, :, :, n],
                                                  w1[:, :, :, :, i],
                                                  w2[:, :, :, :, i], 50, 8)
        if Num == 1:
            T = T + 1

    EE = np.zeros((8, 5))
    for j in range(0, 5):
        II = forward(data[:, :, :, n], w1[:, :, :, :, j], w2[:, :, :, :, j])
        # Only the 6->7 transition and the 7->candidate transitions enter the
        # score; the other pairwise differences were computed but never used.
        error67 = II[7, :] - II[6, :]
        error78 = np.zeros((8, 294))
        for i in range(0, 8):
            error78[i, :] = II[8 + i, :] - II[7, :]
        O1 = np.dot(error67.reshape(1, 294), wf1[:, :, j]) + bf1[:, j]
        O2 = np.zeros((8, 1024))
        for i in range(0, 8):
            O2[i, :] = np.dot(error78[i, :].reshape(1, 294),
                              wf1[:, :, j]) + bf1[:, j]
        E = np.zeros(8)
        for i in range(0, 8):
            E[i] = abs(O2[i, :] - O1).sum()
        EE[:, j] = E
    # Per-trial prediction: the candidate with the smallest error.
    N = np.zeros(5)
    for i in range(0, 5):
        N[i] = np.argmin(EE[:, i])
    print(np.argmin(np.average(EE, axis=1)))   # best candidate, averaged over trials
    print(np.argsort(np.average(EE, axis=1)))  # candidates ranked by average error
    print(np.average(EE, axis=1)[ans] / np.average(EE, axis=1).mean())  # normalized error of the correct answer
    print(N)  # per-trial predictions
    print(T)  # number of FC trainings that converged
    np.save('Data/' + str(NN) + 'weight1_no_rotation_rerun.npy', w1)
    np.save('Data/' + str(NN) + 'weight2_no_rotation_rerun.npy', w2)
    np.save('Data/' + str(NN) + 'weightf_no_rotation_rerun.npy', wf1)
    np.save('Data/' + str(NN) + 'biasf_no_rotation_rerun.npy', bf1)
    np.save('Data/' + str(NN) + 'Error_no_rotation_rerun.npy', EE)
    np.save('Data/' + str(NN) + 'Termination_no_rotation_rerun.npy', T)
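The ranking step above is just an L1 nearest-neighbour test in the fully connected layer's output space. A minimal, self-contained sketch with random stand-in arrays (the shapes follow the code above; the values are hypothetical):

import numpy as np

# Hypothetical stand-ins: O1 is the FC response to the reference transition,
# O2 holds the FC responses to the 8 answer candidates.
O1 = np.random.rand(1, 1024)
O2 = np.random.rand(8, 1024)

E = np.abs(O2 - O1).sum(axis=1)  # L1 distance of each candidate to the reference
print(np.argmin(E))              # predicted answer: the closest candidate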
Example 2
import numpy as np

def foward_error(n, ans):
    data = np.load('data_4.npy')
    E_min = np.zeros(25)
    EEE = np.zeros((6, 25))
    # The outer loop must not reuse `i`: the inner loops below overwrite it,
    # which broke the slice assignments at the end of each pass.
    for k in range(0, 5):
        N_str = '3_c_f' + str(k)
        w1 = np.load('Data/' + str(N_str) + 'weight1_t.npy')
        w2 = np.load('Data/' + str(N_str) + 'weight2_t.npy')
        wf1 = np.load('Data/' + str(N_str) + 'weightf_t.npy')
        bf1 = np.load('Data/' + str(N_str) + 'biasf_t.npy')

        O2 = np.zeros((6, 1024))
        E = np.zeros(6)
        EE = np.zeros((6, w1.shape[4]))

        for j in range(0, w1.shape[4]):
            II = forward(data[:, :, :, n], w1[:, :, :, :, j],
                         w2[:, :, :, :, j])
            # Only the 0->1 reference and 2->candidate differences are scored.
            error01 = II[1, :] - II[0, :]
            error23 = np.zeros((6, 180))
            for i in range(1, 7):
                error23[i - 1, :] = II[2 + i, :] - II[2, :]

            O1 = np.dot(error01.reshape(1, 180), wf1[:, :, j]) + bf1[:, j]
            for i in range(0, 6):
                O2[i, :] = np.dot(error23[i, :].reshape(1, 180),
                                  wf1[:, :, j]) + bf1[:, j]

            for i in range(0, 6):
                E[i] = abs(O2[i, :] - O1).sum()
            EE[:, j] = E

        # Per-trial predictions for this ensemble; ensemble k fills slots
        # 5k..5k+4 of the 25-trial tallies.
        E_min_n = np.zeros(w1.shape[4])
        for i in range(0, w1.shape[4]):
            E_min_n[i] = np.argmin(EE[:, i])
        E_min[5 * k:5 * k + 5] = E_min_n
        EEE[:, 5 * k:5 * k + 5] = EE
    print(E_min[E_min == ans].size)                   # correct trials out of 25
    print(np.average(EEE, axis=1))                    # average error per candidate
    print(np.argsort(np.average(EEE, axis=1)))        # candidates ranked by error
    print(np.average(EEE, axis=1)[ans] / EEE.mean())  # normalized error of the correct answer
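A quick sanity check of the aggregation indexing fixed above (the original reused `i` at both loop levels and sliced with a stride of 1): each of the 5 saved ensembles contributes 5 trials, so ensemble k must fill slots 5k..5k+4. A toy run with hypothetical stand-in predictions:

import numpy as np

E_min = np.zeros(25)
for k in range(5):
    trial_preds = np.full(5, k, dtype=float)  # stand-in predictions from ensemble k
    E_min[5 * k:5 * k + 5] = trial_preds
print(E_min)  # each block of 5 slots now holds a distinct ensemble's output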
Example 3
import numpy as np
import tensorflow as tf

def train_rcon(pic, weight1, weight2, n, bs, w):
    tf.reset_default_graph()
    Num = 0
    learning_rate = 0.001
    num_steps = n
    batch_size = bs
    display_step = 1

    # Network Parameters
    num_input = 180   # flattened conv-feature size per frame
    num_classes = 2
    timesteps = 2     # the RNN compares a pair of frames
    num_hidden = 128
    dropout = 0.75    # dropout, probability to keep units

    # tf Graph input
    X = tf.placeholder(tf.float32, [None, timesteps, num_input])
    Y = tf.placeholder(tf.float32, [None, num_classes])
    keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)
    weights = {'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))}
    biases = {'out': tf.Variable(tf.random_normal([num_classes]))}
    logits, outputs = RNN(X, weights, biases)
    prediction = tf.nn.softmax(logits)

    # Define loss and optimizer
    loss_op = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss_op)

    # Evaluate model (with test logits, for dropout to be disabled)
    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Initialize the variables (i.e. assign their default value)

    init = tf.global_variables_initializer()

    with tf.Session() as sess:

        # Run the initializer
        sess.run(init)
        for step in range(1, num_steps + 1):
            batch_x, batch_y = g_error(pic, weight1, weight2)
            # Run optimization op (backprop)
            sess.run(train_op,
                     feed_dict={
                         X: batch_x,
                         Y: batch_y,
                         keep_prob: dropout
                     })
            if step % display_step == 0 or step == 1:
                # Calculate batch loss and accuracy
                loss, acc = sess.run([loss_op, accuracy],
                                     feed_dict={
                                         X: batch_x,
                                         Y: batch_y,
                                         keep_prob: 1.0
                                     })
                print("Step " + str(step) + ", Minibatch Loss= " +
                      "{:.4f}".format(loss) + ", Train Accuracy= " +
                      "{:.3f}".format(acc))
                # Stop early once the batch accuracy is good enough.
                if acc >= 0.8 and step > 5:
                    Num = 1
                    break

        # Evaluate: assemble 8-slot frame sequences from the conv features and
        # score each candidate by how much the RNN hidden output changes.
        tr = 1
        EE = np.zeros((6, tr))
        data = np.load('data_4.npy')
        for j in range(0, tr):
            II = forward(data[:, :, :, w], weight1, weight2)
            # Use a separate array so the loaded `data` is not clobbered
            # (the original reused the name `data` here).
            seq = np.zeros((1, 8, 180))
            seq[:, 0, :] = II[0, :].reshape(1, 180)
            seq[:, 1, :] = II[1, :].reshape(1, 180)
            seq[:, 2, :] = II[2, :].reshape(1, 180)
            seq[:, 4, :] = II[0, :].reshape(1, 180)
            seq[:, 5, :] = II[2, :].reshape(1, 180)
            seq[:, 6, :] = II[1, :].reshape(1, 180)
            l = outputs.eval(feed_dict={
                X: seq[:, 0:2, :],
                Y: np.zeros((1, 2)),
                keep_prob: 1.0
            })
            l1 = outputs.eval(feed_dict={
                X: seq[:, 4:6, :],
                Y: np.zeros((1, 2)),
                keep_prob: 1.0
            })
            for i in range(1, 7):
                III = II[2 + i, :].reshape(1, 180)
                seq[:, 3, :] = III
                seq[:, 7, :] = III
                l3 = outputs.eval(feed_dict={
                    X: seq[:, 2:4, :],
                    Y: np.zeros((1, 2)),
                    keep_prob: 1.0
                })
                l4 = outputs.eval(feed_dict={
                    X: seq[:, 6:8, :],
                    Y: np.zeros((1, 2)),
                    keep_prob: 1.0
                })
                EE[i - 1, j] = np.abs(l3 - l).sum() + np.abs(l4 - l1).sum()
        # Per-trial predicted answer.
        N = np.zeros(tr)
        for i in range(0, tr):
            N[i] = np.argmin(EE[:, i])


    return EE, N, Num
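The helper RNN(X, weights, biases) is not shown in this example. Given that it returns a (logits, outputs) pair and that `outputs` is later eval'd as a single tensor, a minimal sketch of what it could look like, assuming one static LSTM layer in the TF1 style (a hypothetical reconstruction, not the author's code):

import tensorflow as tf
from tensorflow.contrib import rnn

def RNN(x, weights, biases):
    # Split the (batch, 2, 180) input into a list of 2 per-timestep tensors.
    x = tf.unstack(x, 2, 1)
    lstm_cell = rnn.BasicLSTMCell(128, forget_bias=1.0)  # num_hidden = 128
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # Linear readout on the last hidden state; return that state too so the
    # caller can compare hidden representations directly.
    logits = tf.matmul(outputs[-1], weights['out']) + biases['out']
    return logits, outputs[-1]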
Example 4
import numpy as np

def foward_error(n, m, ans, N_str):
    # Same scheme as Example 1, but with 6 candidates and errors scored
    # separately along the x (row) and y (column) directions.
    data = np.load('data_4.npy')
    w1 = np.zeros((5, 5, 1, 3, 5))    # conv layer 1 weights, 5 trials
    w2 = np.zeros((5, 5, 3, 6, 5))    # conv layer 2 weights, 5 trials
    wf1 = np.zeros((180, 1024, 5))    # FC weights
    bf1 = np.zeros((1024, 5))         # FC biases
    T = 0  # number of FC trainings that converged
    i = 0
    # Retrain the conv layers until 5 runs converge (Num == 1).
    while i < 5:
        w, ww, Num = train_cov(data[:, :, m, n], 200, 3, 8)
        if Num == 1:
            w1[:, :, :, :, i] = w
            w2[:, :, :, :, i] = ww
            i += 1
    # Train the fully connected layer once per conv ensemble member.
    for i in range(0, 5):
        wf1[:, :, i], bf1[:, i], Num = train_fcon(data[:, :, :, n],
                                                  w1[:, :, :, :, i],
                                                  w2[:, :, :, :, i], 50, 8)
        if Num == 1:
            T = T + 1

    EE = np.zeros((6, 5))   # x-direction (row) errors
    EE2 = np.zeros((6, 5))  # y-direction (column) errors
    for j in range(0, 5):
        II = forward(data[:, :, :, n], w1[:, :, :, :, j], w2[:, :, :, :, j])
        error01 = II[1, :] - II[0, :]
        error02 = II[2, :] - II[0, :]
        error23 = np.zeros((6, 180))
        error13 = np.zeros((6, 180))
        for i in range(1, 7):
            error23[i - 1, :] = II[2 + i, :] - II[2, :]
        for i in range(1, 7):
            error13[i - 1, :] = II[2 + i, :] - II[1, :]
        # x direction: reference transition 0->1 against 2->candidate.
        O1 = np.dot(error01.reshape(1, 180), wf1[:, :, j]) + bf1[:, j]
        O2 = np.zeros((6, 1024))
        for i in range(0, 6):
            O2[i, :] = np.dot(error23[i, :].reshape(1, 180),
                              wf1[:, :, j]) + bf1[:, j]
        E = np.zeros(6)
        for i in range(0, 6):
            E[i] = abs(O2[i, :] - O1).sum()
        EE[:, j] = E
        # y direction: reference transition 0->2 against 1->candidate.
        O3 = np.dot(error02.reshape(1, 180), wf1[:, :, j]) + bf1[:, j]
        O4 = np.zeros((6, 1024))
        for i in range(0, 6):
            O4[i, :] = np.dot(error13[i, :].reshape(1, 180),
                              wf1[:, :, j]) + bf1[:, j]
        EEE = np.zeros(6)
        for i in range(0, 6):
            EEE[i] = abs(O4[i, :] - O3).sum()
        EE2[:, j] = EEE
    # Per-trial predictions from the x, y, and combined errors.
    N = np.zeros(5)
    for i in range(0, 5):
        N[i] = np.argmin(EE[:, i])
    NN = np.zeros(5)
    for i in range(0, 5):
        NN[i] = np.argmin(EE2[:, i])
    NNN = np.zeros(5)
    for i in range(0, 5):
        NNN[i] = np.argmin(EE2[:, i] + EE[:, i])
    print('x direction error:', np.argsort(np.average(EE, axis=1)))
    print('y direction error:', np.argsort(np.average(EE2, axis=1)))
    print('normalized correct x direction error:',
          np.average(EE, axis=1)[ans] / np.average(EE, axis=1).mean())
    print('normalized correct y direction error:',
          np.average(EE2, axis=1)[ans] / np.average(EE2, axis=1).mean())
    print('sum error:', np.argsort(np.average(EE2 + EE, axis=1)))
    print('normalized correct sum error:',
          np.average(EE2 + EE, axis=1)[ans] / np.average(EE + EE2, axis=1).mean())
    print('per-trial predictions, x:', N)
    print('per-trial predictions, y:', NN)
    print('per-trial predictions, x+y:', NNN)  # NNN was computed but never reported
    print(T)  # number of FC trainings that converged
    np.save('Data/' + str(N_str) + 'weight1.npy', w1)
    np.save('Data/' + str(N_str) + 'weight2.npy', w2)
    np.save('Data/' + str(N_str) + 'weightf.npy', wf1)
    np.save('Data/' + str(N_str) + 'biasf.npy', bf1)
    np.save('Data/' + str(N_str) + 'Error.npy', EE)
    # The original wrote EE2 to 'Error.npy' as well, silently overwriting EE;
    # give the y-direction error its own file.
    np.save('Data/' + str(N_str) + 'Error2.npy', EE2)
    np.save('Data/' + str(N_str) + 'Termination.npy', T)
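Ranking by the summed x- and y-direction errors amounts to adding the two score tables element-wise before the argmin. A toy check with stand-in score matrices (hypothetical values):

import numpy as np

EE = np.random.rand(6, 5)   # x-direction errors: 6 candidates x 5 trials
EE2 = np.random.rand(6, 5)  # y-direction errors
combined = np.average(EE + EE2, axis=1)  # summed error, averaged over trials
print(np.argsort(combined))              # candidates ranked by combined error
print(np.argmin(combined))               # predicted answer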
Example 5
import numpy as np

def foward_error(n, m, ans, N_str):
    data = np.load('data_4.npy')
    # Load a previously trained ensemble: two conv layers plus a stack of
    # three fully connected layers.
    w1 = np.load('Data/' + str(N_str) + 'weight1c_ex.npy')
    w2 = np.load('Data/' + str(N_str) + 'weight2c_ex.npy')
    wf1 = np.load('Data/' + str(N_str) + 'weightf1_ex.npy')
    bf1 = np.load('Data/' + str(N_str) + 'biasf1_ex.npy')
    wf2 = np.load('Data/' + str(N_str) + 'weightf2_ex.npy')
    bf2 = np.load('Data/' + str(N_str) + 'biasf2_ex.npy')
    wf3 = np.load('Data/' + str(N_str) + 'weightf3_ex.npy')
    bf3 = np.load('Data/' + str(N_str) + 'biasf3_ex.npy')
    O2 = np.zeros((6, 1024))
    O3 = np.zeros((6, 1024))
    O4 = np.zeros((6, 1024))
    E = np.zeros(6)
    EE = np.zeros((6, w1.shape[4]))
    E_1 = np.zeros(6)
    EE_1 = np.zeros((6, w1.shape[4]))
    E_2 = np.zeros(6)
    EE_2 = np.zeros((6, w1.shape[4]))
    for j in range(0, w1.shape[4]):
        II = forward(data[:, :, :, n], w1[:, :, :, :, j], w2[:, :, :, :, j])
        # Only the 0->1 reference and 2->candidate differences are scored here.
        error01 = II[1, :] - II[0, :]
        error23 = np.zeros((6, 180))
        for i in range(1, 7):
            error23[i - 1, :] = II[2 + i, :] - II[2, :]

        # Layer 1: 180 -> 1024.
        O1 = np.dot(error01.reshape(1, 180), wf1[:, :, j]) + bf1[:, j]
        for i in range(0, 6):
            O2[i, :] = np.dot(error23[i, :].reshape(1, 180),
                              wf1[:, :, j]) + bf1[:, j]
        # Layers 2 and 3 take the raw error concatenated with the previous
        # layer's output: 180 + 1024 = 1204 inputs.
        O1_2 = np.dot(
            np.hstack((error01, O1.reshape(1024))).reshape(1, 1204),
            wf2[:, :, j]) + bf2[:, j]
        for i in range(0, 6):
            O3[i, :] = np.dot(
                np.hstack((error23[i, :], O2[i, :].reshape(1024))).reshape(
                    1, 1204), wf2[:, :, j]) + bf2[:, j]
        O1_3 = np.dot(
            np.hstack((error01, O1_2.reshape(1024))).reshape(1, 1204),
            wf3[:, :, j]) + bf3[:, j]
        for i in range(0, 6):
            O4[i, :] = np.dot(
                np.hstack((error23[i, :], O3[i, :].reshape(1024))).reshape(
                    1, 1204), wf3[:, :, j]) + bf3[:, j]
        # L1 score of each candidate against the reference at every depth.
        for i in range(0, 6):
            E[i] = abs(O2[i, :] - O1).sum()
        EE[:, j] = E
        for i in range(0, 6):
            E_1[i] = abs(O3[i, :] - O1_2).sum()
        EE_1[:, j] = E_1
        for i in range(0, 6):
            E_2[i] = abs(O4[i, :] - O1_3).sum()
        EE_2[:, j] = E_2

    # Layer-1 scores (sized to the actual trial count, not a fixed 5).
    E_min_n = np.zeros(w1.shape[4])
    for i in range(0, w1.shape[4]):
        E_min_n[i] = np.argmin(EE[:, i])
    print(E_min_n[E_min_n == ans].size)  # correct trials at layer 1
    print(np.average(EE, axis=1))
    print(np.argsort(np.average(EE, axis=1)))
    print(np.average(EE, axis=1)[ans] / EE.mean())

    # Layer-2 scores.
    E_min_n1 = np.zeros(w1.shape[4])
    for i in range(0, w1.shape[4]):
        E_min_n1[i] = np.argmin(EE_1[:, i])
    print(E_min_n1[E_min_n1 == ans].size)
    print(np.average(EE_1, axis=1))
    print(np.argsort(np.average(EE_1, axis=1)))
    print(np.average(EE_1, axis=1)[ans] / EE_1.mean())

    # Layer-3 scores.
    E_min_n2 = np.zeros(w1.shape[4])
    for i in range(0, w1.shape[4]):
        E_min_n2[i] = np.argmin(EE_2[:, i])
    print(E_min_n2[E_min_n2 == ans].size)
    print(np.average(EE_2, axis=1))
    print(np.argsort(np.average(EE_2, axis=1)))
    print(np.average(EE_2, axis=1)[ans] / EE_2.mean())
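Each deeper fully connected layer receives the original 180-dimensional error concatenated with the previous layer's 1024-dimensional output, a simple skip-style input of 180 + 1024 = 1204 features. A minimal sketch of one such step with stand-in arrays (the weights are hypothetical):

import numpy as np

err = np.random.rand(180)       # conv-feature difference
h_prev = np.random.rand(1024)   # previous FC layer's output
W = np.random.rand(1204, 1024)  # stand-in layer weights
b = np.random.rand(1024)

x = np.hstack((err, h_prev)).reshape(1, 1204)  # concatenated skip input
h_next = np.dot(x, W) + b                      # next layer's output, shape (1, 1024)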