Example #1
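import numpy as np
import tensorflow as tf  # written against the TensorFlow 1.x API

# The helpers used below (fullnet, RNN, conv_net) and the data loaders
# (load, load_test) are not included in this listing. The definitions in
# this block are minimal placeholders inferred from their call sites --
# assumptions for illustration, not the original implementations.


def fullnet(x, weights, biases, keep_prob):
    # Hypothetical 3-layer ReLU MLP matching the weight shapes used in
    # train_fcon (180 -> 1024 -> 512 -> 256 -> num_classes), with dropout.
    fc1 = tf.nn.relu(tf.add(tf.matmul(x, weights['wd1']), biases['bd1']))
    fc1 = tf.nn.dropout(fc1, keep_prob)
    fc2 = tf.nn.relu(tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2']))
    fc2 = tf.nn.dropout(fc2, keep_prob)
    fc3 = tf.nn.relu(tf.add(tf.matmul(fc2, weights['wd3']), biases['bd3']))
    fc3 = tf.nn.dropout(fc3, keep_prob)
    return tf.add(tf.matmul(fc3, weights['out']), biases['out'])


def load(shape, batch, m):
    # Placeholder loader: random inputs of the requested shape plus random
    # one-hot labels, so the listing runs end to end. Replace with the
    # project's real data pipeline.
    x = np.random.rand(batch, *shape).astype(np.float32)
    y = np.zeros((batch, m), dtype=np.float32)
    y[np.arange(batch), np.random.randint(0, m, batch)] = 1.0
    return x, y


def load_test(shape, n, m):
    # Placeholder test loader: n samples ordered by class, 10 per class
    # (the evaluation loops below assume this layout and m <= 10).
    x = np.random.rand(n, *shape).astype(np.float32)
    y = np.zeros((n, m), dtype=np.float32)
    for i in range(n):
        y[i, (i // 10) % m] = 1.0
    return x, y
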
def train_fcon(m):
    # Build a fresh graph so repeated calls do not accumulate variables.
    tf.reset_default_graph()
    Num = 0  # set to 1 if training converges early (validation accuracy hits 1.0)
    learning_rate = 0.001
    num_steps = 3000
    batch_size = 128
    display_step = 100

    # Network Parameters
    num_input = 180  # flattened input size (45*4)
    num_classes = m  # number of output classes
    dropout = 0.75  # dropout keep probability
    # tf Graph input
    X = tf.placeholder(tf.float32, [None, num_input])
    Y = tf.placeholder(tf.float32, [None, num_classes])
    keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)
    weights = {
        # 180 inputs, 1024 hidden units
        'wd1': tf.Variable(tf.random_normal([180, 1024])),
        # 1024 inputs, 512 hidden units
        'wd2': tf.Variable(tf.random_normal([1024, 512])),
        # 512 inputs, 256 hidden units
        'wd3': tf.Variable(tf.random_normal([512, 256])),
        # 256 inputs, num_classes outputs (class prediction)
        'out': tf.Variable(tf.random_normal([256, num_classes]))
    }

    biases = {
        'bd1': tf.Variable(tf.random_normal([1024])),
        'bd2': tf.Variable(tf.random_normal([512])),
        'bd3': tf.Variable(tf.random_normal([256])),
        'out': tf.Variable(tf.random_normal([num_classes]))
    }
    # Construct model
    logits = fullnet(X, weights, biases, keep_prob)
    prediction = tf.nn.softmax(logits)

    # Define loss and optimizer
    loss_op = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss_op)

    # Evaluate model
    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:

        # Run the initializer
        sess.run(init)
        ac_max = 0

        for step in range(1, num_steps + 1):
            # Draw a fresh training batch and a validation batch each step.
            batch_x, batch_y = load([45, 4], batch_size, m)
            batch_x = batch_x.reshape(batch_size, num_input)
            v_x, v_y = load([45, 4], batch_size, m)
            v_x = v_x.reshape(batch_size, num_input)
            # Run optimization op (backprop)
            sess.run(train_op,
                     feed_dict={
                         X: batch_x,
                         Y: batch_y[:, 0:m],
                         keep_prob: dropout
                     })
            if step % display_step == 0 or step == 1:
                # Calculate batch loss and accuracy
                loss, acc = sess.run([loss_op, accuracy],
                                     feed_dict={
                                         X: batch_x,
                                         Y: batch_y[:, 0:m],
                                         keep_prob: 1.0
                                     })
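                # Validation accuracy on the held-out batch.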
                ac = sess.run(accuracy,
                              feed_dict={
                                  X: v_x,
                                  Y: v_y[:, 0:m],
                                  keep_prob: 1.0
                              })
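                # Keep the checkpoint with the best validation accuracy so far.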
                if ac_max < ac:
                    ac_max = ac
                    save_path = saver.save(sess,
                                           'model1/model2' + str(m) + '.ckpt')

                print("Step " + str(step) + ", Minibatch Loss= " + \
                      "{:.4f}".format(loss) + ", Train Accuracy= " + \
                      "{:.3f}".format(acc))
                if ac == 1 and step > 5:
                    Num = 1
                    break
        #print("Optimization Finished!")

        #Calculate accuracy for 256 MNIST test images
        #step=np.argmax(ac)

        # Restore the checkpoint with the best validation accuracy.
        saver.restore(sess, 'model1/model2' + str(m) + '.ckpt')

        # Per-class test accuracy: load_test returns 10 examples per class.
        test, ans = load_test([45, 4], 100, m)
        test = test.reshape(100, 180)
        acc = np.zeros(m)
        for i in range(m):
            acc[i] = sess.run(accuracy,
                              feed_dict={
                                  X: test[i * 10:(i + 1) * 10, :].reshape(
                                      10, 180),
                                  Y: ans[i * 10:(i + 1) * 10,
                                         0:m].reshape(10, m),
                                  keep_prob: 1.0
                              })
    return acc
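
Example #2
# `RNN` is not shown in this listing. A minimal placeholder consistent with
# its call site (input [batch, 40, 130], a 128-unit recurrent cell, and a
# (logits, per-step outputs) return value) follows -- an assumption, not the
# author's implementation.
def RNN(x, weights, biases):
    # Split [batch, timesteps, num_input] into `timesteps` tensors of shape
    # [batch, num_input], as expected by the static RNN.
    x = tf.unstack(x, 40, 1)
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(128, forget_bias=1.0)
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # Linear read-out on the last time step's output.
    logits = tf.matmul(outputs[-1], weights['out']) + biases['out']
    return logits, outputs
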
def train_rcon(m):
    tf.reset_default_graph()
    Num = 0  # set to 1 if training converges early (validation accuracy hits 1.0)
    learning_rate = 0.001
    num_steps = 3000
    batch_size = 128
    display_step = 100

    # Network Parameters
    num_input = 130  # features per timestep
    num_classes = m  # number of output classes
    timesteps = 40  # sequence length
    num_hidden = 128  # hidden units in the recurrent cell
    dropout = 0.75  # dropout keep probability
    # tf Graph input
    X = tf.placeholder(tf.float32, [None, timesteps, num_input])
    Y = tf.placeholder(tf.float32, [None, num_classes])
    keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)
    weights = {'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))}
    biases = {'out': tf.Variable(tf.random_normal([num_classes]))}
    logits, outputs = RNN(X, weights, biases)
    prediction = tf.nn.softmax(logits)

    # Define loss and optimizer
    loss_op = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss_op)

    # Evaluate model (with test logits, for dropout to be disabled)
    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Initialize the variables (i.e. assign their default value)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:

        # Run the initializer
        sess.run(init)
        ac_max = 0
        for step in range(1, num_steps + 1):
            # Draw a fresh training batch and a validation batch each step.
            batch_x, batch_y = load([40, 130], batch_size, m)
            v_x, v_y = load([40, 130], batch_size, m)
            # Run optimization op (backprop)
            sess.run(train_op,
                     feed_dict={
                         X: batch_x,
                         Y: batch_y[:, 0:m],
                         keep_prob: dropout
                     })
            if step % display_step == 0 or step == 1:
                # Calculate batch loss and accuracy
                loss, acc = sess.run([loss_op, accuracy],
                                     feed_dict={
                                         X: batch_x,
                                         Y: batch_y[:, 0:m],
                                         keep_prob: 1.0
                                     })
                # Validation accuracy on the held-out batch (the original
                # re-fed the training batch here, which made the printed
                # "Validation Accuracy" a duplicate of the training one).
                los, ac = sess.run([loss_op, accuracy],
                                   feed_dict={
                                       X: v_x,
                                       Y: v_y[:, 0:m],
                                       keep_prob: 1.0
                                   })
                print("Step " + str(step) + ", Minibatch Loss= " + \
                      "{:.4f}".format(loss) + ", Train Accuracy= " + \
                      "{:.3f}".format(acc) + "Validation Accuracy= " + \
                     "{:.3f}".format(ac))
                if ac_max < ac:
                    save_path = saver.save(sess,
                                           'model/model2' + str(m) + '.ckpt')
                    ac_max = ac
                if ac == 1 and step > 5:
                    Num = 1
                    break

        # Restore the checkpoint with the best validation accuracy.
        saver.restore(sess, 'model/model2' + str(m) + '.ckpt')
        # Per-class test accuracy: load_test returns 10 examples per class.
        x, y = load_test([40, 130], 100, m)
        acc = np.zeros(m)
        for i in range(m):
            acc[i] = sess.run(accuracy,
                              feed_dict={
                                  X: x[i * 10:i * 10 + 10, :],
                                  Y: y[i * 10:i * 10 + 10, 0:m],
                                  keep_prob: 1.0
                              })

    return acc
Example #3
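# `conv_net` is not shown in this listing. From its call site it takes the
# flattened [batch, 3000] input plus the weight/bias dicts and keep_prob,
# and must reshape the input back to a 50x60 image, apply the two
# convolutions defined below, and flatten the result to 288 features (the
# input size of weights['wd1']). The exact strides and pooling that produce
# that 288-dim flatten are not recoverable from this snippet, so no
# placeholder implementation is given here.
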
def train_cov(m):
    # Build a fresh graph so repeated calls do not accumulate variables.
    tf.reset_default_graph()
    # Training Parameters
    Num = 0  # set to 1 if training converges early (validation accuracy hits 1.0)
    learning_rate = 0.0001
    num_steps = 3000
    batch_size = 8
    display_step = 100

    # Network Parameters
    num_input = 3000  # flattened input size (50*60 image)
    num_classes = m  # number of output classes
    dropout = 0.75  # dropout keep probability

    # tf Graph input
    X = tf.placeholder(tf.float32, [None, num_input])
    Y = tf.placeholder(tf.float32, [None, num_classes])
    keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)
    # Store layers weight & bias
    weights = {
        # 10x10 conv, 1 input channel, 3 output channels
        'wc1': tf.Variable(tf.random_normal([10, 10, 1, 3])),
        # 25x25 conv, 3 input channels, 6 output channels
        'wc2': tf.Variable(tf.random_normal([25, 25, 3, 6])),
        # fully connected, 288 inputs (flattened conv output), 1024 outputs
        'wd1': tf.Variable(tf.random_normal([288, 1024])),
        # 1024 inputs, num_classes outputs (class prediction)
        'out': tf.Variable(tf.random_normal([1024, num_classes]))
    }

    biases = {
        'bc1': tf.Variable(tf.random_normal([3])),
        'bc2': tf.Variable(tf.random_normal([6])),
        'bd1': tf.Variable(tf.random_normal([1024])),
        'out': tf.Variable(tf.random_normal([num_classes]))
    }

    # Construct model
    logits = conv_net(X, weights, biases, keep_prob)
    prediction = tf.nn.softmax(logits)

    # Define loss and optimizer
    loss_op = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss_op)

    # Evaluate model
    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:

        # Run the initializer
        sess.run(init)
        ac_max = 0
        for step in range(1, num_steps + 1):
            # Draw a fresh training batch and a validation batch each step.
            batch_x, batch_y = load([50, 60], batch_size, m)
            v_x, v_y = load([50, 60], batch_size, m)
            # Run optimization op (backprop)
            sess.run(train_op,
                     feed_dict={
                         X: batch_x.reshape(8, 3000),
                         Y: batch_y[:, 0:m],
                         keep_prob: dropout
                     })
            if step % display_step == 0 or step == 1:
                # Calculate batch loss and accuracy
                loss, acc = sess.run(
                    [loss_op, accuracy],
                    feed_dict={
                        X: batch_x.reshape(8, 3000),
                        Y: batch_y[:, 0:m],
                        keep_prob: 1.0
                    })
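                # Validation accuracy on the held-out batch.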
                los, ac = sess.run([loss_op, accuracy],
                                   feed_dict={
                                       X: v_x.reshape(8, 3000),
                                       Y: v_y[:, 0:m],
                                       keep_prob: 1.0
                                   })
                print("Step " + str(step) + ", Minibatch Loss= " + \
                      "{:.4f}".format(loss) + ", Train Accuracy= " + \
                      "{:.3f}".format(acc)+",Validation Accuracy= " +\
                     "{:.3f}".format(ac))
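                # Keep the checkpoint with the best validation accuracy so far.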
                if ac_max < ac:
                    save_path = saver.save(sess,
                                           'model/model1' + str(m) + '.ckpt')
                    ac_max = ac
                if ac == 1 and step > 5:
                    Num = 1
                    break

        # Restore the checkpoint with the best validation accuracy.
        saver.restore(sess, 'model/model1' + str(m) + '.ckpt')
        # Per-class test accuracy: only 8 of each class's 10 test samples
        # are used, matching the batch size of 8.
        x, y = load_test([50, 60], 100, m)
        acc = np.zeros(m)
        for i in range(m):
            acc[i] = sess.run(accuracy,
                              feed_dict={
                                  X: x[i * 10:i * 10 + 8, :].reshape(8, 3000),
                                  Y: y[i * 10:i * 10 + 8, 0:m].reshape(8, m),
                                  keep_prob: 1.0
                              })
    return acc
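

# Hypothetical usage (not part of the original listing): each trainer builds
# and trains its own graph, checkpoints the best validation model, and
# returns a length-m vector of per-class test accuracies. The checkpoint
# directories ('model1/' for train_fcon, 'model/' for the others) must
# already exist.
if __name__ == '__main__':
    m = 4  # placeholder class count
    print('FC per-class test accuracy: ', train_fcon(m))
    print('RNN per-class test accuracy:', train_rcon(m))
    # train_cov(m) additionally needs the external conv_net helper:
    # print('CNN per-class test accuracy:', train_cov(m))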