Code example #1
def train_inception_v4_stacked_lstm(width,
                                    height,
                                    n_lstm=7,
                                    dropout_keep_prob=0.8,
                                    learning_rate=0.00001,
                                    n_epochs=200):
    print('...... loading the dataset ......')
    train_set_x,train_set_y,test_set_x,test_set_y = pd.load_data_set(width,height)
    
    x = tf.placeholder(tf.float32,[None,width*height]) # input
    y = tf.placeholder(tf.float32,[None,n_classes])    # label
    keep_prob = tf.placeholder(tf.float32)             # dropout_keep_prob
    
    y_inception = inception_v4(x,width,height,keep_prob)
    y_pred = stacked_lstm_layer(y_inception,n_lstm)
    
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_pred, labels=y))
    optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
    
    correct_prediction = tf.equal(tf.argmax(y_pred,1),tf.argmax(y,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

    best_acc = 0.
    best_acc_epoch = 0
    
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        print('...... initializing variables ......')
        sess.run(init)
        
        print('...... start training ......')
        for epoch_i in range(n_epochs):
            # Training 
            train_accuracy = 0.
            for batch_i in range(n_train_example//pic_batch_size):
                
                batch_xs = train_set_x[batch_i*pic_batch_size:(batch_i+1)*pic_batch_size]
                batch_ys = train_set_y[batch_i*video_batch_size:(batch_i+1)*video_batch_size]
                _,loss,acc = sess.run([optimizer, cost, accuracy],
                                           feed_dict={
                                                x:batch_xs,
                                                y:batch_ys,
                                                keep_prob:dropout_keep_prob}
                                                )
                print('epoch:{0},minibatch:{1},cost:{2},train_accuracy:{3}'.format(epoch_i,batch_i,loss,acc))
                train_accuracy += acc

            train_accuracy /= (n_train_example//pic_batch_size)
            print('----epoch:{0},training acc = {1}'.format(epoch_i,train_accuracy))
            
            # Validation
            valid_accuracy = 0.
            for batch_i in range(n_test_example//pic_batch_size):
                batch_xs = test_set_x[batch_i*pic_batch_size:(batch_i+1)*pic_batch_size]
                batch_ys = test_set_y[batch_i*video_batch_size:(batch_i+1)*video_batch_size]
                valid_accuracy += sess.run(accuracy,
                                           feed_dict={
                                                x:batch_xs,
                                                y:batch_ys,
                                                keep_prob:1.0})
            valid_accuracy /= (n_test_example//pic_batch_size)
            print('epoch:{0},train_accuracy:{1},valid_accuracy:{2}'.format(epoch_i,train_accuracy,valid_accuracy))
            if(train_accuracy > best_acc):
                best_acc_epoch = epoch_i
                best_acc = train_accuracy
            print('========epoch:{0},current best accuracy = {1} in epoch_{2}'.format(epoch_i,best_acc,best_acc_epoch))
    
    print('...... training finished ......')
    print('...... best accuracy:{0} @ epoch_{1} ......'.format(best_acc,best_acc_epoch))
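
Note: the function above leans on module-level names that are not part of the snippet (the data-loading module aliased as pd, plus n_classes, n_train_example, n_test_example, pic_batch_size and video_batch_size). A minimal sketch of what that setup might look like, with a hypothetical module name and placeholder values:

import tensorflow as tf
import prepare_data as pd   # hypothetical project-local loader exposing load_data_set(width, height)

n_classes = 2               # hypothetical: e.g. yawning vs. not yawning
pic_batch_size = 320        # hypothetical: frames per minibatch
video_batch_size = 10       # hypothetical: videos per minibatch (one label row per video)
n_train_example = 32000     # hypothetical: total number of training frames
n_test_example = 8000       # hypothetical: total number of test frames
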
Code example #2
def train_inception_v3(width=256, height=256):

    d_start = datetime.datetime.now()

    print('...... loading the dataset ......')
    train_set_x, train_set_y, test_set_x, test_set_y = pd.load_data_set(
        width, height)

    x = tf.placeholder(tf.float32, [None, width * height])  # input
    y = tf.placeholder(tf.float32, [None, n_class])  # label
    keep_prob = tf.placeholder(tf.float32)  # dropout_keep_prob

    y_inception = inception_v3(x, width, height)
    y_pred = prediction(y_inception, keep_prob)

    cost = tf.reduce_mean(
        -tf.reduce_sum(y * tf.log(y_pred), reduction_indices=1))
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

    correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    best_acc = 0.

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        print('...... initializing variables ......')
        sess.run(init)

        n_epochs = 100
        print('...... start training ......')
        for epoch_i in range(n_epochs):
            # Training
            train_accuracy = 0.
            for batch_i in range(n_train_example // batch_size):

                batch_xs = train_set_x[batch_i * batch_size:(batch_i + 1) *
                                       batch_size]
                batch_ys = train_set_y[batch_i * batch_size:(batch_i + 1) *
                                       batch_size]
                _, loss, acc = sess.run([optimizer, cost, accuracy],
                                        feed_dict={
                                            x: batch_xs,
                                            y: batch_ys,
                                            keep_prob: dropout_keep_prob
                                        })
                print('epoch:{0},minibatch:{1},cost:{2},train_accuracy:{3}'.
                      format(epoch_i, batch_i, loss, acc))
                train_accuracy += acc

            train_accuracy /= (n_train_example // batch_size)
            print('----epoch:{0},training acc = {1}'.format(
                epoch_i, train_accuracy))

            # Validation
            valid_accuracy = 0.
            for batch_i in range(n_test_example // batch_size):
                batch_xs = test_set_x[batch_i * batch_size:(batch_i + 1) *
                                      batch_size]
                batch_ys = test_set_y[batch_i * batch_size:(batch_i + 1) *
                                      batch_size]
                valid_accuracy += sess.run(accuracy,
                                           feed_dict={
                                               x: batch_xs,
                                               y: batch_ys,
                                               keep_prob: 1.0
                                           })
            valid_accuracy /= (n_test_example // batch_size)
            print('epoch:{0},train_accuracy:{1},valid_accuracy:{2}'.format(
                epoch_i, train_accuracy, valid_accuracy))
            if (train_accuracy > best_acc):
                best_acc = train_accuracy

    d_end = datetime.datetime.now()
    print('...... training finished ......')
    print('...... best accuracy:{0} ......'.format(best_acc))
    print('...... running time:{0} seconds ......'.format((d_end - d_start).seconds))
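
One caveat about the loss above: y * tf.log(y_pred) turns into NaN as soon as y_pred contains an exact zero. A small sketch of a common safeguard, assuming y_pred is already a softmax output, is to clip the predictions before taking the log:

eps = 1e-8  # keeps log() finite
cost = tf.reduce_mean(
    -tf.reduce_sum(y * tf.log(tf.clip_by_value(y_pred, eps, 1.0)),
                   reduction_indices=1))
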
Code example #3
def train_yawn(width=256, height=256):

    d_start = datetime.datetime.now()

    print('...... loading the dataset ......')
    train_set_x, train_set_y, test_set_x, test_set_y = pd.load_data_set(
        width, height)

    train_mean = np.mean(train_set_x, axis=0)
    test_mean = np.mean(test_set_x, axis=0)

    x = tf.placeholder(tf.float32, [None, width * height])  # input
    y = tf.placeholder(tf.float32, [None, n_class])  # label
    corrupt_prob = tf.placeholder(tf.float32, [1])

    ae = autoencoder(x,
                     corrupt_prob,
                     width,
                     height,
                     dimensions=[5000, 300, 200])
    y_pred = classify(ae['z'])
    print('y_pred,shape = {}'.format(y_pred.get_shape()))

    cost = ae['cost']
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

    correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    best_acc = 0.

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        print('...... initializing variables ......')
        sess.run(init)

        n_epochs = 5
        print('...... start training ......')
        for epoch_i in range(n_epochs):
            # Training
            train_accuracy = 0.
            for batch_i in range(n_train_example // batch_size):

                batch_xs = train_set_x[batch_i * batch_size:(batch_i + 1) *
                                       batch_size]
                batch_xs_norm = np.array(
                    [img - train_mean for img in batch_xs])
                batch_ys = train_set_y[batch_i * batch_size:(batch_i + 1) *
                                       batch_size]
                _, loss, acc = sess.run([optimizer, cost, accuracy],
                                        feed_dict={
                                            x: batch_xs_norm,
                                            y: batch_ys,
                                            corrupt_prob: [1.0]
                                        })
                #print('epoch:{0},minibatch:{1},y_res:{2}'.format(epoch_i,batch_i,yy_res))
                #print('epoch:{0},minibatch:{1},y_pred:{2}'.format(epoch_i,batch_i,yy_pred))
                print('epoch:{0},minibatch:{1},cost:{2},train_accuracy:{3}'.
                      format(epoch_i, batch_i, loss, acc))
                train_accuracy += acc

            train_accuracy /= (n_train_example // batch_size)
            print('----epoch:{0},training acc = {1}'.format(
                epoch_i, train_accuracy))

            # Validation
            valid_accuracy = 0.
            for batch_i in range(n_test_example // batch_size):
                batch_xs = test_set_x[batch_i * batch_size:(batch_i + 1) *
                                      batch_size]
                batch_xs_norm = np.array([img - test_mean for img in batch_xs])
                batch_ys = test_set_y[batch_i * batch_size:(batch_i + 1) *
                                      batch_size]
                valid_accuracy += sess.run(accuracy,
                                           feed_dict={
                                               x: batch_xs_norm,
                                               y: batch_ys,
                                               corrupt_prob: [0.0]
                                           })
            valid_accuracy /= (n_test_example // batch_size)
            print('epoch:{0},train_accuracy:{1},valid_accuracy:{2}'.format(
                epoch_i, train_accuracy, valid_accuracy))
            if (train_accuracy > best_acc):
                best_acc = train_accuracy
        # draw
        n_examples = 10
        test_xs = test_set_x[n_examples * 130:n_examples * 131]
        test_xs_norm = np.array([img - train_mean for img in test_xs])
        recon = sess.run(ae['y'],
                         feed_dict={
                             x: test_xs_norm,
                             corrupt_prob: [0.0]
                         })
        fig, axs = plt.subplots(2, n_examples, figsize=(10, 2))
        for example_i in range(n_examples):
            axs[0][example_i].imshow(
                np.reshape(test_xs[example_i, :], (width, height)))
            axs[1][example_i].imshow(
                np.reshape([recon[example_i, :] + train_mean],
                           (width, height)))
        fig.show()
        plt.draw()
        plt.waitforbuttonpress()

    d_end = datetime.datetime.now()
    print('...... training finished ......')
    print('...... best accuracy:{0} ......'.format(best_acc))
    print('...... running time:{0} minutes ......'.format(
        (d_end - d_start).seconds / 60))
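
The autoencoder() helper is not shown in the snippet; from how its return value is used (ae['z'], ae['y'], ae['cost']) it is assumed to return a dict holding the latent code, the reconstruction and the reconstruction cost. A minimal, hypothetical single-layer sketch of that assumed interface (the real model presumably stacks the layers given by dimensions=[5000, 300, 200]):

def autoencoder(x, corrupt_prob, width, height, dimensions=[5000, 300, 200]):
    # hypothetical sketch: one tied-weight hidden layer standing in for the full stack
    n_input = width * height
    n_hidden = dimensions[-1]
    # denoising-style corruption blended in by corrupt_prob (shape [1]);
    # feeding corrupt_prob=[0.0] passes x through unchanged
    noise = tf.cast(tf.random_uniform(tf.shape(x), minval=0, maxval=2,
                                      dtype=tf.int32), tf.float32)
    x_in = x * noise * corrupt_prob + x * (1.0 - corrupt_prob)
    W = tf.Variable(tf.random_normal([n_input, n_hidden], stddev=0.01))
    b_enc = tf.Variable(tf.zeros([n_hidden]))
    b_dec = tf.Variable(tf.zeros([n_input]))
    z = tf.nn.tanh(tf.matmul(x_in, W) + b_enc)    # latent code
    y = tf.matmul(z, tf.transpose(W)) + b_dec     # reconstruction
    cost = tf.reduce_mean(tf.square(y - x))       # reconstruction error
    return {'z': z, 'y': y, 'cost': cost}
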
Code example #4
def train_res_blstm(width=256,height=256):
    print('...... loading the dataset ......')
    train_set_x,train_set_y,test_set_x,test_set_y = pd.load_data_set(width,height)
    
    print('...... building the model ......')
    x = tf.placeholder(tf.float32,[None,width*height])
    y = tf.placeholder(tf.float32,[None,n_classes])
    # TensorFlow LSTM cell requires a 2*n_hidden-length state (cell & hidden state concatenated)
    istate_fw = tf.placeholder(tf.float32,[None,2*n_hidden_units])
    istate_bw = tf.placeholder(tf.float32,[None,2*n_hidden_units])
    
    y_res = residual_network(x)
    y_pred = blstm(y_res,istate_fw,istate_bw)
    
    # Define loss and training functions
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_pred, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    
    # Monitor Accuracy
    correct_prediction = tf.equal(tf.argmax(y_pred,1),tf.argmax(y,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

    best_acc = 0.
    
    init = tf.global_variables_initializer()
    # Session
    with tf.Session() as sess:
        print('...... initializing variables ......')
        sess.run(init)
        
        n_epochs = 100
        print('...... start training ......')
        for epoch_i in range(n_epochs):
            # Training 
            train_accuracy = 0.
            for batch_i in range(n_train_example//pic_batch_size):
                
                batch_xs = train_set_x[batch_i*pic_batch_size:(batch_i+1)*pic_batch_size]
                batch_ys = train_set_y[batch_i*video_batch_size:(batch_i+1)*video_batch_size]
                _,loss,acc,yy = sess.run([optimizer,cost,accuracy,y_pred],
                                           feed_dict={
                                                x:batch_xs,
                                                y:batch_ys,
                                                istate_fw:np.zeros((video_batch_size,2*n_hidden_units)),
                                                istate_bw:np.zeros((video_batch_size,2*n_hidden_units))
                                                })
                print('epoch:{0},minibatch:{1},cost:{2},train_accuracy:{3}'.format(epoch_i,batch_i,loss,acc))
                train_accuracy += acc

            train_accuracy /= (n_train_example//pic_batch_size)
            print('epoch:{0},training acc = {1}'.format(epoch_i,train_accuracy))
            
            # Validation
            valid_accuracy = 0.
            for batch_i in range(n_test_example//pic_batch_size):
                batch_xs = test_set_x[batch_i*pic_batch_size:(batch_i+1)*pic_batch_size]
                batch_ys = test_set_y[batch_i*video_batch_size:(batch_i+1)*video_batch_size]
                valid_accuracy += sess.run(accuracy,
                                           feed_dict={
                                                x:batch_xs,
                                                y:batch_ys,
                                                istate_fw:np.zeros((video_batch_size,2*n_hidden_units)),
                                                istate_bw:np.zeros((video_batch_size,2*n_hidden_units))
                                                })
            valid_accuracy /= (n_test_example//pic_batch_size)
            print('====epoch:{0},train_accuracy:{1},valid_accuracy:{2}'.format(epoch_i,train_accuracy,valid_accuracy))
            if(train_accuracy > best_acc):
                best_acc = train_accuracy
    
    print('...... training finished ......')
    print('...... best accuracy:{0} ......'.format(best_acc))
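
The blstm() helper is likewise not part of the snippet. Judging from the istate_fw/istate_bw placeholders of shape [None, 2*n_hidden_units], it is assumed to run a bidirectional LSTM with the old concatenated (cell, hidden) state layout over sequences of frame features. A hedged sketch of one possible wiring in TF 1.x, reusing the same hypothetical module-level names as above (pic_batch_size, video_batch_size, n_hidden_units, n_classes):

def blstm(features, istate_fw, istate_bw):
    # hypothetical sketch: group frame-level features into video sequences,
    # run a bidirectional LSTM, and classify each video from the final outputs
    n_frames = pic_batch_size // video_batch_size     # frames per video (assumption)
    feat_dim = features.get_shape().as_list()[-1]     # assumes a statically known feature size
    seq = tf.reshape(features, [video_batch_size, n_frames, feat_dim])

    # state_is_tuple=False -> the initial state is a single [batch, 2*n_hidden] tensor,
    # matching the istate_fw / istate_bw placeholders above
    cell_fw = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, state_is_tuple=False)
    cell_bw = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, state_is_tuple=False)
    outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, seq,
                                                 initial_state_fw=istate_fw,
                                                 initial_state_bw=istate_bw,
                                                 dtype=tf.float32)
    last_fw = outputs[0][:, -1, :]   # forward pass after the last frame
    last_bw = outputs[1][:, 0, :]    # backward pass after it has seen the whole sequence
    last = tf.concat([last_fw, last_bw], axis=1)

    W = tf.Variable(tf.random_normal([2 * n_hidden_units, n_classes], stddev=0.01))
    b = tf.Variable(tf.zeros([n_classes]))
    return tf.matmul(last, W) + b    # logits, one row per video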