Code example #1
def run_training():
    print(FLAGS.train_or_validation)
    if FLAGS.train_or_validation == 'train':
        print('distorted_inputs')
        data_files_ = TRAIN_FILE
        images, bboxes = grasp_img_proc.distorted_inputs(
                  [data_files_], FLAGS.num_epochs, batch_size=FLAGS.batch_size)
    else:
        print('inputs')
        data_files_ = VALIDATE_FILE
        images, bboxes = grasp_img_proc.inputs([data_files_])
    
    x, y, rad = bboxes_to_grasps(bboxes)
    
    # images_np = np.array(images)
    x_hat, y_hat, rad_hat = tf.unstack(inference(images), axis=1)  # list
    # tangent of 85 degrees is 11

    # Unused leftover lines (logits, Y and learning_rate are not defined in this example):
    # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
    # optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    rad_hat_confined = tf.minimum(11., tf.maximum(-11., rad_hat))
    rad_confined = tf.minimum(11., tf.maximum(-11., rad))
    # Loss function
    gamma = tf.constant(10.)
    loss = tf.reduce_mean(tf.pow(x_hat - x, 2) + tf.pow(y_hat - y, 2) + gamma * tf.pow(rad_hat_confined - rad_confined, 2))
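    # The loss is the mean squared error on the grasp centre (x, y) plus a
    # gamma-weighted squared error on the clipped angle term.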
    train_op = tf.train.AdamOptimizer(epsilon=0.1).minimize(loss)
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    sess = tf.Session()
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
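    # start_queue_runners launches the background threads that fill the TF1 input
    # queues; without them, sess.run on `images`/`bboxes` would block indefinitely.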
    #save/restore model
    d={}
    l = ['w1', 'b1', 'w2', 'b2', 'w3', 'b3', 'w4', 'b4', 'w5', 'b5', 'w_fc1', 'b_fc1', 'w_fc2', 'b_fc2']
    for i in l:
        d[i] = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if v.name == i+':0'][0]
    
    dg={}
    lg = ['w1', 'b1', 'w2', 'b2', 'w3', 'b3', 'w4', 'b4', 'w5', 'b5', 'w_fc1', 'b_fc1', 'w_fc2', 'b_fc2', 'w_output', 'b_output']
    for i in lg:
        dg[i] = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if v.name == i+':0'][0]

    saver = tf.train.Saver(d)
    saver_g = tf.train.Saver(dg, max_to_keep=20)
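    # `d` covers only the pre-trained (ImageNet) layers, while `dg` also includes the
    # grasp output layer, so the two Savers restore/save different subsets of weights.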
    #saver.restore(sess, "/root/grasp/grasp-detection/models/imagenet/m2/m2.ckpt")
    saver_g.restore(sess, FLAGS.model_path)
    try:
        count = 0
        step = 0
        start_time = time.time()
        while not coord.should_stop():
            start_batch = time.time()
            #train
            if FLAGS.train_or_validation == 'train':
                _, loss_value, images_np, x_value, x_model, y_value, y_model, rad_value, rad_model = \
                                    sess.run([train_op, loss, images, x, x_hat, y, y_hat, rad, rad_hat])
                duration = time.time() - start_batch
                if step % 100 == 0:
                    print('Step %d | loss = %s\n | x = %s\n | x_hat = %s\n | y = %s\n | y_hat = %s\n | rad = %s\n | rad_hat = %s\n | (%.3f sec/batch)\n'
                          % (step, loss_value, x_value[:3], x_model[:3], y_value[:3], y_model[:3], rad_value[:3], rad_model[:3], duration))
                
                if step % 1000 == 0:   
                    filename = MODEL_SAVE_PATH + '/step_' + str(step)
                    if not os.path.exists(filename):
                        os.mkdir(filename)             
                    saver_g.save(sess, filename + '/m4.ckpt')
                    # cv2.imshow('bbox', images_np[1]) ###################################
                    # cv2.waitKey(0)
                    # print(images_np[0])
                    
                    if step == 100000:
                        sess.close()
                    
            else:
                bbox_hat = grasp_to_bbox(x_hat, y_hat, rad_hat)
                bbox_value, bbox_model, x_value, x_model, y_value, y_model, rad_value, rad_model = sess.run([bboxes, bbox_hat, x, x_hat, y, y_hat, rad, rad_hat])
                bbox_value = np.reshape(bbox_value, -1)
                bbox_value = [(bbox_value[0], bbox_value[1]), (bbox_value[2], bbox_value[3]),
                              (bbox_value[4], bbox_value[5]), (bbox_value[6], bbox_value[7])]
                # bbox_value = [(bbox_value[0]*0.35, bbox_value[1]*0.47), (bbox_value[2]*0.35, bbox_value[3]*0.47), (bbox_value[4]*0.35, bbox_value[5]*0.47), (bbox_value[6]*0.35, bbox_value[7]*0.47)]
                p1 = Polygon(bbox_value)
                p2 = Polygon(bbox_model)
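                # Jaccard index (IoU): intersection area divided by the area of the
                # union of the ground-truth and predicted grasp rectangles.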
                iou = p1.intersection(p2).area / (p1.area + p2.area - p1.intersection(p2).area)
                angle_diff = np.abs(np.arctan(rad_model) * 180 / np.pi - np.arctan(rad_value) * 180 / np.pi)
                duration = time.time() - start_batch
                # if angle_diff < 30. and iou >= 0.25:
                count += 1
                print('image: %d | duration = %.2f | count = %d | iou = %.2f | angle_difference = %.2f' %(step, duration, count, iou, angle_diff))
                print('x=', x_value, x_model, ' y=', y_value, y_model, ' rad=', rad_value, rad_model)
            step +=1
    except tf.errors.OutOfRangeError:
        print('Done training for %d epochs, %d steps, %.1f min.' % (FLAGS.num_epochs, step, (time.time()-start_time)/60))
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Code example #2
def run_training():
    print(FLAGS.train_or_validation)
    if FLAGS.train_or_validation == 'train':
        print('distorted_inputs')
        data_files_ = TRAIN_FILE
        images, bboxes = grasp_img_proc.distorted_inputs(
            [data_files_], FLAGS.num_epochs, batch_size=FLAGS.batch_size)
    else:
        print('inputs')
        data_files_ = VALIDATE_FILE
        images, bboxes = grasp_img_proc.inputs([data_files_])

    x, y, degree, h, w = bboxes_to_grasps(
        bboxes)  # x, y, tan, h, w = bboxes_to_grasps(bboxes)

    # images_np = np.array(images)
    x_hat, y_hat, degree_hat, h_hat, w_hat = tf.unstack(inference(images),
                                                        axis=1)  # list
    # x_hat, y_hat, tan_hat, h_hat, w_hat = tf.unstack(inference(images), axis=1) # list
    # tangent of 85 degree is 11
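    # tf.unstack(..., axis=1) splits the network output (presumably of shape
    # (batch, 5)) into five per-component tensors, one for each grasp parameter.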

    # tan_hat_confined = tf.minimum(11., tf.maximum(-11., tan_hat))
    # tan_confined = tf.minimum(11., tf.maximum(-11., tan))
    # Loss function
    gamma = tf.constant(0.001)
    loss = tf.reduce_sum(
        tf.pow(x_hat - x, 2) + tf.pow(y_hat - y, 2) +
        gamma * tf.pow(degree_hat - degree, 2) + tf.pow(h_hat - h, 2) +
        tf.pow(w_hat - w, 2))
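    # This variant regresses (x, y, angle, h, w) directly and down-weights the
    # angle term with gamma = 0.001 instead of clipping the tangent.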
    train_op = tf.train.AdamOptimizer(epsilon=0.1).minimize(loss)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess = tf.Session()
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    #save/restore model
    d = {}
    l = [
        'w1', 'b1', 'w2', 'b2', 'w3', 'b3', 'w4', 'b4', 'w5', 'b5', 'w_fc1',
        'b_fc1', 'w_fc2', 'b_fc2', 'w_fc3', 'b_fc3', 'w_output', 'b_output'
    ]
    for i in l:
        d[i] = [
            v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            if v.name == i + ':0'
        ][0]

    dg = {}
    lg = [
        'w1', 'b1', 'w2', 'b2', 'w3', 'b3', 'w4', 'b4', 'w5', 'b5', 'w_fc1',
        'b_fc1', 'w_fc2', 'b_fc2', 'w_output', 'b_output'
    ]
    for i in lg:
        dg[i] = [
            v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            if v.name == i + ':0'
        ][0]

    saver = tf.train.Saver(d)
    saver_g = tf.train.Saver(dg, max_to_keep=20)
    #saver.restore(sess, "/root/grasp/grasp-detection/models/imagenet/m2/m2.ckpt")
    saver_g.restore(sess, FLAGS.model_path)
    try:
        count = 0
        step = 0
        start_time = time.time()
        while not coord.should_stop():
            start_batch = time.time()
            #train
            if FLAGS.train_or_validation == 'train':
                _, loss_value, images_np, x_value, x_model, y_value, y_model, degree_value, degree_model, h_value, h_model, w_value, w_model = sess.run(
                    [
                        train_op, loss, images, x, x_hat, y, y_hat, degree,
                        degree_hat, h, h_hat, w, w_hat
                    ])
                # _, loss_value, images_np, x_value, x_model, y_value, y_model, tan_value, tan_model, h_value, h_model, w_value, w_model = sess.run([train_op, loss, images, x, x_hat, y, y_hat, tan, tan_hat, h, h_hat, w, w_hat])
                duration = time.time() - start_batch
                if step % 100 == 0:
                    print(
                        'Step %d | loss = %s\n | x = %s\n | x_hat = %s\n | y = %s\n | y_hat = %s\n | degree = %s\n | degree_hat = %s\n | h = %s\n | h_hat = %s\n | w = %s\n | w_hat = %s\n | (%.3f sec/batch)\n'
                        % (step, loss_value, x_value[:3], x_model[:3],
                           y_value[:3], y_model[:3], degree_value[:3],
                           degree_model[:3], h_value[:3], h_model[:3],
                           w_value[:3], w_model[:3], duration))
                    # print('Step %d | loss = %s\n | x = %s\n | x_hat = %s\n | y = %s\n | y_hat = %s\n | tan = %s\n | tan_hat = %s\n | h = %s\n | h_hat = %s\n | w = %s\n | w_hat = %s\n | (%.3f sec/batch\n')%(step, loss_value, x_value[:3], x_model[:3], y_value[:3], y_model[:3], tan_value[:3], tan_model[:3], h_value[:3], h_model[:3], w_value[:3], w_model[:3], duration)
                    # cv2.imshow('bbox', images_np[1]) ###################################
                    # print(images_np[1])
                    # cv2.imwrite("./"+str(step)+".png", images_np[1]);
                    cv2.waitKey(1)
                if step % 1000 == 0:
                    filename = MODEL_SAVE_PATH + '/step_' + str(step)
                    if not os.path.exists(filename):
                        os.mkdir(filename)
                    saver.save(sess, filename + '/m4.ckpt')

                    # cv2.imshow('bbox', images_np[1]) ###################################
                    # cv2.waitKey(0)
                    # print(images_np[0])

                    # if step == 100000:
                    #     sess.close()

            # else:
            #     bbox_hat = grasp_to_bbox(x_hat, y_hat, tan_hat, h_hat, w_hat)
            #     bbox_value, bbox_model, x_value, x_model, y_value, y_model, tan_value, tan_model, h_value, h_model, w_value, w_model = sess.run([bboxes, bbox_hat, x, x_hat, y, y_hat, tan, tan_hat, h, h_hat, w, w_hat])
            #     bbox_value = np.reshape(bbox_value, -1)
            #     bbox_value = [(bbox_value[0],bbox_value[1]),(bbox_value[2],bbox_value[3]),(bbox_value[4],bbox_value[5]),(bbox_value[6],bbox_value[7])]  # bbox_value = [(bbox_value[0]*0.35,bbox_value[1]*0.47),(bbox_value[2]*0.35,bbox_value[3]*0.47),(bbox_value[4]*0.35,bbox_value[5]*0.47),(bbox_value[6]*0.35,bbox_value[7]*0.47)]
            #     p1 = Polygon(bbox_value)
            #     p2 = Polygon(bbox_model)
            #     iou = p1.intersection(p2).area / (p1.area +p2.area -p1.intersection(p2).area)
            #     angle_diff = np.abs(np.arctan(tan_model)*180/np.pi -np.arctan(tan_value)*180/np.pi)
            #     duration = time.time() -start_batch
            #     # if angle_diff < 30. and iou >= 0.25:
            #     count+=1
            #     print('image: %d | duration = %.2f | count = %d | iou = %.2f | angle_difference = %.2f' %(step, duration, count, iou, angle_diff))
            #     print('x=',x_value,x_model,' y=',y_value, y_model,' tan=',tan_value, tan_model,' h=', h_value, h_model,' w=', w_value, w_model)
            step += 1
    except tf.errors.OutOfRangeError:
        print('Done training for %d epochs, %d steps, %.1f min.' %
              (FLAGS.num_epochs, step, (time.time() - start_time) / 60))
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Code example #3
def run_training():
    tf.reset_default_graph()
    print(FLAGS.train_or_validation)
    if FLAGS.train_or_validation == 'train':
        print('distorted_inputs')
        data_files_ = TRAIN_FILE  # data_files_ points to the actual TFRecord data
        images, bboxes = grasp_img_proc.distorted_inputs(
            [data_files_], FLAGS.num_epochs, batch_size=FLAGS.batch_size)

    else:
        #Validation dataset
        print('inputs')
        data_files_ = VALIDATE_FILE
        images, bboxes = grasp_img_proc.inputs([data_files_])

    # These are the labels (uses the grasp_img_proc module with the distorted images); everything is processed as bboxes since the labels are in x,y coordinates
    x, y, tan, h, w = bboxes_to_grasps(bboxes)

    # These are the outputs of the model - used for the error minimisation (uses the grasp_inf model) - outputs the grasp representation directly
    x_hat, y_hat, tan_hat, h_hat, w_hat = tf.unstack(inference(images), axis=1)

    # tangent of 85 degree is 11
    tan_hat_confined = tf.minimum(11., tf.maximum(-11., tan_hat))
    tan_confined = tf.minimum(11., tf.maximum(-11., tan))
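    # tan(85 deg) is roughly 11.4, so both the predicted and ground-truth tangents
    # are clipped to [-11, 11], presumably to keep the angle term bounded near +/-90 deg.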

    # Loss function
    gamma = tf.constant(10.)
    # A custom cost function for the box regression - essentially a weighted sum of squared errors
    loss = tf.reduce_sum(
        tf.pow(x_hat - x, 2) + tf.pow(y_hat - y, 2) +
        gamma * tf.pow(tan_hat_confined - tan_confined, 2) +
        tf.pow(h_hat - h, 2) + tf.pow(w_hat - w, 2))

    # Instead of stochastic gradient descent
    train_op = tf.train.AdamOptimizer(epsilon=0.1).minimize(loss)
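    # epsilon is Adam's numerical-stability constant; 0.1 is far larger than the
    # TF default of 1e-8, which damps the optimizer's adaptive per-parameter scaling.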

    # Initializes the variables to the values they were parameterized with
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    #Create a session
    sess = tf.Session()
    sess.run(init_op)

    # Allows for thread queueing
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    #Some stuff related to plotting

    #save/restore model

    # d holds the weights that were pre-trained on ImageNet, hence it does not contain w_output and b_output (those would have been classification outputs)
    d = {}
    l = [
        'w1', 'b1', 'w2', 'b2', 'w3', 'b3', 'w4', 'b4', 'w5', 'b5', 'w_fc1',
        'b_fc1', 'w_fc2', 'b_fc2'
    ]
    # Iterates through the list l; if a variable with that name is in GLOBAL_VARIABLES, store it in the dict d
    for i in l:
        d[i] = [
            v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            if v.name == i + ':0'
        ][0]

    dg = {}
    lg = [
        'w1', 'b1', 'w2', 'b2', 'w3', 'b3', 'w4', 'b4', 'w5', 'b5', 'w_fc1',
        'b_fc1', 'w_fc2', 'b_fc2', 'w_output', 'b_output'
    ]
    for i in lg:
        dg[i] = [
            v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            if v.name == i + ':0'
        ][0]

    # This essentially just saves the current models
    #saver = tf.train.Saver(d)
    saver_g = tf.train.Saver(dg)
    #my_models/test_crash/test_crash_continuation
    if FLAGS.continue_from_trained_model == 'yes':
        #saver.restore(sess, "/root/grasp/grasp-detection/models/imagenet/m2/m2.ckpt")
        saver_g.restore(sess, "my_models/test_save/test_save")
        '''Restores a previously trained model'''

    # Quick calculation to determine the current epoch
    steps_per_epoch = int(708 / FLAGS.batch_size)
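    # 708 is assumed to be the number of training examples, so one epoch is 708 / batch_size steps.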
    model_no = 0
    error = None  # ensure `error` exists even in training mode, since it is returned at the end
    try:
        count = 0
        step = 0
        epoch = 0
        start_time = time.time()
        while not coord.should_stop():
            start_batch = time.time()

            if FLAGS.train_or_validation == 'train':
                # Every time loss, x, x_hat, tan, etc. are fetched, the ops they depend on are run implicitly - graph flow
                _, loss_value, x_value, x_model, tan_value, tan_model, h_value, h_model, w_value, w_model = sess.run(
                    [
                        train_op, loss, x, x_hat, tan, tan_hat, h, h_hat, w,
                        w_hat
                    ])
                duration = time.time() - start_batch

                #if step % 100 == 0:
                #print("Step %d | loss = %s\n | x = %s\n | x_hat = %s\n | tan = %s\n | tan_hat = %s\n | h = %s\n | h_hat = %s\n | w = %s\n | w_hat = %s\n | (%.3f sec/batch\n"%(step, loss_value, x_value[:3], x_model[:3], tan_value[:3], tan_model[:3], h_value[:3], h_model[:3], w_value[:3], w_model[:3], duration))
                #How come the y values are not included? - does that not matter because the x's are already being called?

                if step % 110 == 0:
                    saver_g.save(
                        sess, "my_models/test_save/test_save" + str(model_no))
                    model_no += 1

                if step % steps_per_epoch == 0:

                    print(
                        "Step %d | loss = %s\n | x = %s\n | x_hat = %s\n | tan = %s\n | tan_hat = %s\n | h = %s\n | h_hat = %s\n | w = %s\n | w_hat = %s\n | (%.3f sec/batch\n)"
                        % (step, loss_value, x_value[:3], x_model[:3],
                           tan_value[:3], tan_model[:3], h_value[:3],
                           h_model[:3], w_value[:3], w_model[:3], duration))
                    print("Current number of epochs: %d" % (epoch))
                    ani = FuncAnimation(fig, animate(epoch, loss_value), 1)
                    epoch += 1
                    plt.tight_layout()
                    plt.show()
                    #MAKE A LIVE GRAPH MAKE IT EASIER TO SEE

            else:
                #VALIDATION
                '''won't work yet as I have not edited the grasp_img_proc file yet'''
                #Converts output of NN to four corner vertices
                bbox_hat = grasp_to_bbox(x_hat, y_hat, tan_hat, h_hat, w_hat)

                #Gets the value of the actual vertices (randomly), bbox from NN, the actual tan and the predicted tan
                bbox_value, bbox_model, tan_value, tan_model = sess.run(
                    [bboxes, bbox_hat, tan, tan_hat])

                #Turn the bbox value into a 1D array
                bbox_value = np.reshape(bbox_value, -1)

                #Rescale it to the size of the 224x224 output of the neural net
                bbox_value = [(bbox_value[0] * 0.35, bbox_value[1] * 0.47),
                              (bbox_value[2] * 0.35, bbox_value[3] * 0.47),
                              (bbox_value[4] * 0.35, bbox_value[5] * 0.47),
                              (bbox_value[6] * 0.35, bbox_value[7] * 0.47)]
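                # Note: 0.35 is roughly 224/640 and 0.47 roughly 224/480, so this
                # presumably maps 640x480 image coordinates onto the 224x224 network input.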

                #Takes in the x,y coordinates of the vertices, and creates rectangles from vertices
                p1 = Polygon(bbox_value)
                p2 = Polygon(bbox_model)

                # Jaccard index; if the overlap area is greater than 25% then it counts
                jaccard = p1.intersection(p2).area / (p1.area + p2.area -
                                                      p1.intersection(p2).area)

                #Also if the angle is within 30 degrees of the randomly picked rectangle then:
                angle_diff = np.abs(
                    np.arctan(tan_model) * 180 / np.pi -
                    np.arctan(tan_value) * 180 / np.pi)

                duration = time.time() - start_batch
                if angle_diff < 30. and jaccard >= 0.25:
                    #Add to the count of the 'correct'
                    count += 1
                    print(
                        'image: %d | duration = %.2f | count = %d | jaccard = %.2f | angle_difference = %.2f'
                        % (step, duration, count, jaccard, angle_diff))

            step += 1

    except tf.errors.OutOfRangeError:  # raised when the input queue is exhausted after num_epochs
        saver_g.save(sess, "my_models/test_save/test_save" +
                     str(model_no))  # Best to save it again at the end
        if FLAGS.train_or_validation == 'train':
            print('Done training for %d epochs, %d steps, %.1f min.' %
                  (FLAGS.num_epochs, step, (time.time() - start_time) / 60))
        else:
            #print("Number of validation data: ", step)
            error = ((1 - (count / step)) * 100)
            print("\nError of %.2f%%" % (error))
    finally:
        coord.request_stop()  #stops the threading/queueing

    coord.join(threads)  #rejoins the threads

    sess.close()
    return error
Code example #4
def run_training():
    print(FLAGS.train_or_validation)
    if FLAGS.train_or_validation == 'train':
        print('distorted_inputs')
        data_files_ = TRAIN_FILE
        images, bboxes = grasp_img_proc.distorted_inputs(
            [data_files_], FLAGS.num_epochs, batch_size=FLAGS.batch_size)
    else:
        print('inputs')
        data_files_ = VALIDATE_FILE
        images, bboxes = grasp_img_proc.inputs([data_files_])

    x, y, tan, h, w = bboxes_to_grasps(bboxes)
    x_hat, y_hat, tan_hat, h_hat, w_hat = tf.unstack(inference(images),
                                                     axis=1)  # list
    # tangent of 85 degree is 11
    tan_hat_confined = tf.minimum(11., tf.maximum(-11., tan_hat))
    tan_confined = tf.minimum(11., tf.maximum(-11., tan))
    # Loss function
    gamma = tf.constant(10.)
    loss = tf.reduce_sum(
        tf.pow(x_hat - x, 2) + tf.pow(y_hat - y, 2) +
        gamma * tf.pow(tan_hat_confined - tan_confined, 2) +
        tf.pow(h_hat - h, 2) + tf.pow(w_hat - w, 2))
    train_op = tf.train.AdamOptimizer(epsilon=0.1).minimize(loss)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess = tf.Session()
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    #save/restore model
    d = {}
    l = [
        'w1', 'b1', 'w2', 'b2', 'w3', 'b3', 'w4', 'b4', 'w5', 'b5', 'w_fc1',
        'b_fc1', 'w_fc2', 'b_fc2'
    ]
    for i in l:
        d[i] = [
            v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            if v.name == i + ':0'
        ][0]

    dg = {}
    lg = [
        'w1', 'b1', 'w2', 'b2', 'w3', 'b3', 'w4', 'b4', 'w5', 'b5', 'w_fc1',
        'b_fc1', 'w_fc2', 'b_fc2', 'w_output', 'b_output'
    ]
    for i in lg:
        dg[i] = [
            v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            if v.name == i + ':0'
        ][0]

    saver = tf.train.Saver(d)
    saver_g = tf.train.Saver(dg)
    #saver.restore(sess, "/root/grasp/grasp-detection/models/imagenet/m2/m2.ckpt")
    saver_g.restore(sess, FLAGS.model_path)
    try:
        count = 0
        step = 0
        start_time = time.time()
        while not coord.should_stop():
            start_batch = time.time()
            #train
            if FLAGS.train_or_validation == 'train':
                _, loss_value, x_value, x_model, tan_value, tan_model, h_value, h_model, w_value, w_model = sess.run(
                    [
                        train_op, loss, x, x_hat, tan, tan_hat, h, h_hat, w,
                        w_hat
                    ])
                duration = time.time() - start_batch
                if step % 100 == 0:
                    print(
                        'Step %d | loss = %s\n | x = %s\n | x_hat = %s\n | tan = %s\n | tan_hat = %s\n | h = %s\n | h_hat = %s\n | w = %s\n | w_hat = %s\n | (%.3f sec/batch)\n'
                        % (step, loss_value, x_value[:3], x_model[:3],
                           tan_value[:3], tan_model[:3], h_value[:3],
                           h_model[:3], w_value[:3], w_model[:3], duration))
                if step % 1000 == 0:
                    saver_g.save(sess, FLAGS.model_path)
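                    # Note: unlike the other variants, this overwrites the checkpoint at
                    # FLAGS.model_path every 1000 steps instead of writing per-step directories.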
            else:
                bbox_hat = grasp_to_bbox(x_hat, y_hat, tan_hat, h_hat, w_hat)
                bbox_value, bbox_model, tan_value, tan_model = sess.run(
                    [bboxes, bbox_hat, tan, tan_hat])
                bbox_value = np.reshape(bbox_value, -1)
                bbox_value = [(bbox_value[0] * 0.35, bbox_value[1] * 0.47),
                              (bbox_value[2] * 0.35, bbox_value[3] * 0.47),
                              (bbox_value[4] * 0.35, bbox_value[5] * 0.47),
                              (bbox_value[6] * 0.35, bbox_value[7] * 0.47)]
                p1 = Polygon(bbox_value)
                p2 = Polygon(bbox_model)
                iou = p1.intersection(p2).area / (p1.area + p2.area -
                                                  p1.intersection(p2).area)
                angle_diff = np.abs(
                    np.arctan(tan_model) * 180 / np.pi -
                    np.arctan(tan_value) * 180 / np.pi)
                duration = time.time() - start_batch
                if angle_diff < 30. and iou >= 0.25:
                    count += 1
                    print(
                        'image: %d | duration = %.2f | count = %d | iou = %.2f | angle_difference = %.2f'
                        % (step, duration, count, iou, angle_diff))
            step += 1
    except tf.errors.OutOfRangeError:
        print('Done training for %d epochs, %d steps, %.1f min.' %
              (FLAGS.num_epochs, step, (time.time() - start_time) / 60))
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()