Example #1
def get_intraclass_variance_loss_v1(dims_reg,
                                    y_class,
                                    intraclsdims_train_classes,
                                    num_classes,
                                    use_margin_loss,
                                    dims_sd_margin,
                                    loss_type,
                                    scope=None):

    with tf.variable_scope(scope):
        groups = tf.dynamic_partition(dims_reg, y_class, num_classes)
        group_losses = []
        for train_on_cls, cls_dims_reg in zip(intraclsdims_train_classes,
                                              groups):

            if not train_on_cls: continue

            # cls_dims_reg has dims (?,3)
            N = tf.shape(cls_dims_reg)[0]
            batch_mean_dims = tf.reduce_mean(cls_dims_reg, axis=0)  # (3,)
            batch_mean_dims_exp = tf_util.tf_expand_tile(batch_mean_dims,
                                                         axis=0,
                                                         tile=[N, 1])  # (?,3)
            # Treat the batch mean as a fixed target so the gradient only pulls the
            # individual predictions towards it.
            batch_mean_dims_exp = tf.stop_gradient(batch_mean_dims_exp)
            if loss_type == 'huber':
                intraclass_variance_loss = tf.losses.huber_loss(
                    labels=batch_mean_dims_exp, predictions=cls_dims_reg)
            elif loss_type == 'mse':
                intraclass_variance_loss = tf.losses.mean_squared_error(
                    labels=batch_mean_dims_exp, predictions=cls_dims_reg)
            else:
                raise ValueError('Unsupported loss_type: %s' % loss_type)
            group_losses.append(intraclass_variance_loss)

        intraclass_variance_loss = tf.reduce_mean(group_losses)

    return intraclass_variance_loss
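For reference, the same per-class variance penalty can be written with only core TensorFlow 1.x ops. This is a minimal sketch (not the project's exact helper), assuming dims_reg is an (N,3) tensor of predicted dimensions and y_class holds int32 class ids; tf.tile stands in for the project-specific tf_util.tf_expand_tile:

import tensorflow as tf

def intraclass_variance_loss_sketch(dims_reg, y_class, num_classes, loss_type='huber'):
    # Group the predicted dims by class and pull each prediction towards the
    # (gradient-stopped) mean of its own class.
    groups = tf.dynamic_partition(dims_reg, y_class, num_classes)
    group_losses = []
    for cls_dims in groups:
        n = tf.shape(cls_dims)[0]
        mean_dims = tf.stop_gradient(tf.reduce_mean(cls_dims, axis=0, keepdims=True))  # (1,3)
        target = tf.tile(mean_dims, [n, 1])  # (?,3)
        # Empty classes contribute a zero loss here (tf.losses uses a safe mean).
        if loss_type == 'huber':
            group_losses.append(tf.losses.huber_loss(labels=target, predictions=cls_dims))
        else:
            group_losses.append(tf.losses.mean_squared_error(labels=target, predictions=cls_dims))
    return tf.reduce_mean(tf.stack(group_losses))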
Example #2
def convert_raw_y_box_to_reg_format(y_box, one_hot_vec):
    """
    Convert y_box into anchor format and into reg format.
    Note: y_box is raw format and does not have same dimensions as pred_box.
    """
    y_centers, y_orient_cls, y_orient_reg, y_dims_cls, y_dims_reg = y_box
    class_ids = tf.cast(tf.argmax(one_hot_vec, axis=1), dtype=tf.int32)
    dims_anchors = tf.constant(MEAN_DIMS_ARR, dtype=tf.float32)
    orient_anchors = tf.constant(np.arange(0, 2 * np.pi,
                                           2 * np.pi / NUM_HEADING_BIN),
                                 dtype=tf.float32)

    # Dims
    dims_cls = tf.one_hot(y_dims_cls,
                          depth=NUM_SIZE_CLUSTER,
                          on_value=1,
                          off_value=0,
                          axis=-1)  # (B,NUM_SIZE_CLUSTER)
    dims_reg = tf_util.tf_expand_tile(y_dims_reg,
                                      axis=1,
                                      tile=[1, NUM_SIZE_CLUSTER,
                                            1])  # (B,NUM_SIZE_CLUSTER,3)

    # Orient
    orient_cls = tf.one_hot(y_orient_cls,
                            depth=NUM_HEADING_BIN,
                            on_value=1,
                            off_value=0,
                            axis=-1)  # (B,NUM_HEADING_BIN)
    orient_reg = tf_util.tf_expand_tile(y_orient_reg,
                                        axis=1,
                                        tile=[1, NUM_HEADING_BIN
                                              ])  # (B,NUM_HEADING_BIN)

    box = (y_centers, dims_cls, dims_reg, orient_cls, orient_reg)
    box_reg = tf_util.tf_convert_box_params_from_anchor_to_reg_format_multi(
        box, class_ids, dims_anchors, orient_anchors)
    return box_reg
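For reference, the anchor-format expansion above can be reproduced with core TensorFlow ops. A minimal sketch, assuming NUM_SIZE_CLUSTER = 10 (consistent with the [1,10,1] tiling used later in this file) and using tf.expand_dims/tf.tile in place of tf_util.tf_expand_tile:

import tensorflow as tf

NUM_SIZE_CLUSTER = 10  # assumed value, matching the [1,10,1] tiling elsewhere in this file

y_dims_cls = tf.constant([2, 7], dtype=tf.int32)          # (B,) size-cluster labels
y_dims_reg = tf.constant([[0.1, -0.2, 0.05],
                          [0.0,  0.3, -0.1]], tf.float32)  # (B,3) residuals w.r.t. the label cluster

# Anchor format: one slot per size cluster, with the same residual copied into every slot.
dims_cls = tf.one_hot(y_dims_cls, depth=NUM_SIZE_CLUSTER)                      # (B,NUM_SIZE_CLUSTER)
dims_reg = tf.tile(tf.expand_dims(y_dims_reg, 1), [1, NUM_SIZE_CLUSTER, 1])    # (B,NUM_SIZE_CLUSTER,3)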
Example #3
def get_model(boxpc,
              is_training,
              one_hot_vec,
              use_one_hot_vec=False,
              bn_decay=None,
              c=None):

    end_points = {
        'class_ids': tf.cast(tf.argmax(one_hot_vec, axis=1), dtype=tf.int32)
    }
    box_reg, pc = boxpc
    delta_dims = 3 + 3 + 1  # delta center (3) + delta size (3) + delta angle (1)

    if not use_one_hot_vec: one_hot_vec = None

    # Predict if the box fits the point cloud or not + Delta term to correct the box
    output, feats = semisup_models.box_pc_mask_features_model(
        box_reg,
        pc,
        None,
        2 + delta_dims,
        is_training,
        end_points=end_points,
        reuse=False,
        bn_for_output=False,
        one_hot_vec=one_hot_vec,
        norm_box2D=None,
        bn_decay=bn_decay,
        c=c,
        scope='box_pc_mask_model')
    boxpc_fit_logits = output[:, -2:]
    logits_for_weigh = tf.nn.softmax(boxpc_fit_logits)[:, 1]
    pred_boxpc_fit = tf.cast(
        tf.nn.softmax(boxpc_fit_logits)[:, 1] > 0.5, tf.int32)

    if c.BOXPC_STOP_GRAD_OF_CLS_VIA_DELTA:
        logits_for_weigh = tf.stop_gradient(logits_for_weigh)
    end_points['boxpc_feats_dict'] = feats
    end_points['boxpc_fit_logits'] = boxpc_fit_logits
    end_points['pred_boxpc_fit'] = pred_boxpc_fit
    end_points['logits_for_weigh'] = logits_for_weigh

    # Delta term
    boxpc_delta_center = output[:, 0:3]  # (B,3)
    boxpc_delta_size = output[:, 3:6]  # (B,3)
    boxpc_delta_angle = output[:, 6]  # (B,)
    # Weigh the delta predictions by the classification confidence.
    # We use (1 - prob): if the fit probability is close to 1 (we are confident that
    # the box already fits), then less of the delta box should be applied.
    if c.BOXPC_WEIGH_DELTA_PRED_BY_CLS_CONF:
        weigh_delta = 1. - logits_for_weigh
        boxpc_delta_center = boxpc_delta_center * tf_util.tf_expand_tile(
            weigh_delta, axis=1, tile=[1, 3])
        boxpc_delta_size = boxpc_delta_size * tf_util.tf_expand_tile(
            weigh_delta, axis=1, tile=[1, 3])
        boxpc_delta_angle = boxpc_delta_angle * weigh_delta
    end_points['boxpc_delta_center'] = boxpc_delta_center
    end_points['boxpc_delta_size'] = boxpc_delta_size
    end_points['boxpc_delta_angle'] = boxpc_delta_angle

    pred_delta_box = (boxpc_delta_center, boxpc_delta_size, boxpc_delta_angle)
    pred = (boxpc_fit_logits, pred_delta_box)

    return pred, end_points
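To make the confidence weighting in get_model concrete, here is a minimal, self-contained sketch of how the 9-channel output is split and how the deltas are scaled by (1 - fit probability); the placeholder is a stand-in for the network output, and tf.tile/tf.expand_dims replace tf_util.tf_expand_tile:

import tensorflow as tf

# output: (B, 9) = delta center (3) + delta size (3) + delta angle (1) + fit logits (2)
output = tf.placeholder(tf.float32, shape=(None, 9))

fit_logits = output[:, -2:]
fit_prob = tf.nn.softmax(fit_logits)[:, 1]   # probability that the box fits the point cloud
weigh_delta = 1. - fit_prob                  # confident fit -> apply less of the delta

delta_center = output[:, 0:3] * tf.tile(tf.expand_dims(weigh_delta, 1), [1, 3])
delta_size = output[:, 3:6] * tf.tile(tf.expand_dims(weigh_delta, 1), [1, 3])
delta_angle = output[:, 6] * weigh_delta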
Example #4
def train():
    # Print configurations
    log_string('\n\nCommand:\npython %s\n' % ' '.join(sys.argv))
    log_string(FLAGS.config_str)

    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            pc_pl, bg_pc_pl, img_pl, one_hot_vec_pl, y_seg_pl, y_centers_pl, y_orient_cls_pl, \
            y_orient_reg_pl, y_dims_cls_pl, y_dims_reg_pl, R0_rect_pl, P_pl, Rtilt_pl, K_pl, \
            rot_frust_pl, box2D_pl, img_dim_pl, is_data_2D_pl = \
                MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT, FLAGS.NUM_CHANNELS)
            is_training_pl = tf.placeholder(tf.bool, shape=())

            # ================================== OPTIMIZERS ==================================
            # Note the global_step=batch argument to minimize(): it tells the optimizer
            # to increment the 'batch' variable each time a training step runs.
            batch = tf.Variable(0)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('Info/bn_decay', bn_decay)
            batch_D = tf.Variable(0)
            bn_decay_D = get_bn_decay(batch_D)
            tf.summary.scalar('Info/bn_decay_D', bn_decay_D)

            # Get training operator
            learning_rate = get_learning_rate(batch, BASE_LEARNING_RATE)
            tf.summary.scalar('Info/learning_rate', learning_rate)
            learning_rate_D = get_learning_rate(batch_D, BASE_LEARNING_RATE_D)
            tf.summary.scalar('Info/learning_rate_D', learning_rate_D)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate,
                                                       momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)
            elif OPTIMIZER == 'sgd':
                optimizer = tf.train.GradientDescentOptimizer(learning_rate)
            if OPTIMIZER_D == 'momentum':
                optimizer_D = tf.train.MomentumOptimizer(learning_rate_D,
                                                         momentum=MOMENTUM)
            elif OPTIMIZER_D == 'adam':
                optimizer_D = tf.train.AdamOptimizer(learning_rate_D)
            elif OPTIMIZER_D == 'sgd':
                optimizer_D = tf.train.GradientDescentOptimizer(
                    learning_rate_D)

            # ==================================== MODEL ====================================
            # Get model and loss
            labels = (y_seg_pl, y_centers_pl, y_orient_cls_pl, y_orient_reg_pl, y_dims_cls_pl, \
                      y_dims_reg_pl, R0_rect_pl, P_pl, Rtilt_pl, K_pl, rot_frust_pl, box2D_pl, \
                      img_dim_pl, is_data_2D_pl)
            # NOTE: Only use ONE optimizer during each training run so that the 'batch'
            # counter and optimizer state are not affected by the other optimizer.
            norm_box2D = tf_util.tf_normalize_2D_bboxes(box2D_pl, img_dim_pl)
            pred, end_points = MODEL.get_semi_model(
                pc_pl,
                bg_pc_pl,
                img_pl,
                one_hot_vec_pl,
                is_training_pl,
                use_one_hot=FLAGS.use_one_hot,
                norm_box2D=norm_box2D,
                bn_decay=bn_decay,
                c=FLAGS)

            intraclsdims_train_classes = [cls_type in FLAGS.TEST_CLS for cls_type in ALL_CLASSES] \
                if FLAGS.SEMI_INTRACLSDIMS_ONLY_ON_2D_CLS else [True] * len(ALL_CLASSES)
            inactive_vol_train_classes = [cls_type in FLAGS.TEST_CLS for cls_type in ALL_CLASSES] \
                if FLAGS.WEAK_INACTIVE_VOL_ONLY_ON_2D_CLS else [True] * len(ALL_CLASSES)
            end_points.update({
                'intraclsdims_train_classes':
                intraclsdims_train_classes,
                'inactive_vol_train_classes':
                inactive_vol_train_classes
            })
            log_string('\n  Train on 2D only    (Reprojection): %s\n' %
                       str(FLAGS.WEAK_REPROJECTION_ONLY_ON_2D_CLS))
            log_string('\n  Train on 2D only    (Box PC Fit)  : %s\n' %
                       str(FLAGS.SEMI_BOXPC_FIT_ONLY_ON_2D_CLS))
            log_string('\n  Classes to train on (Inactive vol): %s\n' %
                       str(inactive_vol_train_classes))
            logits = pred[0]

            # ====================================== D MODEL ======================================
            import boxpc_sunrgbd
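            # The box-PC network acts as a discriminator: (ground-truth box, PC) pairs
            # are treated as 'real' and (predicted box, PC) pairs as 'fake'.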
            y_box = (y_centers_pl, y_orient_cls_pl, y_orient_reg_pl,
                     y_dims_cls_pl, y_dims_reg_pl)
            y_box_reg = boxpc_sunrgbd.convert_raw_y_box_to_reg_format(
                y_box, one_hot_vec_pl)
            real_box_pc = (y_box_reg, pc_pl)
            fake_box_pc = (end_points['F_pred_box_reg'], pc_pl)
            is_training_D = is_training_pl if FLAGS.SEMI_TRAIN_BOXPC_MODEL else tf.squeeze(
                tf.zeros(1, dtype=tf.bool))
            with tf.variable_scope('D_boxpc_branch', reuse=None):
                real_boxpc_pred, real_boxpc_ep = boxpc_sunrgbd.get_model(
                    real_box_pc,
                    is_training_D,
                    one_hot_vec_pl,
                    use_one_hot_vec=FLAGS.use_one_hot_boxpc,
                    bn_decay=bn_decay,
                    c=FLAGS)
            with tf.variable_scope('D_boxpc_branch', reuse=True):
                fake_boxpc_pred, fake_boxpc_ep = boxpc_sunrgbd.get_model(
                    fake_box_pc,
                    is_training_D,
                    one_hot_vec_pl,
                    use_one_hot_vec=FLAGS.use_one_hot_boxpc,
                    bn_decay=bn_decay,
                    c=FLAGS)
            logits_real = real_boxpc_ep['boxpc_fit_logits']
            logits_fake = fake_boxpc_ep['boxpc_fit_logits']
            D_loss = weak_losses.get_D_loss(
                logits_real,
                logits_fake,
                loss_type='SOFTMAX',
                use_soft_noisy_labels_D=FLAGS.SEMI_ADV_SOFT_NOISY_LABELS_FOR_D,
                flip_labels_prob=FLAGS.SEMI_ADV_FLIP_LABELS_FOR_D_PROB,
                mask_real=tf.cast(1 - is_data_2D_pl, tf.float32),
                mask_fake=None,
                scope='D_loss')
            trainable_D_vars = get_scope_vars('D_boxpc_branch',
                                              trainable_only=True)
            fake_boxpc_fit_prob = tf.nn.softmax(logits_fake)[:, 1]

            curr_box = end_points['F_pred_box_reg']
            curr_center_reg, curr_size_reg, curr_angle_reg = curr_box

            # Repeatedly compute a delta box for the current (box, PC) pair and
            # accumulate the deltas; the refined box feeds the 'F2_' terms below.
            total_delta_center = tf.zeros_like(curr_center_reg)
            total_delta_angle = tf.zeros_like(curr_angle_reg)
            total_delta_size = tf.zeros_like(curr_size_reg)
            for i in range(FLAGS.SEMI_REFINE_USING_BOXPC_DELTA_NUM):
                fake_box_pc = (curr_box, pc_pl)
                with tf.variable_scope('D_boxpc_branch', reuse=True):
                    fake_boxpc_pred, fake_boxpc_ep = boxpc_sunrgbd.get_model(
                        fake_box_pc,
                        is_training_D,
                        one_hot_vec_pl,
                        use_one_hot_vec=FLAGS.use_one_hot_boxpc,
                        c=FLAGS)

                # Final predicted box
                weight = 1 - fake_boxpc_ep['logits_for_weigh'] if \
                    FLAGS.SEMI_WEIGH_BOXPC_DELTA_DURING_TEST else \
                    tf.ones_like(fake_boxpc_ep['logits_for_weigh'])
                weight_exp = tf_util.tf_expand_tile(weight,
                                                    axis=1,
                                                    tile=[1, 3])
                delta_center = fake_boxpc_ep['boxpc_delta_center'] * weight_exp
                delta_angle = fake_boxpc_ep['boxpc_delta_angle'] * weight
                delta_size = fake_boxpc_ep['boxpc_delta_size'] * weight_exp
                curr_center_reg, curr_size_reg, curr_angle_reg = curr_box
                refined_center_reg = curr_center_reg - delta_center
                refined_angle_reg = curr_angle_reg - delta_angle
                refined_size_reg = curr_size_reg - delta_size
                curr_box = (refined_center_reg, refined_size_reg,
                            refined_angle_reg)

                total_delta_center = total_delta_center + delta_center
                total_delta_angle = total_delta_angle + delta_angle
                total_delta_size = total_delta_size + delta_size

            if FLAGS.SEMI_BOXPC_MIN_FIT_LOSS_AFT_REFINE:
                fake_boxpc_fit_prob = tf.nn.softmax(
                    fake_boxpc_ep['boxpc_fit_logits'])[:, 1]

            F2_center = end_points['F_center'] - total_delta_center
            F2_heading_scores = end_points['F_heading_scores']
            F2_heading_residuals = end_points['F_heading_residuals'] - \
                                   tf_util.tf_expand_tile(total_delta_angle, axis=1, tile=[1,12])
            F2_size_scores = end_points['F_size_scores']
            F2_size_residuals    = end_points['F_size_residuals'] - \
                                   tf_util.tf_expand_tile(total_delta_size, axis=1, tile=[1,10,1])

            end_points.update({
                'pred_boxpc_fit':
                fake_boxpc_ep['pred_boxpc_fit'],
                'boxpc_feats_dict':
                fake_boxpc_ep['boxpc_feats_dict'],
                'boxpc_fit_prob':
                fake_boxpc_fit_prob,
                'boxpc_delta_center':
                fake_boxpc_ep['boxpc_delta_center'],
                'boxpc_delta_size':
                fake_boxpc_ep['boxpc_delta_size'],
                'boxpc_delta_angle':
                fake_boxpc_ep['boxpc_delta_angle'],
                'F2_center':
                F2_center,
                'F2_heading_scores':
                F2_heading_scores,
                'F2_heading_residuals':
                F2_heading_residuals,
                'F2_size_scores':
                F2_size_scores,
                'F2_size_residuals':
                F2_size_residuals
            })

            # ======================================= G LOSS =======================================
            semi_loss = MODEL.get_semi_loss(pred, labels, end_points, c=FLAGS)
            train_vars = get_scope_vars('class_dependent', trainable_only=True)
            if FLAGS.SEMI_TRAIN_BOX_TRAIN_CLASS_AG_TNET:
                train_vars += get_scope_vars('class_agnostic/tnet',
                                             trainable_only=True)
            if FLAGS.SEMI_TRAIN_BOX_TRAIN_CLASS_AG_BOX:
                train_vars += get_scope_vars('class_agnostic/box',
                                             trainable_only=True)
            train_semi_op = optimizer.minimize(semi_loss,
                                               global_step=batch,
                                               var_list=train_vars)
            tf.summary.scalar('Total_Loss/semi_loss', semi_loss)
            ops = {'semi_loss': semi_loss, 'train_semi_op': train_semi_op}

            correct = tf.equal(tf.argmax(logits, 2), tf.to_int64(y_seg_pl))
            accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(
                BATCH_SIZE * NUM_POINT)
            tf.summary.scalar('Seg_IOU/accuracy', accuracy)

            # Add ops to save and restore all the variables.
            saver = tf.train.Saver(max_to_keep=5)

        # ======================================== LOGS ========================================
        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        # Add summary writers
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(pjoin(LOG_DIR, 'train'),
                                             sess.graph)
        test_writer = tf.summary.FileWriter(pjoin(LOG_DIR, 'test'), sess.graph)

        # ==================================== INIT & RESTORE ====================================
        # Init variables
        if FLAGS.init_class_ag_path is not None:

            # Restore only certain variables
            class_ag_scopes = ['class_agnostic']
            class_ag_scopenames_in_ckpt = ['']
            load_variable_scopes_from_ckpt(class_ag_scopes,
                                           class_ag_scopenames_in_ckpt, sess,
                                           FLAGS.init_class_ag_path)

            boxpc_scopes = ['D_boxpc_branch']
            boxpc_scopenames_in_ckpt = ['']
            load_variable_scopes_from_ckpt(boxpc_scopes,
                                           boxpc_scopenames_in_ckpt, sess,
                                           FLAGS.init_boxpc_path[0])

            # Initialize the rest
            already_init_var_scopes = ['class_agnostic', 'D_boxpc_branch']
            init = tf.variables_initializer(
                get_scope_vars_except_unwanted_scopes(already_init_var_scopes,
                                                      trainable_only=False))
            sess.run(init)

        elif FLAGS.restore_model_path is None:
            assert (FLAGS.init_class_ag_path is None)
            assert (FLAGS.init_boxpc_path is None)
            init = tf.global_variables_initializer()
            sess.run(init)
        else:
            saver.restore(sess, FLAGS.restore_model_path)

        ops.update({
            'pc_pl': pc_pl,
            'bg_pc_pl': bg_pc_pl,
            'img_pl': img_pl,
            'one_hot_vec_pl': one_hot_vec_pl,
            'y_seg_pl': y_seg_pl,
            'y_centers_pl': y_centers_pl,
            'y_orient_cls_pl': y_orient_cls_pl,
            'y_orient_reg_pl': y_orient_reg_pl,
            'y_dims_cls_pl': y_dims_cls_pl,
            'y_dims_reg_pl': y_dims_reg_pl,
            'R0_rect_pl': R0_rect_pl,
            'P_pl': P_pl,
            'Rtilt_pl': Rtilt_pl,
            'K_pl': K_pl,
            'rot_frust_pl': rot_frust_pl,
            'box2D_pl': box2D_pl,
            'img_dim_pl': img_dim_pl,
            'is_data_2D_pl': is_data_2D_pl,
            'is_training_pl': is_training_pl,
            'logits': logits,
            'merged': merged,
            'step': batch,
            'end_points': end_points
        })

        # ====================================== TRAINING ======================================
        best_loss = 1e10
        for epoch in range(MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()

            T1.tic()
            train_one_epoch(sess, ops, train_writer)
            epoch_loss = eval_one_epoch(sess, ops, test_writer)
            T1.toc(average=False)

            # Save the variables to disk.
            if epoch % 5 == 0:
                save_path = saver.save(
                    sess, pjoin(LOG_DIR, 'model_epoch_%d.ckpt' % epoch))
                log_string('Model saved in file: %s' % save_path)
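As a side note, the scope-restricted restore performed by the load_variable_scopes_from_ckpt helper can be approximated with a scoped tf.train.Saver. This is a minimal sketch under the assumption that checkpoint variable names differ from the live graph only by the scope prefix; restore_scope and scope_in_ckpt are hypothetical names:

import tensorflow as tf

def restore_scope(sess, scope, ckpt_path, scope_in_ckpt=''):
    # Map each live variable under `scope` to its (possibly re-scoped) checkpoint name
    # and restore only those variables.
    scope_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
    name_map = {}
    for v in scope_vars:
        ckpt_name = v.op.name.replace(scope, scope_in_ckpt, 1).strip('/')
        name_map[ckpt_name] = v
    tf.train.Saver(var_list=name_map).restore(sess, ckpt_path)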
Example #5
def get_model(batch_size, num_point, num_channel, use_oracle_mask=False):

    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            is_training_pl = tf.placeholder(tf.bool, shape=())

            pc_pl, bg_pc_pl, img_pl, one_hot_vec_pl, y_seg_pl, y_centers_pl, y_orient_cls_pl, \
            y_orient_reg_pl, y_dims_cls_pl, y_dims_reg_pl, R0_rect_pl, P_pl, Rtilt_pl, K_pl, \
            rot_frust_pl, box2D_pl, img_dim_pl, is_data_2D_pl = \
                MODEL.placeholder_inputs(batch_size, num_point, num_channel)
            labels = (y_seg_pl, y_centers_pl, y_orient_cls_pl, y_orient_reg_pl, y_dims_cls_pl, \
                      y_dims_reg_pl, R0_rect_pl, P_pl, Rtilt_pl, K_pl, rot_frust_pl, box2D_pl, \
                      img_dim_pl, is_data_2D_pl)
            norm_box2D = tf_util.tf_normalize_2D_bboxes(box2D_pl, img_dim_pl)
            oracle_mask = y_seg_pl if use_oracle_mask else None

            pred, end_points = MODEL.get_semi_model(
                pc_pl,
                bg_pc_pl,
                img_pl,
                one_hot_vec_pl,
                is_training_pl,
                norm_box2D=norm_box2D,
                use_one_hot=FLAGS.use_one_hot,
                oracle_mask=oracle_mask,
                c=FLAGS)
            logits = pred[0]

            # ================================ BOXPC MODEL ================================
            prefix = 'F_'
            FLAGS.SEMI_REFINE_USING_BOXPC_DELTA_NUM = int(FLAGS.refine)
            FLAGS.SEMI_WEIGH_BOXPC_DELTA_DURING_TEST = False
            FLAGS.BOX_PC_MASK_REPRESENTATION = 'A'

            import boxpc_sunrgbd
            is_training_D = tf.squeeze(tf.zeros(1, dtype=tf.bool))
            curr_box = end_points[prefix + 'pred_box_reg']
            curr_center_reg, curr_size_reg, curr_angle_reg = curr_box

            # Compute the delta_box for a given (Box, PC) pair and add these
            # delta terms repeatedly. The final box terms are represented by
            # the 'F2_' terms.
            boxpc_fit_prob = None
            total_delta_center = tf.zeros_like(curr_center_reg)
            total_delta_angle = tf.zeros_like(curr_angle_reg)
            total_delta_size = tf.zeros_like(curr_size_reg)
            for i in range(FLAGS.SEMI_REFINE_USING_BOXPC_DELTA_NUM):
                reuse = None if i == 0 else True
                if FLAGS.mask_pc_for_boxpc:
                    mask = tf.cast(tf.argmax(logits, axis=2), tf.float32)
                    mask = tf_util.tf_expand_tile(mask,
                                                  axis=2,
                                                  tile=[1, 1, num_channel])
                    fake_box_pc = (curr_box, pc_pl * mask)
                else:
                    fake_box_pc = (curr_box, pc_pl)

                with tf.variable_scope('D_boxpc_branch', reuse=reuse):
                    fake_boxpc_pred, fake_boxpc_ep = boxpc_sunrgbd.get_model(
                        fake_box_pc,
                        is_training_D,
                        one_hot_vec=one_hot_vec_pl,
                        use_one_hot_vec=False,
                        c=FLAGS)
                boxpc_fit_prob = tf.nn.softmax(
                    fake_boxpc_ep['boxpc_fit_logits'])[:, 1]

                # Final predicted box
                weight = 1 - fake_boxpc_ep['logits_for_weigh'] if \
                    FLAGS.SEMI_WEIGH_BOXPC_DELTA_DURING_TEST else \
                    tf.ones_like(fake_boxpc_ep['logits_for_weigh'])
                weight_exp = tf_util.tf_expand_tile(weight,
                                                    axis=1,
                                                    tile=[1, 3])
                delta_center = fake_boxpc_ep['boxpc_delta_center'] * weight_exp
                delta_angle = fake_boxpc_ep['boxpc_delta_angle'] * weight
                delta_size = fake_boxpc_ep['boxpc_delta_size'] * weight_exp
                curr_center_reg, curr_size_reg, curr_angle_reg = curr_box
                refined_center_reg = curr_center_reg - delta_center
                refined_angle_reg = curr_angle_reg - delta_angle
                refined_size_reg = curr_size_reg - delta_size
                curr_box = (refined_center_reg, refined_size_reg,
                            refined_angle_reg)

                total_delta_center = total_delta_center + delta_center
                total_delta_angle = total_delta_angle + delta_angle
                total_delta_size = total_delta_size + delta_size

            F2_center = end_points[prefix + 'center'] - total_delta_center
            F2_heading_scores = end_points[prefix + 'heading_scores']
            F2_heading_residuals = end_points[prefix+'heading_residuals'] - \
                                   tf_util.tf_expand_tile(total_delta_angle, axis=1, tile=[1,12])
            F2_size_scores = end_points[prefix + 'size_scores']
            F2_size_residuals    = end_points[prefix+'size_residuals'] - \
                                   tf_util.tf_expand_tile(total_delta_size, axis=1, tile=[1,10,1])

            end_points.update({
                'boxpc_fit_prob': boxpc_fit_prob,
                'F2_center': F2_center,
                'F2_heading_scores': F2_heading_scores,
                'F2_heading_residuals': F2_heading_residuals,
                'F2_size_scores': F2_size_scores,
                'F2_size_residuals': F2_size_residuals
            })

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        sess = tf.Session(config=config)

        # Restore variables from disk.
        saver = tf.train.Saver()
        saver.restore(sess, MODEL_PATH)

        ops = {
            'pc_pl': pc_pl,
            'one_hot_vec_pl': one_hot_vec_pl,
            'y_seg_pl': y_seg_pl,
            'y_centers_pl': y_centers_pl,
            'y_orient_cls_pl': y_orient_cls_pl,
            'y_orient_reg_pl': y_orient_reg_pl,
            'y_dims_cls_pl': y_dims_cls_pl,
            'y_dims_reg_pl': y_dims_reg_pl,
            'R0_rect_pl': R0_rect_pl,
            'P_pl': P_pl,
            'Rtilt_pl': Rtilt_pl,
            'K_pl': K_pl,
            'rot_frust_pl': rot_frust_pl,
            'box2D_pl': box2D_pl,
            'img_dim_pl': img_dim_pl,
            'is_training_pl': is_training_pl,
            'logits': logits,
            'end_points': end_points
        }
        return sess, ops
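The repeated delta refinement used in both train() and get_model() above boils down to subtracting each pass's delta from the current box while accumulating the totals. A minimal numpy sketch of that control flow; refine_box and delta_fn are hypothetical names, with delta_fn standing in for one pass through the box-PC network:

import numpy as np

def refine_box(center, size, angle, delta_fn, num_steps):
    total_dc = np.zeros_like(center)
    total_ds = np.zeros_like(size)
    total_da = np.zeros_like(angle)
    for _ in range(num_steps):
        dc, ds, da = delta_fn(center, size, angle)   # one refinement pass
        center, size, angle = center - dc, size - ds, angle - da
        total_dc, total_ds, total_da = total_dc + dc, total_ds + ds, total_da + da
    # The accumulated totals are what get folded into the 'F2_' predictions.
    return (center, size, angle), (total_dc, total_ds, total_da)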