Example #1
def loss(logit_d, logit_n, normal, depth, shape_mask, ds_mask, cl_mask_inv, gt_ds, npr, mode=0):
    mask_crop = slice_tensor(shape_mask, logit_n)
    mask_crop3 = tf.tile(mask_crop, [1, 1, 1, 3])

    # normal loss (l2)
    gt_normal = slice_tensor(normal, logit_n)
    n_loss = tf.losses.mean_squared_error(gt_normal, logit_n, weights=mask_crop3)

    real_n_loss = tf.losses.absolute_difference(gt_normal, logit_n, weights=mask_crop3)

    # depth loss (l2)
    gt_depth = slice_tensor(depth, logit_n)
    d_loss = tf.losses.mean_squared_error(gt_depth, logit_d, weights=mask_crop)

    real_d_loss = tf.losses.absolute_difference(gt_depth, logit_d, weights=mask_crop)

    # depth sample loss (l2)
    d_mask_crop = slice_tensor(ds_mask, logit_n)
    ds_loss = tf.constant(0.0, dtype=tf.float32, shape=[])
    if mode == 1 or mode == 3:
        ds_loss = tf.losses.mean_squared_error(gt_depth, logit_d, weights=d_mask_crop)

    # regularization loss
    r_loss = tf.constant(0.0, dtype=tf.float32, shape=[])
    if mode == 2 or mode == 3:
        r_loss = reg_loss(logit_n, logit_d, shape_mask, cl_mask_inv)

    total_loss = 0.76 * d_loss + n_loss + hyper_params['dsWeight'] * ds_loss + hyper_params['regWeight'] * r_loss

    shape_mask_crop = slice_tensor(shape_mask, logit_n)
    shape_mask_crop3 = tf.tile(shape_mask_crop, [1, 1, 1, 3])

    return total_loss, d_loss, n_loss, real_d_loss, real_n_loss, gt_normal * shape_mask_crop3, logit_n * shape_mask_crop3, \
           gt_depth * shape_mask_crop, logit_d * shape_mask_crop, gt_ds * shape_mask_crop, npr
def loss(logit_d, logit_n, normal, depth, shape_mask, ds_mask, cl_mask_inv, reg_weight, fl_mask_inv,
         mode=0, scope='loss'):
    with tf.name_scope(scope) as _:
        mask_crop = slice_tensor(shape_mask, logit_n)
        mask_crop3 = tf.tile(mask_crop, [1, 1, 1, 3])

        # normal loss (l2)
        gt_normal = slice_tensor(normal, logit_n)
        n_loss = tf.losses.mean_squared_error(gt_normal, logit_n, weights=mask_crop3)

        real_n_loss = tf.losses.absolute_difference(gt_normal, logit_n, weights=mask_crop3)

        # depth loss (l2)
        gt_depth = slice_tensor(depth, logit_n)
        d_loss = tf.losses.mean_squared_error(gt_depth, logit_d, weights=mask_crop)

        real_d_loss = tf.losses.absolute_difference(gt_depth, logit_d, weights=mask_crop)

        # depth sample loss (l2)
        d_mask_crop = slice_tensor(ds_mask, logit_n)
        ds_loss = tf.constant(0.0, dtype=tf.float32, shape=[])
        if mode == 1 or mode == 3:
            ds_loss = tf.losses.mean_squared_error(gt_depth, logit_d, weights=d_mask_crop)

        # regularization loss
        r_loss = tf.constant(0.0, dtype=tf.float32, shape=[])
        if mode == 2 or mode == 3:
            r_loss = reg_loss(logit_n, logit_d, shape_mask, cl_mask_inv, fl_mask_inv)

        total_loss = 0.76 * d_loss + n_loss + hyper_params['dsWeight'] * ds_loss + reg_weight * r_loss

        return total_loss, d_loss, n_loss, real_d_loss, real_n_loss, r_loss, ds_loss
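
In this variant the depth-sample and regularization terms are gated by mode, and the regularization weight is passed in rather than read from hyper_params. A minimal usage sketch (illustrative only, assuming the logits and ground-truth tensors already exist in the graph; the optimizer and learning rate below are not part of the original module):

total_loss, d_loss, n_loss, real_d, real_n, r_loss, ds_loss = loss(
    logit_d, logit_n, normal, depth, shape_mask, ds_mask, cl_mask_inv,
    reg_weight=hyper_params['regWeight'], fl_mask_inv=fl_mask_inv,
    mode=3)  # mode=3 enables both the depth-sample and the regularization terms
train_op = tf.train.AdamOptimizer(1e-4).minimize(total_loss)  # learning rate is illustrative
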
Example #3
def reg_loss(logit_n,
             logit_d,
             shape_mask,
             cl_mask_inverse,
             fl_mask_inv,
             scope='reg_loss'):
    with tf.name_scope(scope) as _:
        # convert normal signal back to [-1, 1]
        converted_n = (logit_n * 2.0) - 1.0

        img_shape = tf.shape(logit_d)
        N = img_shape[0]
        H = img_shape[1]
        W = img_shape[2]
        K = 0.007843137254902  # 2 / 255: step used for the constant x/y components of the tangents

        shape_mask_crop = slice_tensor(shape_mask, logit_d)
        l_mask_crop = slice_tensor(cl_mask_inverse, logit_d)
        fl_mask_inv_crop = slice_tensor(fl_mask_inv, logit_d)
        combined_mask = shape_mask_crop * l_mask_crop * fl_mask_inv_crop
        mask_shift_x = tf.slice(combined_mask, [0, 0, 0, 0],
                                [-1, -1, W - 1, -1])
        mask_shift_y = tf.slice(combined_mask, [0, 0, 0, 0],
                                [-1, H - 1, -1, -1])

        c0 = tf.fill([N, H, W - 1, 1], K)
        c1 = tf.zeros(shape=[N, H, W - 1, 1])

        cx = logit_d[:, :, 1:, :] - logit_d[:, :, :-1, :]
        t_x = tf.concat([c0, c1, cx], axis=3)
        # approximate normalization
        t_x /= K

        c2 = tf.zeros(shape=[N, H - 1, W, 1])
        c3 = tf.fill([N, H - 1, W, 1], K)
        cy = logit_d[:, 1:, :, :] - logit_d[:, :-1, :, :]
        t_y = tf.concat([c2, c3, cy], axis=3)
        # approximate normalization
        t_y /= K

        normal_shift_x = tf.slice(converted_n, [0, 0, 0, 0],
                                  [-1, -1, W - 1, -1])
        normal_shift_y = tf.slice(converted_n, [0, 0, 0, 0],
                                  [-1, H - 1, -1, -1])

        reg_loss1_diff = tf.reduce_sum(t_x * normal_shift_x, 3)
        reg_loss1 = tf.losses.mean_squared_error(tf.zeros(shape=[N, H, W - 1]),
                                                 reg_loss1_diff,
                                                 weights=tf.squeeze(
                                                     mask_shift_x, [3]))

        reg_loss2_diff = tf.reduce_sum(t_y * normal_shift_y, 3)
        reg_loss2 = tf.losses.mean_squared_error(tf.zeros(shape=[N, H - 1, W]),
                                                 reg_loss2_diff,
                                                 weights=tf.squeeze(
                                                     mask_shift_y, [3]))

        return reg_loss1 + reg_loss2
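
reg_loss penalizes predicted normals that are not orthogonal to the screen-space tangents of the predicted depth: per pixel, t_x ~ (K, 0, dd/dx) / K and t_y ~ (0, K, dd/dy) / K, and the dot products t_x . n and t_y . n are driven to zero wherever the combined mask is on. A minimal NumPy sketch of the horizontal residual (illustrative only; plain arrays instead of the TF graph, and the helper name is hypothetical):

import numpy as np

def orthogonality_residual_x(depth, normal, K=2.0 / 255.0):
    # depth: (H, W); normal: (H, W, 3), already mapped back to [-1, 1]
    dx = depth[:, 1:] - depth[:, :-1]                       # horizontal depth difference
    t_x = np.stack([np.full_like(dx, K), np.zeros_like(dx), dx], axis=-1) / K
    res = np.sum(t_x * normal[:, :-1, :], axis=-1)          # t_x . n, ideally ~0
    return np.mean(res ** 2)
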
def collect_vis_img(logit_d, logit_n, logit_c, npr, normal, depth, shape_mask,
                    ds, cl_inv_mask, fm, fm_inv, selm, vdotn, mask2d):
    with tf.name_scope('collect_train_img') as _:
        mask_crop = slice_tensor(shape_mask, logit_n)
        mask_crop3 = tf.tile(mask_crop, [1, 1, 1, 3])
        mask3 = tf.tile(shape_mask, [1, 1, 1, 3])

        logit_n = logit_n * mask_crop3
        logit_d = logit_d * mask_crop
        logit_c = logit_c * mask_crop

        gt_normal = slice_tensor(normal * mask3, logit_n)
        gt_depth = slice_tensor(depth * shape_mask, logit_d)

        npr_lines = slice_tensor(npr, logit_n)
        cl_inv_mask = slice_tensor(cl_inv_mask, logit_n)

        feature_mask = slice_tensor(fm, logit_n)
        feature_mask_inv = slice_tensor(fm_inv, logit_n)
        sel_mask = slice_tensor(selm, logit_n)
        vdotn_scalar = slice_tensor(vdotn, logit_n)

    train_npr_proto = tf.summary.image('train_npr_lines', npr_lines,
                                       hyper_params['nbDispImg'])
    train_gtn_proto = tf.summary.image('train_gt_normal', gt_normal,
                                       hyper_params['nbDispImg'])
    train_gtd_proto = tf.summary.image('train_gt_depth', gt_depth,
                                       hyper_params['nbDispImg'])
    train_fn_proto = tf.summary.image('train_out_normal', logit_n,
                                      hyper_params['nbDispImg'])
    train_fd_proto = tf.summary.image('train_out_depth', logit_d,
                                      hyper_params['nbDispImg'])
    train_gtds_proto = tf.summary.image('train_gt_ds', ds,
                                        hyper_params['nbDispImg'])
    train_fcfm_proto = tf.summary.image('train_confidence_mp', logit_c,
                                        hyper_params['nbDispImg'])
    train_cl_mask_inv_proto = tf.summary.image('train_clmask_inv', cl_inv_mask,
                                               hyper_params['nbDispImg'])
    train_fm_proto = tf.summary.image('train_feature_mask', feature_mask,
                                      hyper_params['nbDispImg'])
    train_fm_inv_proto = tf.summary.image('train_feature_mask_inv',
                                          feature_mask_inv,
                                          hyper_params['nbDispImg'])
    train_selm_proto = tf.summary.image('train_sel_mask', sel_mask,
                                        hyper_params['nbDispImg'])
    train_vdotn_proto = tf.summary.image('train_vdotn_scalar', vdotn_scalar,
                                         hyper_params['nbDispImg'])
    train_mask_proto = tf.summary.image('train_shapeMask', shape_mask,
                                        hyper_params['nbDispImg'])
    train_mask2d_proto = tf.summary.image('train_2dMask', mask2d,
                                          hyper_params['nbDispImg'])

    return [
        train_npr_proto, train_gtn_proto, train_gtd_proto, train_fn_proto,
        train_fd_proto, train_gtds_proto, train_fcfm_proto,
        train_cl_mask_inv_proto, train_fm_proto, train_fm_inv_proto,
        train_selm_proto, train_vdotn_proto, train_mask_proto,
        train_mask2d_proto
    ]
Example #5
def loss(logit_f,
         gt_field,
         shape_mask,
         l_mask_inverse,
         fl_mask_inv,
         scope='loss'):
    with tf.name_scope(scope) as _:
        mask_crop = slice_tensor(shape_mask, logit_f)
        l_mask_crop = slice_tensor(l_mask_inverse, logit_f)
        fl_mask_inv_crop = slice_tensor(fl_mask_inv, logit_f)
        combined_smooth_mask = mask_crop * l_mask_crop * fl_mask_inv_crop
        combined_smooth_mask = tf.tile(combined_smooth_mask, [1, 1, 1, 4])

        combined_mask = mask_crop * l_mask_crop
        combined_mask = tf.tile(combined_mask, [1, 1, 1, 4])

        with tf.name_scope('data_term'):
            # data term
            gt_field = slice_tensor(gt_field, logit_f)
            f_loss = tf.losses.absolute_difference(gt_field,
                                                   logit_f,
                                                   weights=combined_mask)

        with tf.name_scope('smoothness_term'):
            # smoothness term
            img_shape = tf.shape(logit_f)
            H = img_shape[1]
            W = img_shape[2]

            pixel_dif1 = logit_f[:, 1:, :, :] - logit_f[:, :-1, :, :]
            pixel_dif2 = logit_f[:, :, 1:, :] - logit_f[:, :, :-1, :]
            mask_shift1 = tf.slice(combined_smooth_mask, [0, 0, 0, 0],
                                   [-1, H - 1, -1, -1])
            mask_shift2 = tf.slice(combined_smooth_mask, [0, 0, 0, 0],
                                   [-1, -1, W - 1, -1])

            var_loss1 = tf.losses.compute_weighted_loss(tf.abs(pixel_dif1),
                                                        weights=mask_shift1)
            var_loss2 = tf.losses.compute_weighted_loss(tf.abs(pixel_dif2),
                                                        weights=mask_shift2)
            tot_var_mean = var_loss1 + var_loss2

        tot_loss = f_loss + hyper_params['smoothWeight'] * tot_var_mean

    return tot_loss, f_loss, tot_var_mean
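
The data term is a masked L1 fit to the ground-truth field; the smoothness term is an anisotropic total-variation penalty on vertically and horizontally adjacent field values, again masked. A rough NumPy equivalent of the smoothness term, assuming a binary mask (tf.losses.compute_weighted_loss averages over the non-zero weights, which this sketch approximates; the helper name is hypothetical):

import numpy as np

def tv_smoothness(field, mask):
    # field, mask: (H, W, 4); mask entries are 0 or 1
    dif1 = np.abs(field[1:, :, :] - field[:-1, :, :]) * mask[:-1, :, :]   # vertical neighbours
    dif2 = np.abs(field[:, 1:, :] - field[:, :-1, :]) * mask[:, :-1, :]   # horizontal neighbours
    return dif1.sum() / max(mask[:-1, :, :].sum(), 1.0) + dif2.sum() / max(mask[:, :-1, :].sum(), 1.0)
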
Example #6
def loss(logit_d, logit_n, logit_c, normal, depth, shape_mask, ds_mask, cl_mask_inverse,
         gt_ds, npr, logit_f, gt_f, fl_mask_inv):
    img_shape = tf.shape(logit_d)
    N = img_shape[0]
    H = img_shape[1]
    W = img_shape[2]

    mask_crop = slice_tensor(shape_mask, logit_n)
    mask_crop3 = tf.tile(mask_crop, [1, 1, 1, 3])

    zero_tensor = tf.zeros(shape=[N, H, W, 1])
    zero_tensor3 = tf.zeros(shape=[N, H, W, 3])
    logit_c3 = tf.tile(logit_c, [1, 1, 1, 3])

    # normal loss (l2)
    gt_normal = slice_tensor(normal, logit_n)
    n_loss = tf.losses.mean_squared_error(zero_tensor3, logit_c3 * (gt_normal - logit_n),
                                          weights=mask_crop3)

    real_n_loss = tf.losses.absolute_difference(gt_normal, logit_n, weights=mask_crop3)

    # depth loss (l2)
    gt_depth = slice_tensor(depth, logit_n)
    d_loss = tf.losses.mean_squared_error(zero_tensor, logit_c * (gt_depth - logit_d),
                                          weights=mask_crop)

    real_d_loss = tf.losses.absolute_difference(gt_depth, logit_d, weights=mask_crop)

    # omega_loss (l2)
    omega_loss = tf.losses.mean_squared_error(zero_tensor, logit_c - 1.0, weights=mask_crop)

    # depth sample loss (l2)
    d_mask_crop = slice_tensor(ds_mask, logit_n)
    ds_loss = tf.losses.mean_squared_error(gt_depth, logit_d, weights=d_mask_crop)

    # regularization loss (l2)
    r_loss = reg_loss(logit_n, logit_d, shape_mask, cl_mask_inverse, fl_mask_inv)

    total_loss = hyper_params['dlossScale'] * d_loss + hyper_params['nlossScale'] * n_loss + omega_loss + \
                 hyper_params['dsWeight'] * ds_loss + hyper_params['regWeight'] * r_loss

    shape_mask_crop = slice_tensor(shape_mask, logit_n)
    shape_mask_crop3 = tf.tile(shape_mask_crop, [1, 1, 1, 3])
    shape_mask_crop4 = tf.tile(shape_mask_crop, [1, 1, 1, 4])
    cl_mask_inverse4 = tf.tile(cl_mask_inverse, [1, 1, 1, 4])
    gt_f = slice_tensor(gt_f, logit_n) * cl_mask_inverse4
    logit_f = logit_f * shape_mask_crop4 * cl_mask_inverse4
    cur_shape = tf.shape(logit_n)
    lc = tf.zeros([cur_shape[0], cur_shape[1], cur_shape[2], 1], tf.float32)
    gt_coeff_a = tf.concat([tf.slice(gt_f, [0, 0, 0, 0], [-1, -1, -1, 2]), lc], axis=3)
    gt_coeff_b = tf.concat([tf.slice(gt_f, [0, 0, 0, 2], [-1, -1, -1, 2]), lc], axis=3)
    f_coeff_a = tf.concat([tf.slice(logit_f, [0, 0, 0, 0], [-1, -1, -1, 2]), lc], axis=3)
    f_coeff_b = tf.concat([tf.slice(logit_f, [0, 0, 0, 2], [-1, -1, -1, 2]), lc], axis=3)

    return total_loss, d_loss, n_loss, ds_loss, r_loss, real_d_loss, real_n_loss, omega_loss, \
           gt_normal * shape_mask_crop3, logit_n * shape_mask_crop3, gt_depth * shape_mask_crop, \
           logit_d * shape_mask_crop, gt_ds * shape_mask_crop, npr, \
           slice_tensor(cl_mask_inverse, logit_n) * mask_crop, \
           logit_c * shape_mask_crop, gt_coeff_a, gt_coeff_b, f_coeff_a, f_coeff_b
def loss(logit_d, logit_n, logit_c, normal, depth, shape_mask, ds_mask, cl_mask_inverse, reg_weight,
         fl_mask_inv, scope='loss'):
    with tf.name_scope(scope) as _:
        img_shape = tf.shape(logit_d)
        N = img_shape[0]
        H = img_shape[1]
        W = img_shape[2]

        mask_crop = slice_tensor(shape_mask, logit_n)
        mask_crop3 = tf.tile(mask_crop, [1, 1, 1, 3])

        zero_tensor = tf.zeros(shape=[N, H, W, 1])
        zero_tensor3 = tf.zeros(shape=[N, H, W, 3])
        logit_c3 = tf.tile(logit_c, [1, 1, 1, 3])

        # normal loss (l2)
        gt_normal = slice_tensor(normal, logit_n)
        n_loss = tf.losses.mean_squared_error(zero_tensor3, logit_c3 * (gt_normal - logit_n),
                                              weights=mask_crop3)

        real_n_loss = tf.losses.absolute_difference(gt_normal, logit_n, weights=mask_crop3)

        # depth loss (l2)
        gt_depth = slice_tensor(depth, logit_n)
        d_loss = tf.losses.mean_squared_error(zero_tensor, logit_c * (gt_depth - logit_d),
                                              weights=mask_crop)

        real_d_loss = tf.losses.absolute_difference(gt_depth, logit_d, weights=mask_crop)

        # omega_loss (l2)
        omega_loss = tf.losses.mean_squared_error(zero_tensor, logit_c - 1.0, weights=mask_crop)

        # depth sample loss (l2)
        d_mask_crop = slice_tensor(ds_mask, logit_n)
        ds_loss = tf.losses.mean_squared_error(gt_depth, logit_d, weights=d_mask_crop)

        # regularization loss (l2)
        r_loss = reg_loss(logit_n, logit_d, shape_mask, cl_mask_inverse, fl_mask_inv)

        total_loss = hyper_params['dlossScale'] * d_loss + hyper_params['nlossScale'] * n_loss + omega_loss + \
                     hyper_params['dsWeight'] * ds_loss + reg_weight * r_loss

        return total_loss, d_loss, n_loss, ds_loss, r_loss, real_d_loss, real_n_loss, omega_loss
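
Here logit_c acts as a per-pixel confidence map: the depth and normal data terms penalize logit_c * (gt - pred), while omega_loss pulls logit_c toward 1, so the network can down-weight hard pixels at a cost. Ignoring the masks, the per-pixel objective c**2 * e**2 + (c - 1)**2 is minimized at c = 1 / (1 + e**2); a quick numeric check (hypothetical helper, not part of the original module):

def optimal_confidence(err):
    # Closed-form minimizer of c**2 * err**2 + (c - 1)**2 over c (set the derivative to zero)
    return 1.0 / (1.0 + err ** 2)

for e in (0.0, 0.1, 1.0):
    c = optimal_confidence(e)
    print(e, c, c ** 2 * e ** 2 + (c - 1) ** 2)  # larger error -> lower confidence, higher residual cost
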
def loss(logit_d, logit_n, logit_c, normal, depth, shape_mask, ds_mask,
         cl_mask_inverse, gt_ds, npr, fl_mask_inv):
    img_shape = logit_d.get_shape().as_list()  # static shape (unlike the variants above); assumes N, H, W are fully defined
    N = img_shape[0]
    H = img_shape[1]
    W = img_shape[2]

    mask_crop = slice_tensor(shape_mask, logit_n)
    mask_crop3 = tf.tile(mask_crop, [1, 1, 1, 3])

    zero_tensor = tf.zeros(shape=[N, H, W, 1])
    zero_tensor3 = tf.zeros(shape=[N, H, W, 3])
    logit_c3 = tf.tile(logit_c, [1, 1, 1, 3])

    # normal loss (l2)
    gt_normal = slice_tensor(normal, logit_n)
    n_loss = tf.losses.mean_squared_error(zero_tensor3,
                                          logit_c3 * (gt_normal - logit_n),
                                          weights=mask_crop3)

    real_n_loss = tf.losses.absolute_difference(gt_normal,
                                                logit_n,
                                                weights=mask_crop3)

    # depth loss (l2)
    gt_depth = slice_tensor(depth, logit_n)
    d_loss = tf.losses.mean_squared_error(zero_tensor,
                                          logit_c * (gt_depth - logit_d),
                                          weights=mask_crop)

    real_d_loss = tf.losses.absolute_difference(gt_depth,
                                                logit_d,
                                                weights=mask_crop)

    # omega_loss (l2)
    omega_loss = tf.losses.mean_squared_error(zero_tensor,
                                              logit_c - 1.0,
                                              weights=mask_crop)

    # depth sample loss (l2)
    d_mask_crop = slice_tensor(ds_mask, logit_n)
    ds_loss = tf.losses.mean_squared_error(gt_depth,
                                           logit_d,
                                           weights=d_mask_crop)

    # regularization loss (l2)
    r_loss = reg_loss(logit_n, logit_d, shape_mask, cl_mask_inverse,
                      fl_mask_inv)

    total_loss = hyper_params['dlossScale'] * d_loss + hyper_params['nlossScale'] * n_loss + omega_loss + \
                 hyper_params['dsWeight'] * ds_loss + hyper_params['regWeight'] * r_loss

    shape_mask_crop = slice_tensor(shape_mask, logit_n)
    shape_mask_crop3 = tf.tile(shape_mask_crop, [1, 1, 1, 3])

    return total_loss, d_loss, n_loss, ds_loss, r_loss, real_d_loss, real_n_loss, omega_loss, \
           gt_normal * shape_mask_crop3, logit_n * shape_mask_crop3, gt_depth * shape_mask_crop, \
           logit_d * shape_mask_crop, gt_ds * shape_mask_crop, npr, \
           slice_tensor(cl_mask_inverse, logit_n) * shape_mask_crop, logit_c * shape_mask_crop
Example #9
def collect_vis_img(logit_f, npr_lines, gt_field, shape_mask, line_inv, ds, fm,
                    sel_m, vdotn, fm_inv):
    with tf.name_scope('collect_train_img') as _:
        mask_crop = slice_tensor(shape_mask, logit_f)
        l_mask_crop = slice_tensor(line_inv, logit_f)
        combined_mask = mask_crop * l_mask_crop
        combined_mask = tf.tile(combined_mask, [1, 1, 1, 4])

        logit_f = logit_f * combined_mask
        gt_field = slice_tensor(gt_field, logit_f) * combined_mask

        cur_shape = tf.shape(logit_f)
        lc = tf.zeros([cur_shape[0], cur_shape[1], cur_shape[2], 1],
                      tf.float32)

        f_coeff_a = tf.concat(
            [tf.slice(logit_f, [0, 0, 0, 0], [-1, -1, -1, 2]), lc], axis=3)
        f_coeff_b = tf.concat(
            [tf.slice(logit_f, [0, 0, 0, 2], [-1, -1, -1, 2]), lc], axis=3)

        gt_coeff_a = tf.concat(
            [tf.slice(gt_field, [0, 0, 0, 0], [-1, -1, -1, 2]), lc], axis=3)
        gt_coeff_b = tf.concat(
            [tf.slice(gt_field, [0, 0, 0, 2], [-1, -1, -1, 2]), lc], axis=3)

        npr_lines = slice_tensor(npr_lines, logit_f)
        line_inv = slice_tensor(line_inv, logit_f)

        depth_sample = slice_tensor(ds, logit_f)
        feature_mask = slice_tensor(fm, logit_f)
        feature_mask_inv = slice_tensor(fm_inv, logit_f)
        sel_mask = slice_tensor(sel_m, logit_f)
        vdotn_scalar = slice_tensor(vdotn, logit_f)

    train_npr_proto = tf.summary.image('train_npr_lines', npr_lines,
                                       hyper_params['nbDispImg'])
    train_gt_coeff_a_proto = tf.summary.image('train_gt_a', gt_coeff_a,
                                              hyper_params['nbDispImg'])
    train_gt_coeff_b_proto = tf.summary.image('train_gt_b', gt_coeff_b,
                                              hyper_params['nbDispImg'])
    train_f_coeff_a_proto = tf.summary.image('train_f_a', f_coeff_a,
                                             hyper_params['nbDispImg'])
    train_f_coeff_b_proto = tf.summary.image('train_f_b', f_coeff_b,
                                             hyper_params['nbDispImg'])
    train_mask_proto = tf.summary.image('train_mask', line_inv,
                                        hyper_params['nbDispImg'])
    train_ds_proto = tf.summary.image('train_ds', depth_sample,
                                      hyper_params['nbDispImg'])
    train_fm_proto = tf.summary.image('train_feature_mask', feature_mask,
                                      hyper_params['nbDispImg'])
    train_fm_inv_proto = tf.summary.image('train_feature_mask_inv',
                                          feature_mask_inv,
                                          hyper_params['nbDispImg'])
    train_selm_proto = tf.summary.image('train_sel_mask', sel_mask,
                                        hyper_params['nbDispImg'])
    train_vdotn_proto = tf.summary.image('train_vdotn_scalar', vdotn_scalar,
                                         hyper_params['nbDispImg'])

    return [
        train_npr_proto, train_gt_coeff_a_proto, train_gt_coeff_b_proto,
        train_f_coeff_a_proto, train_f_coeff_b_proto, train_mask_proto,
        train_ds_proto, train_fm_proto, train_fm_inv_proto, train_selm_proto,
        train_vdotn_proto
    ]
Example #10
def collect_vis_img(logit_d, logit_n, logit_c, logit_f, npr, normal, depth,
                    shape_mask, ds, gt_field, cline_mask, fm, fm_inv, sel_m,
                    vdotn):
    with tf.name_scope('collect_train_img') as _:
        mask_crop = slice_tensor(shape_mask, logit_n)
        mask_crop3 = tf.tile(mask_crop, [1, 1, 1, 3])
        mask_crop4 = tf.tile(mask_crop, [1, 1, 1, 4])
        mask3 = tf.tile(shape_mask, [1, 1, 1, 3])
        mask4 = tf.tile(shape_mask, [1, 1, 1, 4])
        line_mask4 = tf.tile(slice_tensor(cline_mask, logit_n), [1, 1, 1, 4])

        logit_n = logit_n * mask_crop3
        logit_d = logit_d * mask_crop
        logit_c = logit_c * mask_crop

        gt_normal = slice_tensor(normal * mask3, logit_n)
        gt_depth = slice_tensor(depth * shape_mask, logit_d)

        npr_lines = slice_tensor(npr, logit_n)

        gt_field = slice_tensor(gt_field * mask4, logit_n)
        gt_field = gt_field * line_mask4
        logit_f = logit_f * mask_crop4 * line_mask4
        cur_shape = logit_n.get_shape().as_list()
        lc = tf.zeros([cur_shape[0], cur_shape[1], cur_shape[2], 1],
                      tf.float32)
        f_coeff_a = tf.concat(
            [tf.slice(logit_f, [0, 0, 0, 0], [-1, -1, -1, 2]), lc], axis=3)
        f_coeff_b = tf.concat(
            [tf.slice(logit_f, [0, 0, 0, 2], [-1, -1, -1, 2]), lc], axis=3)
        gt_coeff_a = tf.concat(
            [tf.slice(gt_field, [0, 0, 0, 0], [-1, -1, -1, 2]), lc], axis=3)
        gt_coeff_b = tf.concat(
            [tf.slice(gt_field, [0, 0, 0, 2], [-1, -1, -1, 2]), lc], axis=3)

        feature_mask = slice_tensor(fm, logit_f)
        feature_mask_inv = slice_tensor(fm_inv, logit_n)
        sel_mask = slice_tensor(sel_m, logit_f)
        vdotn_scalar = slice_tensor(vdotn, logit_f)

    train_npr_proto = tf.summary.image('train_npr_lines', npr_lines,
                                       hyper_params['nbDispImg'])
    train_gtn_proto = tf.summary.image('train_gt_normal', gt_normal,
                                       hyper_params['nbDispImg'])
    train_gtd_proto = tf.summary.image('train_gt_depth', gt_depth,
                                       hyper_params['nbDispImg'])
    train_fn_proto = tf.summary.image('train_out_normal', logit_n,
                                      hyper_params['nbDispImg'])
    train_fd_proto = tf.summary.image('train_out_depth', logit_d,
                                      hyper_params['nbDispImg'])
    train_gtds_proto = tf.summary.image('train_gt_ds', ds,
                                        hyper_params['nbDispImg'])
    train_fcfm_proto = tf.summary.image('train_confidence_mp', logit_c,
                                        hyper_params['nbDispImg'])
    train_gt_coeff_a_proto = tf.summary.image('train_gt_a', gt_coeff_a,
                                              hyper_params['nbDispImg'])
    train_gt_coeff_b_proto = tf.summary.image('train_gt_b', gt_coeff_b,
                                              hyper_params['nbDispImg'])
    train_f_coeff_a_proto = tf.summary.image('train_f_a', f_coeff_a,
                                             hyper_params['nbDispImg'])
    train_f_coeff_b_proto = tf.summary.image('train_f_b', f_coeff_b,
                                             hyper_params['nbDispImg'])
    train_cline_mask_inv_proto = tf.summary.image('train_clmask_inv',
                                                  cline_mask,
                                                  hyper_params['nbDispImg'])
    train_fm_proto = tf.summary.image('train_feature_mask', feature_mask,
                                      hyper_params['nbDispImg'])
    train_fm_inv_proto = tf.summary.image('train_feature_mask_inv',
                                          feature_mask_inv,
                                          hyper_params['nbDispImg'])
    train_selm_proto = tf.summary.image('train_sel_mask', sel_mask,
                                        hyper_params['nbDispImg'])
    train_vdotn_proto = tf.summary.image('train_vdotn_scalar', vdotn_scalar,
                                         hyper_params['nbDispImg'])

    return [
        train_npr_proto, train_gtn_proto, train_gtd_proto, train_fn_proto,
        train_fd_proto, train_gtds_proto, train_fcfm_proto,
        train_gt_coeff_a_proto, train_gt_coeff_b_proto, train_f_coeff_a_proto,
        train_f_coeff_b_proto, train_cline_mask_inv_proto, train_fm_proto,
        train_fm_inv_proto, train_selm_proto, train_vdotn_proto
    ]