def get_loss(pred, label, end_points):
    """ pred: BxNx3,
        label: BxNx3, """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
        pred, label)
    pc_loss = tf.reduce_mean(dists_forward + dists_backward)
    end_points['pcloss'] = pc_loss
    tf.summary.scalar('pcloss', pc_loss)

    d1, _, d2, _ = tf_nndistance.nn_distance(end_points['pc1_xyz'], label)
    pc1_loss = tf.reduce_mean(d1) + tf.reduce_mean(d2)
    tf.summary.scalar('pc1loss', pc1_loss)
    loss = pc_loss + 0.1 * pc1_loss

    return loss * 100, end_points
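# For reference: a minimal NumPy sketch (not part of the original snippet) of
# what tf_nndistance.nn_distance computes -- squared nearest-neighbor distances
# in both directions between two point clouds.
import numpy as np

def chamfer_reference(pred, label):
    """pred: (B, N, 3), label: (B, M, 3) NumPy arrays."""
    diff = pred[:, :, None, :] - label[:, None, :, :]   # (B, N, M, 3)
    sq = np.sum(diff ** 2, axis=-1)                     # pairwise squared distances
    dists_forward = sq.min(axis=2)    # per pred point: distance to nearest label point
    dists_backward = sq.min(axis=1)   # per label point: distance to nearest pred point
    return dists_forward, dists_backward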
Example #2
def get_loss(pred, label, end_points):
    """ pred: BxNx3,
        label: BxNx3, """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(pred, label)
    loss = tf.reduce_mean(dists_forward + dists_backward)
    end_points['pcloss'] = loss
    return loss * 100, end_points
Example #3
def chamfer_loss(pc1, pc2):
    """ pred: BxNx3,
        label: BxNx3, """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(pc1, pc2)
    # loss = dists_forward+dists_backward
    loss = tf.reduce_mean(dists_forward + dists_backward, axis=1)
    return loss
Example #4
def get_cd_loss2(pred, gt, radius, forward_weight=1.0, threshold=100):
    """
    pred: BxNxC,
    gt: BxNxC,
    radius: unused here (the normalization line below is commented out)
    forward_weight: relative weight for the forward distance
    threshold: if set, distances above threshold * per-sample mean are ignored
    """
    with tf.name_scope("cd_loss"):
        dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
            gt, pred)
        if threshold is not None:
            forward_threshold = tf.reduce_mean(
                dists_forward, keepdims=True, axis=1) * threshold
            backward_threshold = tf.reduce_mean(
                dists_backward, keepdims=True, axis=1) * threshold
            # only care about distance within threshold (ignore strong outliers)
            dists_forward = tf.where(dists_forward < forward_threshold,
                                     dists_forward,
                                     tf.zeros_like(dists_forward))
            dists_backward = tf.where(dists_backward < backward_threshold,
                                      dists_backward,
                                      tf.zeros_like(dists_backward))
        # dists_forward[b, i]: squared distance from gt point i to its nearest neighbor in pred
        dists_forward = tf.reduce_mean(dists_forward, axis=1)
        dists_backward = tf.reduce_mean(dists_backward, axis=1)
        CD_dist = forward_weight * dists_forward + dists_backward
        # CD_dist_norm = CD_dist/radius
        cd_loss = tf.reduce_mean(CD_dist)
        return cd_loss  #, None
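# The thresholding above zeroes any squared distance larger than `threshold`
# times the per-sample mean, so isolated outliers stop dominating the loss.
# A toy NumPy sketch of the same masking (values are illustrative):
import numpy as np

dists = np.array([[0.01, 0.02, 0.01, 5.0]])          # one strong outlier
cutoff = dists.mean(axis=1, keepdims=True) * 100.0   # per-sample cutoff, as in the code
masked = np.where(dists < cutoff, dists, 0.0)
# Here the mean is ~1.26, so threshold=100 keeps everything (cutoff 126.0);
# with threshold=1 the cutoff would be ~1.26 and the 5.0 outlier would be zeroed.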
def get_loss(pred, label):
    """ pred: BxNx3,
        label: BxNx3, """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
        pred, label)
    loss = tf.reduce_mean(dists_forward + dists_backward, axis=1)
    return loss * 100
Example #6
def mesh_loss(pred, placeholders, block_id):
    gt_pt = placeholders['labels'][:, :3]  # gt points
    gt_nm = placeholders['labels'][:, 3:]  # gt normals

    # edge in graph
    nod1 = tf.gather(pred, placeholders['edges'][block_id - 1][:, 0])
    nod2 = tf.gather(pred, placeholders['edges'][block_id - 1][:, 1])
    edge = tf.subtract(nod1, nod2)

    # edge length loss
    edge_length = tf.reduce_sum(tf.square(edge), 1)
    edge_loss = tf.reduce_mean(edge_length) * 300

    # chamfer distance
    dist1, idx1, dist2, idx2 = tf_nndistance.nn_distance(gt_pt, pred)
    point_loss = (tf.reduce_mean(dist1) + 0.55 * tf.reduce_mean(dist2)) * 3000

    # normal cosine loss
    normal = tf.gather(gt_nm, tf.squeeze(idx2, 0))
    normal = tf.gather(normal, placeholders['edges'][block_id - 1][:, 0])
    cosine = tf.abs(tf.reduce_sum(tf.multiply(unit(normal), unit(edge)), 1))
    # cosine = tf.where(tf.greater(cosine,0.866), tf.zeros_like(cosine), cosine) # truncated
    normal_loss = tf.reduce_mean(cosine) * 0.5

    total_loss = point_loss + edge_loss + normal_loss
    return total_loss
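# mesh_loss relies on a helper `unit` that is not shown in this snippet.
# A plausible definition, assuming it L2-normalizes each row vector:
def unit(tensor):
    # Assumed behavior: scale each row to unit length so the dot product
    # above is a cosine similarity.
    return tf.nn.l2_normalize(tensor, axis=1)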
Example #7
def get_labels_seg(pcl_gt, pcl_pred, metric):
    '''
    Point-wise correspondences between two point sets
    args:
        pcl_gt: (batch_size, n_pts, 3), gt pcl
        pcl_pred: (batch_size, n_pts, 3), predicted pcl
        metric: str, 'chamfer' or 'emd'
                metric to be considered for returning corresponding points
    returns:
        pts_match_fwd: gt to pred point-wise correspondence
                       each point in gt is mapped to nearest point in pred
        pts_match_bwd: pred to gt point-wise correspondence
                       each point in pred is mapped to nearest point in gt
        pts_match: one-to-one mapping between pred and gt, acc. to emd
    '''
    if metric == 'chamfer':
        _, pts_match_fwd, _, pts_match_bwd = tf_nndistance.nn_distance(
            pcl_gt, pcl_pred)
        return pts_match_fwd, pts_match_bwd
    elif metric == 'emd':
        pts_match, _ = auction_match(pcl_gt, pcl_pred)
        return pts_match
    else:
        print('Undefined metric')
        return None
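# Note the chamfer branch returns index tensors, not distances:
# pts_match_fwd[b, i] is the index in pcl_pred of the point nearest to
# pcl_gt[b, i], and vice versa for pts_match_bwd. A NumPy sketch of the
# same argmin logic (illustration only, not the CUDA op):
import numpy as np

def nn_indices(pcl_gt, pcl_pred):
    sq = np.sum((pcl_gt[:, :, None, :] - pcl_pred[:, None, :, :]) ** 2,
                axis=-1)                  # (B, N_gt, N_pred)
    pts_match_fwd = sq.argmin(axis=2)     # gt -> nearest pred index
    pts_match_bwd = sq.argmin(axis=1)     # pred -> nearest gt index
    return pts_match_fwd, pts_match_bwd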
Example #8
def get_metrics(gt_pcl, pred_pcl, args):
    '''
    Obtain chamfer and emd distances between GT and predicted pcl
    args:
            gt_pcl: float, (BS,N_PTS,3); GT point cloud
            pred_pcl: float, (BS,N_PTS,3); predicted point cloud
    returns:
            dists_forward: float, (BS); forward chamfer distance
            dists_backward: float, (BS); backward chamfer distance
            chamfer_distance: float, (BS); chamfer distance
            emd: float, (BS); earth mover's distance
    '''
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
        gt_pcl, pred_pcl)
    dists_forward = tf.reduce_mean(
        dists_forward, axis=1)  # (BATCH_SIZE,args.N_PTS) --> (BATCH_SIZE)
    dists_backward = tf.reduce_mean(dists_backward, axis=1)
    chamfer_distance = dists_backward + dists_forward

    X, _ = tf.meshgrid(range(args.batch_size),
                       range(args.OUTPUT_PCL_SIZE),
                       indexing='ij')
    ind, _ = auction_match(pred_pcl,
                           gt_pcl)  # Ind corresponds to points in pcl_gt
    ind = tf.stack((X, ind), -1)
    emd = tf.reduce_mean(
        tf.reduce_sum((tf.gather_nd(gt_pcl, ind) - pred_pcl)**2, axis=-1),
        axis=1
    )  # (BATCH_SIZE,args.N_PTS,3) --> (BATCH_SIZE,args.N_PTS) --> (BATCH_SIZE)
    return dists_forward, dists_backward, chamfer_distance, emd
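# The tf.stack((X, ind), -1) trick builds (batch, point) index pairs so that
# tf.gather_nd can pull, for every predicted point, its EMD-matched GT point.
# A small NumPy sketch of the same indexing (shapes are illustrative):
import numpy as np

B, N = 2, 4
gt = np.random.rand(B, N, 3)
ind = np.random.randint(0, N, size=(B, N))       # stand-in for auction_match output
X = np.repeat(np.arange(B)[:, None], N, axis=1)  # batch index grid, like tf.meshgrid
matched = gt[X, ind]                             # == gather_nd(gt, stack((X, ind), -1))
assert matched.shape == (B, N, 3)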
Example #9
    def __init__(self, seq_length, num_points=128):

        self.ground_truth = tf.placeholder(tf.float32,
                                           [1, seq_length, num_points, 3])
        self.prediction = tf.placeholder(tf.float32,
                                         [1, seq_length, num_points, 3])

        gt_frames = tf.split(value=self.ground_truth,
                             num_or_size_splits=seq_length,
                             axis=1)
        gt_frames = [tf.squeeze(input=frame, axis=[1]) for frame in gt_frames]

        pd_frames = tf.split(value=self.prediction,
                             num_or_size_splits=seq_length,
                             axis=1)
        pd_frames = [tf.squeeze(input=frame, axis=[1]) for frame in pd_frames]

        cds, emds = [], []

        for i in range(seq_length):
            match = tf_approxmatch.approx_match(gt_frames[i], pd_frames[i])
            emd_distance = tf.reduce_mean(
                tf_approxmatch.match_cost(gt_frames[i], pd_frames[i], match))
            emds.append(emd_distance)

            dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
                pd_frames[i], gt_frames[i])
            cd_distance = tf.reduce_mean(dists_forward + dists_backward)
            cds.append(cd_distance)

        self.cds = tf.stack(cds, 0)
        self.emds = tf.stack(emds, 0)
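# Hypothetical usage sketch for this evaluation graph (the class name
# `Evaluator` is assumed, since the snippet starts at __init__):
import numpy as np
import tensorflow as tf

seq_length, num_points = 8, 128
evaluator = Evaluator(seq_length, num_points)
gt = np.random.rand(1, seq_length, num_points, 3).astype(np.float32)
pd = np.random.rand(1, seq_length, num_points, 3).astype(np.float32)
with tf.Session() as sess:
    cds, emds = sess.run([evaluator.cds, evaluator.emds],
                         feed_dict={evaluator.ground_truth: gt,
                                    evaluator.prediction: pd})
# cds and emds are length-seq_length vectors: one CD / EMD value per frame.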
Example #10
def get_loss(pred, label):
    """ pred: BxNx3,
        label: BxNx3, """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
        pred, label)
    loss_per_sample = dists_forward + dists_backward  # (B, N): per-point CD contributions
    loss = tf.reduce_mean(loss_per_sample)
    return loss, loss_per_sample
Example #11
def get_chamfer_metrics(gt_pcl, pred_pcl):
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
        gt_pcl, pred_pcl)
    dists_forward = tf.reduce_mean(
        dists_forward, axis=1)  # (BATCH_SIZE,NUM_POINTS) --> (BATCH_SIZE)
    dists_backward = tf.reduce_mean(dists_backward, axis=1)
    chamfer_distance = dists_backward + dists_forward
    return dists_forward, dists_backward, chamfer_distance
Example #12
def get_cd_loss(pred, gt, radius):
    """ pred: BxNxC,
        label: BxN, """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(gt, pred)
    # dists_forward is for each element in gt, the closest distance to this element
    CD_dist = 0.8 * dists_forward + 0.2 * dists_backward
    CD_dist = tf.reduce_mean(CD_dist, axis=1)
    CD_dist_norm = CD_dist / radius
    cd_loss = tf.reduce_mean(CD_dist_norm)
    return cd_loss, None
def create_loss(output, truth, loss_type='emd'):
    if loss_type == 'emd':
        match = tf_approxmatch.approx_match(output, truth)
        build_loss = tf.reduce_mean(
            tf_approxmatch.match_cost(output, truth, match))

    else:
        cost_p1_p2, _, cost_p2_p1, _ = tf_nndistance.nn_distance(output, truth)
        build_loss = tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1)
    # tf.summary.scalar('build loss', build_loss)
    # tf.add_to_collection('losses', build_loss)
    return build_loss
def get_loss(pred, label, end_points):
    """ pred: BxNx3,
        label: BxNx3, """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
        pred, label)
    likelihoods = end_points['likelihoods']
    # Rate term: estimated bit cost of the latent representation (stored for logging)
    bits = tf.reduce_sum(tf.log(likelihoods), axis=(1, 2, 3)) / -np.log(2)
    end_points['bits'] = tf.reduce_mean(bits)

    loss = tf.reduce_mean(dists_forward + dists_backward)
    end_points['pcloss'] = loss
    return loss * 100, end_points
Example #15
def get_loss(pred, label, end_points):
    """ pred: BxNx3,
        label: BxNx3, """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
        pred, label)
    pc_loss = tf.reduce_mean(dists_forward + dists_backward)
    end_points['pcloss'] = pc_loss

    match = tf_approxmatch.approx_match(label, pred)
    loss = tf.reduce_mean(tf_approxmatch.match_cost(label, pred, match))
    tf.summary.scalar('loss', loss)
    return loss, end_points
Example #16
def get_loss(img, pc_in, is_training=True, reuse_mode=False):
    logits = model.img_encoder(img,
                               conv_net,
                               fully_con,
                               is_training=is_training,
                               reuse_mode=reuse_mode)
    pc_out = model.pc_decoder(logits,
                              fully_dec,
                              is_training=is_training,
                              reuse_mode=reuse_mode)
    cost_p1_p2, _, cost_p2_p1, _ = nn_distance(pc_in, pc_out)
    cd_loss = tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1)
    return cd_loss, pc_out
Example #17
def get_loss(pred, label, end_points):
    """ pred: BxNx3,
        label: BxNx3, """
    # Reconstruction loss
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(pred, label)
    loss = tf.reduce_mean(dists_forward + dists_backward)
    end_points['pcloss'] = loss
    # KL divergence loss (the formula assumes z_std holds the log-variance)
    kl_div_loss = 1 + end_points['z_std'] \
        - tf.square(end_points['z_mean']) - tf.exp(end_points['z_std'])
    kl_div_loss = -0.5 * tf.reduce_sum(kl_div_loss, 1)
    kl_div_loss = tf.reduce_mean(kl_div_loss) * 0.001
    end_points['kl_div_loss'] = kl_div_loss
    return loss * 100 + kl_div_loss, end_points
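# The KL term above is the closed-form divergence between a diagonal Gaussian
# q(z) = N(mu, sigma^2) and a standard normal:
#   KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
# which is why z_std must hold the log-variance. A NumPy check of the formula:
import numpy as np

mu = np.array([0.3, -0.1])
log_var = np.array([-0.5, 0.2])
kl = -0.5 * np.sum(1 + log_var - mu ** 2 - np.exp(log_var))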
Example #18
    def _reconstruction_loss(self, recon, input):

        if self.loss == 'chamfer':
            cost_p1_p2, _, cost_p2_p1, _ = nn_distance(recon, input)
            loss = tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1)
        elif self.loss == 'emd':
            match = approx_match(recon, input)
            loss = match_cost(recon, input, match)
            loss = tf.reduce_mean(loss)
            loss = tf.div(loss, self.point_cloud_shape[0]) # return point-wise loss
        elif self.loss == 'hausdorff':
            distances = directed_hausdorff(input, recon)  # partial/noisy -> fake clean
            loss = tf.reduce_mean(distances)
        else:
            raise ValueError('Unknown loss type: %s' % self.loss)
        return loss
Example #19
    def _reconstruction_loss(self, recon, input):
        #latent_code = encoder(input, self.is_training)
        #recon = decoder(latent_code, self.is_training)

        if self.loss == 'chamfer':
            cost_p1_p2, _, cost_p2_p1, _ = nn_distance(recon, input)
            loss = tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1)
        elif self.loss == 'emd':
            match = approx_match(recon, input)
            loss = match_cost(recon, input, match)
            loss = tf.reduce_mean(loss)
            loss = tf.div(loss, self.point_cloud_shape[0]) # return point-wise loss
        
        else:
            raise ValueError('Unknown loss type: %s' % self.loss)
        return loss
Example #20
def get_chamfer_metrics(gt_pcl, pred_pcl):
    '''
    Obtain chamfer distance between GT and predicted point clouds
    args:
            gt_pcl: float, (BS,N_PTS,3); GT point cloud
            pred_pcl: float, (BS,N_PTS,3); predicted point cloud
    returns:
            dists_forward: float, (BS,); forward chamfer distance
            dists_backward: float, (BS,); backward chamfer distance
            chamfer_distance: float, (BS,); chamfer distance
    '''
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(gt_pcl, pred_pcl)
    dists_forward = tf.reduce_mean(dists_forward, axis=1) # (BATCH_SIZE,NUM_POINTS) --> (BATCH_SIZE)
    dists_backward = tf.reduce_mean(dists_backward, axis=1)
    chamfer_distance = dists_backward + dists_forward
    return dists_forward, dists_backward, chamfer_distance
Example #21
def get_chamfer_dist(gt_pcl, pred_pcl):
    '''
    Calculate chamfer distance between two point clouds
    Args:
        gt_pcl: (BS,N_pts,3); GT point cloud
        pred_pcl: (BS,N_pts,3); predicted point cloud
    Returns:
        dists_forward: (); averaged forward chamfer distance
        dists_backward: (); averaged backward chamfer distance
        chamfer_distance: (); averaged chamfer distance
    '''
    # FWD: GT-->Pred, Bwd: Pred-->GT
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(gt_pcl, pred_pcl)
    dists_forward = tf.reduce_mean(dists_forward)  # (BATCH_SIZE,NUM_POINTS) --> scalar
    dists_backward = tf.reduce_mean(dists_backward)
    chamfer_distance = dists_backward + dists_forward
    return dists_forward, dists_backward, chamfer_distance
Example #22
def hausdorff_loss(pred, gt, radius=1.0, forward_weight=1.0, threshold=None):
    """
    pred: BxNxC,
    gt: BxNxC,
    forward_weight: relative weight for the forward distance
    threshold: if set, distances above this absolute value are ignored
    """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(gt, pred)
    # only care about distance within threshold (ignore strong outliers)
    if threshold is not None:
        dists_forward = tf.where(dists_forward < threshold, dists_forward,
                                 tf.zeros_like(dists_forward))
        dists_backward = tf.where(dists_backward < threshold, dists_backward,
                                  tf.zeros_like(dists_backward))
    # dists_forward[b, i]: squared distance from gt point i to its nearest neighbor in pred
    dists_forward = tf.reduce_max(dists_forward, axis=1)
    dists_backward = tf.reduce_max(dists_backward, axis=1)
    hd_dist = forward_weight * dists_forward + dists_backward
    hd_dist_norm = hd_dist / radius
    hd_loss = tf.reduce_max(hd_dist_norm)
    return hd_loss
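# The reduce_max calls make this a Hausdorff-style distance: the worst-case
# (max) nearest-neighbor distance rather than the mean used by chamfer.
# A NumPy reference for the directed term (illustration only):
import numpy as np

def directed_hausdorff_sq(a, b):
    """Max over points of `a` of the squared distance to the nearest point
    in `b`. a: (N, 3), b: (M, 3) -- single cloud, for illustration."""
    sq = np.sum((a[:, None, :] - b[None, :, :]) ** 2, axis=-1)  # (N, M)
    return sq.min(axis=1).max()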
Example #23
def get_rec_metrics(gt_pcl, pred_pcl, batch_size=10, num_points=1024):
    '''
    Calculate chamfer and emd metrics
    args:
            gt_pcl: float, (BS,N_PTS,3); ground truth point cloud
            pred_pcl: float, (BS,N_PTS,3); predicted point cloud
    '''
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(gt_pcl, 
            pred_pcl)
    dists_forward = tf.reduce_mean(tf.sqrt(dists_forward), axis=1) # (B, )
    dists_backward = tf.reduce_mean(tf.sqrt(dists_backward), axis=1) # (B, )
    chamfer_distance = dists_backward + dists_forward

    X, _ = tf.meshgrid(range(batch_size), range(num_points), indexing='ij')
    ind, _ = auction_match(pred_pcl, gt_pcl)  # ind corresponds to points in gt_pcl
    print(X.get_shape())
    print(ind.get_shape())
    ind = tf.stack((X, ind), -1)
    print(gt_pcl.get_shape())
    print(ind.get_shape())
    print()
    emd = tf.reduce_mean(tf.sqrt(tf.reduce_sum(
        (tf.gather_nd(gt_pcl, ind) - pred_pcl) ** 2, axis=-1)),
        axis=1)  # (BATCH_SIZE,NUM_POINTS,3) --> (BATCH_SIZE,NUM_POINTS) --> (BATCH_SIZE)
    return dists_forward, dists_backward, chamfer_distance, emd
Example #24
    for i in range(len(pre_max_val)):
        # get the most important critical points if NUM_ADD < number of critical points;
        # importance is determined by counting how many elements of the global feature
        # vector are contributed by one specific point
        idx, counts = np.unique(np.argmax(pre_max_val[i], axis=0),
                                return_counts=True)
        idx_idx = np.argsort(counts)
        if len(counts) > NUM_ADD:
            points = data[i][idx[idx_idx[-NUM_ADD:]]]
        else:
            points = data[i][idx]
            tmp_num = NUM_ADD - len(counts)
            while (tmp_num > len(counts)):
                points = np.concatenate([points, data[i][idx]])
                tmp_num -= len(counts)
            points = np.concatenate([points, data[i][-tmp_num:]])

        critical_points.append(points)
    critical_points = np.stack(critical_points)
    return critical_points
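# The importance measure above counts, for each input point, how many channels
# of the max-pooled global feature it supplies. A toy NumPy illustration of
# that counting step (shapes are illustrative):
import numpy as np

pre_max = np.random.rand(1024, 64)   # per-point features before max pooling
idx, counts = np.unique(np.argmax(pre_max, axis=0), return_counts=True)
# idx: indices of "critical" points that win the max in at least one channel;
# counts: how many of the 64 channels each critical point supplies.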


if __name__ == '__main__':
    with tf.Graph().as_default():
        inputs = tf.zeros((32, 1024, 3))
        inputs2 = tf.zeros((32, 122, 3))
        outputs = get_model(inputs, tf.constant(True))
        print(outputs)
        dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
            inputs2, inputs)
Example #25
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(gt, pred)
    # dists_forward is for each element in gt, the closest distance to this element
    CD_dist = 0.8 * dists_forward + 0.2 * dists_backward
    CD_dist = tf.reduce_mean(CD_dist, axis=1)
    CD_dist_norm = CD_dist / radius
    cd_loss = tf.reduce_mean(CD_dist_norm)
    return cd_loss, None


def get_uniform_loss_knn(pred):
    var, _ = knn_point(6, pred, pred)
    mean = tf.reduce_mean(var, axis=2)
    _, variance = tf.nn.moments(mean, axes=[1])
    variance1 = tf.reduce_sum(variance)
    _, var = tf.nn.moments(var, axes=[2])
    var = tf.reduce_sum(var)
    variance2 = tf.reduce_sum(var)
    return variance1 + variance2


if __name__ == '__main__':
    gt = tf.constant([[[1, 0, 0], [2, 0, 0], [3, 0, 0], [4, 0, 0]]],
                     tf.float32)
    pred = tf.constant([[[-10, 0, 0], [1, 0, 0], [2, 0, 0], [3, 0, 0]]],
                       tf.float32)

    dists_forward, idx1, dists_backward, idx2 = tf_nndistance.nn_distance(
        gt, pred)
    with tf.Session() as sess:
        print(idx1.eval())  # for each element in gt, the idx of its nearest point in pred
        print(idx2.eval())  # for each element in pred, the idx of its nearest point in gt
Example #26
def chamfer_distance(pcd1, pcd2):
    dist1, _, dist2, _ = tf_nndistance.nn_distance(pcd1, pcd2)
    return dist1, dist2
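# Unlike the other wrappers on this page, chamfer_distance returns the raw
# per-point squared distances. A possible follow-up reduction (sketch,
# mirroring the patterns used elsewhere here):
dist1, dist2 = chamfer_distance(pcd1, pcd2)
cd_per_sample = tf.reduce_mean(dist1, axis=1) + tf.reduce_mean(dist2, axis=1)  # (B,)
cd_scalar = tf.reduce_mean(cd_per_sample)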
Example #27
def attack():
    is_training = False
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            pointclouds_pl, labels_pl = MODEL.placeholder_inputs(
                BATCH_SIZE, NUM_POINT)
            is_training_pl = tf.placeholder(tf.bool, shape=())

            pert = tf.get_variable(
                name='pert',
                shape=[BATCH_SIZE, NUM_ADD, 3],
                initializer=tf.truncated_normal_initializer(stddev=0.01))
            initial_point_pl = tf.placeholder(shape=[BATCH_SIZE, NUM_ADD, 3],
                                              dtype=tf.float32)
            point_added = initial_point_pl + pert
            pointclouds_input = tf.concat([pointclouds_pl, point_added],
                                          axis=1)

            pred, end_points = MODEL.get_model(pointclouds_input,
                                               is_training_pl)

            #adv loss
            adv_loss = MODEL.get_adv_loss(pred, TARGET)

            dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
                point_added, pointclouds_pl)
            if FLAGS.constraint == 'c':  #Chamfer
                dists_forward = tf.reduce_mean(dists_forward, axis=1)
                dists_backward = tf.reduce_mean(dists_backward,
                                                axis=1)  #not used
            elif FLAGS.constraint == 'h':  #Hausdorff
                dists_forward = tf.reduce_max(dists_forward, axis=1)
                dists_backward = tf.reduce_max(dists_backward,
                                               axis=1)  #not used
            else:
                raise Exception(
                    "Invalid constraint type. Please try c for Chamfer and h for Hausdorff"
                )

            dist_weight = tf.placeholder(shape=[BATCH_SIZE], dtype=tf.float32)
            lr_attack = tf.placeholder(dtype=tf.float32)
            attack_optimizer = tf.train.AdamOptimizer(lr_attack)
            attack_op = attack_optimizer.minimize(
                adv_loss +
                tf.reduce_mean(tf.multiply(dist_weight, dists_forward)),
                var_list=[pert])

            vl = tf.global_variables()
            vl = [x for x in vl if 'pert' not in x.name]
            saver = tf.train.Saver(vl)

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        #config.log_device_placement = True
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())

        ops = {
            'pointclouds_pl': pointclouds_pl,
            'labels_pl': labels_pl,
            'is_training_pl': is_training_pl,
            'pointclouds_input': pointclouds_input,
            'initial_point_pl': initial_point_pl,
            'dist_weight': dist_weight,
            'pert': pert,
            'point_added': point_added,
            'pre_max': end_points['pre_max'],
            'post_max': end_points['post_max'],
            'pred': pred,
            'adv_loss': adv_loss,
            # 'dists_backward': dists_backward,
            'dists_forward': dists_forward,
            'total_loss': tf.reduce_mean(tf.multiply(dist_weight,
                                                     dists_forward)) + adv_loss,
            'lr_attack': lr_attack,
            'attack_op': attack_op
        }

        saver.restore(sess, MODEL_PATH)
        print('model restored!')

        dist_list = []
        # class indices of the selected 10 largest classes in ModelNet40
        for victim in [0, 1, 2, 3]:
            if victim == TARGET:
                continue
            attacked_data = attacked_data_all[victim][:25]  # attacked_data shape: 25*1024*3
            for j in range(25 // BATCH_SIZE):
                dist, img = attack_one_batch(
                    sess, ops,
                    attacked_data[j * BATCH_SIZE:(j + 1) * BATCH_SIZE])
                dist_list.append(dist)
                np.save(
                    os.path.join('.', DUMP_DIR,
                                 '{}_{}_{}_adv.npy'.format(victim, TARGET, j)),
                    img)
Example #28
    def gen_batch():
        np.random.seed()
        point_cloud = np.random.rand(batch_size, cfg.VOXEL_POINT_COUNT, 3)
        mask = np.random.choice(a=[False, True],
                                size=(batch_size, cfg.VOXEL_POINT_COUNT, 1),
                                p=[0.8, 0.2])
        for i in range(batch_size):
            if np.sum(mask[i]) == 0:
                mask[i, np.random.randint(cfg.VOXEL_POINT_COUNT), 0] = True
        return point_cloud, mask

    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(BASE_DIR, 'nn_distance'))
    from tf_nndistance import nn_distance
    dists_forward, _, dists_backward, _ = nn_distance(result, point_cloud_pl)
    loss_pred = tf.reduce_mean(dists_forward + dists_backward)
    tf.summary.scalar('loss_pred', loss_pred)
    if False:  # optional L1 regularization on non-bias weights (disabled)
        tvars = tf.trainable_variables()
        lossL1 = tf.add_n(
            [tf.reduce_sum(tf.abs(v))
             for v in tvars if 'bias' not in v.name]) * 0.001
        tf.summary.scalar('lossL1', lossL1)
        loss = loss_pred + lossL1
    else:
        loss = loss_pred
    train = tf.train.AdamOptimizer(learning_rate=0.00005).minimize(loss)

    saver = tf.train.Saver(max_to_keep=2)
Example #29
    def __init__(self,
                 batch_size,
                 seq_length,
                 num_points=1024,
                 num_samples=8,
                 knn=False,
                 alpha=1.0,
                 beta=1.0,
                 learning_rate=0.001,
                 max_gradient_norm=5.0,
                 is_training=False):

        self.global_step = tf.Variable(0, trainable=False)

        self.inputs = tf.placeholder(tf.float32,
                                     [batch_size, seq_length, num_points, 3])
        frames = tf.split(value=self.inputs,
                          num_or_size_splits=seq_length,
                          axis=1)
        frames = [tf.squeeze(input=frame, axis=[1]) for frame in frames]

        cell1 = PointLSTMCell(radius=1.0 + 1e-6,
                              nsample=3 * num_samples,
                              out_channels=128,
                              knn=knn,
                              pooling='max')
        cell2 = PointLSTMCell(radius=2.0 + 1e-6,
                              nsample=2 * num_samples,
                              out_channels=256,
                              knn=knn,
                              pooling='max')
        cell3 = PointLSTMCell(radius=4.0 + 1e-6,
                              nsample=1 * num_samples,
                              out_channels=512,
                              knn=knn,
                              pooling='max')

        # context
        states1 = None
        states2 = None
        states3 = None
        for i in range(int(seq_length / 2)):
            # 512
            xyz1, _, _, _ = sample_and_group(int(num_points / 2),
                                             radius=0.5 + 1e-6,
                                             nsample=num_samples,
                                             xyz=frames[i],
                                             points=None,
                                             knn=False,
                                             use_xyz=False)
            with tf.variable_scope('encoder_1', reuse=tf.AUTO_REUSE) as scope:
                states1 = cell1((xyz1, None), states1)
                s_xyz1, h_feat1, _ = states1
            # 256
            xyz2, feat2, _, _ = sample_and_group(int(num_points / 2 / 2),
                                                 radius=1.0 + 1e-6,
                                                 nsample=num_samples,
                                                 xyz=s_xyz1,
                                                 points=h_feat1,
                                                 knn=False,
                                                 use_xyz=False)
            feat2 = tf.reduce_max(feat2,
                                  axis=[2],
                                  keepdims=False,
                                  name='maxpool')
            with tf.variable_scope('encoder_2', reuse=tf.AUTO_REUSE) as scope:
                states2 = cell2((xyz2, feat2), states2)
                s_xyz2, h_feat2, _ = states2
            # 128
            xyz3, feat3, _, _ = sample_and_group(int(num_points / 2 / 2 / 2),
                                                 radius=2.0 + 1e-6,
                                                 nsample=num_samples,
                                                 xyz=s_xyz2,
                                                 points=h_feat2,
                                                 knn=False,
                                                 use_xyz=False)
            feat3 = tf.reduce_max(feat3,
                                  axis=[2],
                                  keepdims=False,
                                  name='maxpool')
            with tf.variable_scope('encoder_3', reuse=tf.AUTO_REUSE) as scope:
                states3 = cell3((xyz3, feat3), states3)

        # prediction
        predicted_motions = []
        predicted_frames = []
        input_frame = frames[int(seq_length / 2) - 1]
        for i in range(int(seq_length / 2), seq_length):
            # 512
            xyz1, _, _, _ = sample_and_group(int(num_points / 2),
                                             radius=0.5 + 1e-6,
                                             nsample=num_samples,
                                             xyz=input_frame,
                                             points=None,
                                             knn=False,
                                             use_xyz=False)
            with tf.variable_scope('decoder_1', reuse=tf.AUTO_REUSE) as scope:
                states1 = cell1((xyz1, None), states1)
                s_xyz1, h_feat1, _ = states1
            # 256
            xyz2, feat2, _, _ = sample_and_group(int(num_points / 2 / 2),
                                                 radius=1.0 + 1e-6,
                                                 nsample=num_samples,
                                                 xyz=s_xyz1,
                                                 points=h_feat1,
                                                 knn=False,
                                                 use_xyz=False)
            feat2 = tf.reduce_max(feat2,
                                  axis=[2],
                                  keepdims=False,
                                  name='maxpool')
            with tf.variable_scope('decoder_2', reuse=tf.AUTO_REUSE) as scope:
                states2 = cell2((xyz2, feat2), states2)
                s_xyz2, h_feat2, _ = states2
            # 128
            xyz3, feat3, _, _ = sample_and_group(int(num_points / 2 / 2 / 2),
                                                 radius=2.0 + 1e-6,
                                                 nsample=num_samples,
                                                 xyz=s_xyz2,
                                                 points=h_feat2,
                                                 knn=False,
                                                 use_xyz=False)
            feat3 = tf.reduce_max(feat3,
                                  axis=[2],
                                  keepdims=False,
                                  name='maxpool')
            with tf.variable_scope('decoder_3', reuse=tf.AUTO_REUSE) as scope:
                states3 = cell3((xyz3, feat3), states3)
                s_xyz3, h_feat3, _ = states3

            with tf.variable_scope('fp', reuse=tf.AUTO_REUSE) as scope:
                l2_feat = pointnet_fp_module(xyz2,
                                             xyz3,
                                             h_feat2,
                                             h_feat3,
                                             mlp=[256],
                                             last_mlp_activation=True,
                                             scope='fp2')
                l1_feat = pointnet_fp_module(xyz1,
                                             xyz2,
                                             h_feat1,
                                             l2_feat,
                                             mlp=[256],
                                             last_mlp_activation=True,
                                             scope='fp1')
                l0_feat = pointnet_fp_module(input_frame,
                                             xyz1,
                                             None,
                                             l1_feat,
                                             mlp=[256],
                                             last_mlp_activation=True,
                                             scope='fp0')

            with tf.variable_scope('fc', reuse=tf.AUTO_REUSE) as scope:
                predicted_motion = tf.layers.conv1d(
                    inputs=l0_feat,
                    filters=128,
                    kernel_size=1,
                    strides=1,
                    padding='valid',
                    data_format='channels_last',
                    activation=tf.nn.relu,
                    name='fc1')
                predicted_motion = tf.layers.conv1d(
                    inputs=predicted_motion,
                    filters=3,
                    kernel_size=1,
                    strides=1,
                    padding='valid',
                    data_format='channels_last',
                    activation=None,
                    name='fc2')

            predicted_motions.append(predicted_motion)
            input_frame += predicted_motion
            predicted_frames.append(input_frame)

        # loss
        if is_training:
            self.loss = self.emd = self.cd = 0
            for i in range(int(seq_length / 2)):
                match = tf_approxmatch.approx_match(
                    frames[i + int(seq_length / 2)], predicted_frames[i])
                emd_distance = tf.reduce_mean(
                    tf_approxmatch.match_cost(frames[i + int(seq_length / 2)],
                                              predicted_frames[i], match))
                loss_emd = emd_distance
                self.emd += loss_emd

                dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
                    predicted_frames[i], frames[i + int(seq_length / 2)])
                loss_cd = tf.reduce_mean(dists_forward + dists_backward)
                self.cd += loss_cd

                self.loss += (alpha * loss_cd + beta * loss_emd)

            self.cd /= int(seq_length / 2)
            self.emd /= (int(seq_length / 2) * num_points)

            self.loss /= int(seq_length / 2)

            params = tf.trainable_variables()
            gradients = tf.gradients(self.loss, params)
            clipped_gradients, norm = tf.clip_by_global_norm(
                gradients, max_gradient_norm)
            self.train_op = tf.train.AdamOptimizer(
                learning_rate).apply_gradients(zip(clipped_gradients, params),
                                               global_step=self.global_step)

        self.predicted_motions = tf.stack(values=predicted_motions, axis=1)
        self.predicted_frames = tf.stack(values=predicted_frames, axis=1)

        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
def build_graph(resourceid, reg_weight=0.001):
    with tf.device('/gpu:%d' % resourceid):
        tflearn.init_graph(seed=1029,
                           num_cores=2,
                           gpu_memory_fraction=0.9,
                           soft_placement=True)
        img_inp = tf.placeholder(tf.float32,
                                 shape=(BATCH_SIZE, HEIGHT, WIDTH, 1),
                                 name='img_inp')
        pt_gt = tf.placeholder(tf.float32,
                               shape=(BATCH_SIZE, POINTCLOUDSIZE, 3),
                               name='pt_gt')
        label = tf.placeholder(tf.float32,
                               shape=(BATCH_SIZE, 13),
                               name='label')

        x = img_inp
        #192 256
        x = tflearn.layers.conv.conv_2d(x,
                                        16, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x = tflearn.layers.conv.conv_2d(x,
                                        16, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x0 = x
        with tf.variable_scope('transform_net1') as sc:
            x_d = tflearn.layers.conv.max_pool_2d(x, 8, strides=8)
            transform = input_transform_net(
                tf.reshape(x_d, (BATCH_SIZE, 4096, 3)),
                is_training=tf.constant(True, dtype=tf.bool),
                bn_decay=0.5,
                K=3)
        x = tflearn.layers.conv.conv_2d(x,
                                        32, (3, 3),
                                        strides=2,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        #96 128
        x = tflearn.layers.conv.conv_2d(x,
                                        32, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x = tflearn.layers.conv.conv_2d(x,
                                        32, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x1 = x
        x = tflearn.layers.conv.conv_2d(x,
                                        64, (3, 3),
                                        strides=2,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        #48 64
        x = tflearn.layers.conv.conv_2d(x,
                                        64, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x = tflearn.layers.conv.conv_2d(x,
                                        64, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x2 = x
        x = tflearn.layers.conv.conv_2d(x,
                                        128, (3, 3),
                                        strides=2,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        #24 32
        x = tflearn.layers.conv.conv_2d(x,
                                        128, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x = tflearn.layers.conv.conv_2d(x,
                                        128, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x3 = x
        x = tflearn.layers.conv.conv_2d(x,
                                        256, (5, 5),
                                        strides=2,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        #12 16
        x = tflearn.layers.conv.conv_2d(x,
                                        256, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x = tflearn.layers.conv.conv_2d(x,
                                        256, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x4 = x
        x = tflearn.layers.conv.conv_2d(x,
                                        512, (5, 5),
                                        strides=2,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        #6 8
        x = tflearn.layers.conv.conv_2d(x,
                                        512, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x = tflearn.layers.conv.conv_2d(x,
                                        512, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x = tflearn.layers.conv.conv_2d(x,
                                        512, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x5 = x
        x = tflearn.layers.conv.conv_2d(x,
                                        512, (5, 5),
                                        strides=2,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        #3 4
        x_additional = tflearn.layers.core.fully_connected(x,
                                                           2048,
                                                           activation='relu',
                                                           weight_decay=1e-3,
                                                           regularizer='L2')
        x = tflearn.layers.conv.conv_2d_transpose(x,
                                                  256, [5, 5], [6, 8],
                                                  strides=2,
                                                  activation='linear',
                                                  weight_decay=1e-5,
                                                  regularizer='L2')
        #6 8
        x5 = tflearn.layers.conv.conv_2d(x5,
                                         256, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-5,
                                         regularizer='L2')
        x = tf.nn.relu(tf.add(x, x5))
        x = tflearn.layers.conv.conv_2d(x,
                                        256, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x5 = x
        x = tflearn.layers.conv.conv_2d_transpose(x,
                                                  128, [5, 5], [12, 16],
                                                  strides=2,
                                                  activation='linear',
                                                  weight_decay=1e-5,
                                                  regularizer='L2')
        #12 16
        x4 = tflearn.layers.conv.conv_2d(x4,
                                         128, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-5,
                                         regularizer='L2')
        x = tf.nn.relu(tf.add(x, x4))
        x = tflearn.layers.conv.conv_2d(x,
                                        128, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x4 = x
        x = tflearn.layers.conv.conv_2d_transpose(x,
                                                  64, [5, 5], [24, 32],
                                                  strides=2,
                                                  activation='linear',
                                                  weight_decay=1e-5,
                                                  regularizer='L2')
        #24 32
        x3 = tflearn.layers.conv.conv_2d(x3,
                                         64, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-5,
                                         regularizer='L2')
        x = tf.nn.relu(tf.add(x, x3))
        x = tflearn.layers.conv.conv_2d(x,
                                        64, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x3 = x
        x = tflearn.layers.conv.conv_2d_transpose(x,
                                                  32, [5, 5], [48, 64],
                                                  strides=2,
                                                  activation='linear',
                                                  weight_decay=1e-5,
                                                  regularizer='L2')
        #48 64
        x2 = tflearn.layers.conv.conv_2d(x2,
                                         32, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-5,
                                         regularizer='L2')
        x = tf.nn.relu(tf.add(x, x2))
        x = tflearn.layers.conv.conv_2d(x,
                                        32, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x2 = x
        x = tflearn.layers.conv.conv_2d_transpose(x,
                                                  16, [5, 5], [96, 128],
                                                  strides=2,
                                                  activation='linear',
                                                  weight_decay=1e-5,
                                                  regularizer='L2')
        #96 128
        x1 = tflearn.layers.conv.conv_2d(x1,
                                         16, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-5,
                                         regularizer='L2')
        x = tf.nn.relu(tf.add(x, x1))
        x = tflearn.layers.conv.conv_2d(x,
                                        16, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x = tflearn.layers.conv.conv_2d(x,
                                        32, (3, 3),
                                        strides=2,
                                        activation='linear',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        #48 64
        x2 = tflearn.layers.conv.conv_2d(x2,
                                         32, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-5,
                                         regularizer='L2')
        x = tf.nn.relu(tf.add(x, x2))
        x = tflearn.layers.conv.conv_2d(x,
                                        32, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x2 = x
        x = tflearn.layers.conv.conv_2d(x,
                                        64, (3, 3),
                                        strides=2,
                                        activation='linear',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        #24 32
        x3 = tflearn.layers.conv.conv_2d(x3,
                                         64, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-5,
                                         regularizer='L2')
        x = tf.nn.relu(tf.add(x, x3))
        x = tflearn.layers.conv.conv_2d(x,
                                        64, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x3 = x
        x = tflearn.layers.conv.conv_2d(x,
                                        128, (5, 5),
                                        strides=2,
                                        activation='linear',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        #12 16
        x4 = tflearn.layers.conv.conv_2d(x4,
                                         128, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-5,
                                         regularizer='L2')
        x = tf.nn.relu(tf.add(x, x4))
        x = tflearn.layers.conv.conv_2d(x,
                                        128, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x4 = x
        x = tflearn.layers.conv.conv_2d(x,
                                        256, (5, 5),
                                        strides=2,
                                        activation='linear',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        #6 8
        x5 = tflearn.layers.conv.conv_2d(x5,
                                         256, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-5,
                                         regularizer='L2')
        x = tf.nn.relu(tf.add(x, x5))
        x = tflearn.layers.conv.conv_2d(x,
                                        256, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x5 = x
        x = tflearn.layers.conv.conv_2d(x,
                                        512, (5, 5),
                                        strides=2,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        #3 4
        x_additional = tflearn.layers.core.fully_connected(x_additional,
                                                           2048,
                                                           activation='linear',
                                                           weight_decay=1e-4,
                                                           regularizer='L2')
        x_additional = tf.nn.relu(
            tf.add(
                x_additional,
                tflearn.layers.core.fully_connected(x,
                                                    2048,
                                                    activation='linear',
                                                    weight_decay=1e-3,
                                                    regularizer='L2')))
        x = tflearn.layers.conv.conv_2d_transpose(x,
                                                  256, [5, 5], [6, 8],
                                                  strides=2,
                                                  activation='linear',
                                                  weight_decay=1e-5,
                                                  regularizer='L2')
        #6 8
        x5 = tflearn.layers.conv.conv_2d(x5,
                                         256, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-5,
                                         regularizer='L2')
        x = tf.nn.relu(tf.add(x, x5))
        x = tflearn.layers.conv.conv_2d(x,
                                        256, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x5 = x
        x = tflearn.layers.conv.conv_2d_transpose(x,
                                                  128, [5, 5], [12, 16],
                                                  strides=2,
                                                  activation='linear',
                                                  weight_decay=1e-5,
                                                  regularizer='L2')
        #12 16
        x4 = tflearn.layers.conv.conv_2d(x4,
                                         128, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-5,
                                         regularizer='L2')
        x = tf.nn.relu(tf.add(x, x4))
        x = tflearn.layers.conv.conv_2d(x,
                                        128, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x4 = x
        x = tflearn.layers.conv.conv_2d_transpose(x,
                                                  64, [5, 5], [24, 32],
                                                  strides=2,
                                                  activation='linear',
                                                  weight_decay=1e-5,
                                                  regularizer='L2')
        #24 32
        x3 = tflearn.layers.conv.conv_2d(x3,
                                         64, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-5,
                                         regularizer='L2')
        x = tf.nn.relu(tf.add(x, x3))
        x = tflearn.layers.conv.conv_2d(x,
                                        64, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x = tflearn.layers.conv.conv_2d(x,
                                        64, (3, 3),
                                        strides=1,
                                        activation='relu',
                                        weight_decay=1e-5,
                                        regularizer='L2')

        x_additional = tflearn.layers.core.fully_connected(x_additional,
                                                           1024,
                                                           activation='relu',
                                                           weight_decay=1e-3,
                                                           regularizer='L2')

        x_additional = tflearn.layers.core.fully_connected(x_additional,
                                                           256 * 3,
                                                           activation='linear',
                                                           weight_decay=1e-3,
                                                           regularizer='L2')

        x_additional = tf.reshape(x_additional, (BATCH_SIZE, 256, 3))
        x = tflearn.layers.conv.conv_2d(x,
                                        3, (3, 3),
                                        strides=1,
                                        activation='linear',
                                        weight_decay=1e-5,
                                        regularizer='L2')
        x = tf.reshape(x, (BATCH_SIZE, 32 * 24, 3))
        x = tf.concat([x_additional, x], 1)
        x = tf.reshape(x, (BATCH_SIZE, OUTPUTPOINTS, 3))
        x = tf.matmul(x, transform)
        K = transform.get_shape()[1].value
        mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2,
                                                                      1]))
        mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
        mat_diff_loss = tf.nn.l2_loss(mat_diff)

        dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
            pt_gt, x)
        mindist = dists_forward
        dist0 = mindist[0, :]
        dists_forward = tf.reduce_mean(dists_forward)
        dists_backward = tf.reduce_mean(dists_backward)
        loss_nodecay = (dists_forward + dists_backward / 2.0) * 10000
        loss = loss_nodecay + tf.add_n(
            tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        ) * 0.1 + mat_diff_loss * reg_weight
        batchno = tf.Variable(0, dtype=tf.int32)
        optimizer = tf.train.AdamOptimizer(
            3e-5 * BATCH_SIZE / FETCH_BATCH_SIZE).minimize(loss,
                                                           global_step=batchno)
        batchnoinc = batchno.assign(batchno + 1)
    return img_inp, x, pt_gt, loss, optimizer, batchno, batchnoinc, mindist, loss_nodecay, dists_forward, dists_backward, dist0