# Fixed camera extrinsic: the inverse of the cam-02 transform, loaded from
# config as a constant tensor. NOTE(review): presumably a 4x4 homogeneous
# matrix -- confirm against config.camera_params.
small_transform = tf.constant(config.camera_params['cam_transform_02_inv'], dtype = tf.float32)

# 5x5 max-pool (stride 1, SAME padding) over the depth input X2.
# NOTE(review): looks like this densifies a sparse depth map before it is
# fed to the network -- confirm against the data pipeline.
X2_pooled = tf.nn.max_pool(X2, ksize=[1,5,5,1], strides=[1,1,1,1], padding="SAME")

# Build the network on the RGB input (X1) and pooled depth (X2_pooled).
# `output_vectors` are per-sample 6-D se(3) vectors (see the exponential map
# below); `weight_summaries` are TF summary ops for logging.
net = global_agg_net.Nets(X1, X2_pooled, phase_rgb, phase, fc_keep_prob)
output_vectors, weight_summaries = net.build()

# se(3) -> SE(3) for the whole batch
# output_vectors_ft = tf.map_fn(lambda x:transform_functions.RV2RM(expected_transforms[x], output_vectors[x]), elems=tf.range(0, batch_size, 1), dtype=tf.float32)
# output_vectors_ft = tf.reshape(output_vectors_ft, shape=(batch_size, 6))
predicted_transforms = tf.map_fn(lambda x:exponential_map_single(output_vectors[x]), elems=tf.range(0, batch_size * time_step, 1), dtype=tf.float32)

# predicted_transforms = tf.concat([expected_transforms[:, :3, :3], tf.reshape(predicted_transforms[:, :3, 3], shape=[batch_size, 3, 1])], axis=-1)

# transforms depth maps by the predicted transformation
# The `*40.0 + 40.0` undoes the input normalization -- NOTE(review): this
# assumes depth was scaled to [-1, 1] from a 0..80 range; confirm against the
# preprocessing code.
depth_maps_predicted, cloud_pred = tf.map_fn(lambda x:at3._simple_transformer(X2_pooled[x,:,:,0]*40.0 + 40.0, predicted_transforms[x], K_final, small_transform), elems = tf.range(0, batch_size * time_step, 1), dtype = (tf.float32, tf.float32))

# transforms depth maps by the expected transformation
depth_maps_expected, cloud_exp = tf.map_fn(lambda x:at3._simple_transformer(X2_pooled[x,:,:,0]*40.0 + 40.0, expected_transforms[x], K_final, small_transform), elems = tf.range(0, batch_size * time_step, 1), dtype = (tf.float32, tf.float32))

# photometric loss between predicted and expected transformation
# A 10-pixel border is cropped on each side before comparing, and both maps
# are re-normalized back to the network's input scale.
photometric_loss = tf.nn.l2_loss(tf.subtract((depth_maps_expected[:,10:-10,10:-10] - 40.0)/40.0, (depth_maps_predicted[:,10:-10,10:-10] - 40.0)/40.0))

# point cloud distance between point clouds
cloud_loss = model_utils.get_cd_loss(cloud_pred, cloud_exp)
# earth mover's distance between point clouds
emd_loss = model_utils.get_emd_loss(cloud_pred, cloud_exp)
# regression loss
# NOTE(review): this map_fn ranges over `batch_size` only, while the maps
# above range over `batch_size * time_step` -- if time_step > 1 the shapes of
# output_vectors and output_vectors_exp disagree in tr_loss below. Confirm
# whether this is intentional (e.g. time_step == 1) or a bug.
output_vectors_exp = tf.map_fn(lambda x: transform_functions.convert(expected_transforms[x]), elems=tf.range(0, batch_size, 1), dtype=tf.float32)
output_vectors_exp = tf.squeeze(output_vectors_exp)
# L2 norm over the first 3 components of each vector -- presumably the
# translation part of the se(3) vector; depends on the ordering used by
# transform_functions.convert (verify).
tr_loss = tf.norm(output_vectors[:, :3] - output_vectors_exp[:, :3], axis=1)
# ---- 示例#2 (Example #2) ---- scraped page marker and vote count "0";
# not Python code -- kept as a comment so the file remains parseable.
                                          # NOTE(review): the lines below are
                                          # the tail of a tf.nn.max_pool call
                                          # (producing X2_pooled) whose first
                                          # line is missing from this chunk.
                                          ksize=[1, 5, 5, 1],
                                          strides=[1, 1, 1, 1],
                                          padding="SAME")

# Build the network: `output_vectors` are per-sample 6-D se(3) vectors,
# `weight_summaries` are TF summary ops. Note the argument order here
# (X1, phase_rgb, X2_pooled, ...) differs from the Nets(...) constructor used
# in example #1 above.
output_vectors, weight_summaries = global_agg_net.End_Net_Out(
    X1, phase_rgb, X2_pooled, phase, keep_prob)

# se(3) -> SE(3) for the whole batch
predicted_transforms = tf.map_fn(
    lambda x: exponential_map_single(output_vectors[x]),
    elems=tf.range(0, batch_size, 1),
    dtype=tf.float32)

# transforms depth maps by the predicted transformation
# The `*40.0 + 40.0` undoes the input normalization -- NOTE(review): assumes
# depth was scaled to [-1, 1] from a 0..80 range; confirm against the
# preprocessing code.
depth_maps_predicted, cloud_pred = tf.map_fn(lambda x: at3._simple_transformer(
    X2_pooled[x, :, :, 0] * 40.0 + 40.0, predicted_transforms[x], K_final,
    small_transform),
                                             elems=tf.range(0, batch_size, 1),
                                             dtype=(tf.float32, tf.float32))

# transforms depth maps by the expected transformation
depth_maps_expected, cloud_exp = tf.map_fn(lambda x: at3._simple_transformer(
    X2_pooled[x, :, :, 0] * 40.0 + 40.0, expected_transforms[x], K_final,
    small_transform),
                                           elems=tf.range(0, batch_size, 1),
                                           dtype=(tf.float32, tf.float32))

# photometric loss between predicted and expected transformation
# A 10-pixel border is cropped on each side before comparing, and both maps
# are re-normalized back to the network's input scale.
photometric_loss = tf.nn.l2_loss(
    tf.subtract((depth_maps_expected[:, 10:-10, 10:-10] - 40.0) / 40.0,
                (depth_maps_predicted[:, 10:-10, 10:-10] - 40.0) / 40.0))