    def _test_rotation_formats(self, dtype, inverse_depth, normalize_flow):
        """Round-trips a random depth map through depth_to_flow and flow_to_depth2
        for each supported rotation format."""
        # random depth map and camera pose
        depth = np.random.uniform(5,10, (1,1,6,12)).astype(dtype)
        if inverse_depth:
            depth = 1/depth
        rotation = np.random.uniform(0.0,0.05, (1,3)).astype(dtype)
        translation = (np.array([[1,0,0]]) + np.random.uniform(-0.2,0.2, (1,3))).astype(dtype)
        intrinsics = np.array([[1,1,0.5,0.5]]).astype(dtype)

        flow = ops.depth_to_flow(
            depth=depth, 
            intrinsics=intrinsics,
            rotation=rotation, 
            translation=translation, 
            inverse_depth=inverse_depth, 
            normalize_flow=normalize_flow,).eval()

        # express the same rotation in the three supported formats
        rotation_aa = rotation
        rotation_R = angleaxis_to_rotation_matrix(rotation[0])[np.newaxis,:,:]
        rotation_q = angleaxis_to_quaternion(rotation[0])[np.newaxis,:]

        computed_depth_aa = ops.flow_to_depth2(
            flow=flow, 
            intrinsics=intrinsics,
            rotation=rotation_aa, 
            translation=translation, 
            inverse_depth=inverse_depth, 
            normalized_flow=normalize_flow,
            rotation_format='angleaxis3').eval()
        
        computed_depth_R = ops.flow_to_depth2(
            flow=flow, 
            intrinsics=intrinsics,
            rotation=rotation_R, 
            translation=translation, 
            inverse_depth=inverse_depth, 
            normalized_flow=normalize_flow,
            rotation_format='matrix').eval()

        computed_depth_q = ops.flow_to_depth2(
            flow=flow, 
            intrinsics=intrinsics,
            rotation=rotation_q, 
            translation=translation, 
            inverse_depth=inverse_depth, 
            normalized_flow=normalize_flow,
            rotation_format='quaternion').eval()

        # the three rotation formats must agree with each other and reproduce
        # the input depth map
        self.assertAllClose(computed_depth_aa, computed_depth_R, rtol=1e-4, atol=1e-4)
        self.assertAllClose(depth, computed_depth_q, rtol=1e-4, atol=1e-4)
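
# The test above relies on angleaxis_to_rotation_matrix and
# angleaxis_to_quaternion helpers that are not shown in this snippet. Below is
# a minimal NumPy sketch of what they might look like, assuming the convention
# that the vector's norm encodes the rotation angle and its direction the
# axis; the (w, x, y, z) quaternion order is likewise an assumption.
import numpy as np

def angleaxis_to_rotation_matrix(aa):
    """Converts an angle-axis vector to a 3x3 rotation matrix (Rodrigues' formula)."""
    angle = np.linalg.norm(aa)
    if angle < 1e-12:
        return np.eye(3, dtype=aa.dtype)  # near-zero rotation -> identity
    axis = aa / angle
    # skew-symmetric cross-product matrix of the rotation axis
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]], dtype=aa.dtype)
    return np.eye(3, dtype=aa.dtype) + np.sin(angle)*K + (1.0 - np.cos(angle))*(K @ K)

def angleaxis_to_quaternion(aa):
    """Converts an angle-axis vector to a unit quaternion in (w, x, y, z) order."""
    angle = np.linalg.norm(aa)
    if angle < 1e-12:
        return np.array([1.0, 0.0, 0.0, 0.0], dtype=aa.dtype)
    axis = aa / angle
    return np.concatenate(([np.cos(angle/2)], np.sin(angle/2)*axis)).astype(aa.dtype)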
def depth_from_flow_and_motion(inputs):
    flow = inputs[0]
    motion = inputs[1]

    # normalized camera intrinsics as [fx, fy, cx, cy]
    intrinsics = [0.89115971, 1.18821287, 0.5, 0.5]
    flow_nchw = tf.transpose(flow, [0, 3, 1, 2])
    rotation = tf.slice(motion, [0,0], [-1, 3])
    translation = tf.slice(motion, [0,3], [-1, 3])

    depth_nchw = lmbspecialops.flow_to_depth2(
        flow_nchw,
        intrinsics,
        rotation,
        translation,
        rotation_format='angleaxis3',
        inverse_depth=True,
        normalized_flow=False)

    return tf.transpose(depth_nchw, [0, 2, 3, 1])
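
# Hedged usage sketch for depth_from_flow_and_motion above, using TF1-style
# placeholders; the 48x64 resolution is an assumption, while the 6-element
# motion layout (3 angle-axis rotation values followed by 3 translation
# values) matches the slicing in the function.
import tensorflow as tf

flow_ph = tf.placeholder(tf.float32, shape=(None, 48, 64, 2))  # NHWC flow
motion_ph = tf.placeholder(tf.float32, shape=(None, 6))        # rotation + translation
depth_nhwc = depth_from_flow_and_motion([flow_ph, motion_ph])  # NHWC inverse depth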
    def test_shape_no_batch_dimension(self):
        dtype = np.float32

        flow = np.zeros((2,6,12)).astype(dtype)
        rotation = np.zeros((3,)).astype(dtype)
        translation = np.array([1,0,0]).astype(dtype)
        intrinsics = np.array([1,1,0.5,0.5]).astype(dtype)

        depth = np.zeros((1,1,6,12)).astype(dtype)
        computed_depth = ops.flow_to_depth2(
            flow=flow, 
            intrinsics=intrinsics,
            rotation=rotation, 
            translation=translation,)
        self.assertShapeEqual(depth, computed_depth)
    def test_shape_batch(self):
        dtype = np.float32

        batch = 7

        flow = np.zeros((batch,2,6,12)).astype(dtype)
        rotation = np.zeros((batch,3)).astype(dtype)
        translation = np.zeros((batch,3)).astype(dtype)
        intrinsics = np.zeros((batch,4)).astype(dtype)

        depth = np.zeros((batch,1,6,12)).astype(dtype)
        computed_depth = ops.flow_to_depth2(
            flow=flow, 
            intrinsics=intrinsics,
            rotation=rotation, 
            translation=translation,)
        self.assertShapeEqual(depth, computed_depth)
    def test_shape_batch_mismatch(self):
        dtype = np.float32

        # batch sizes for (flow, rotation, translation, intrinsics); rolling
        # cycles the mismatching size 5 through each argument in turn
        batch = np.array([7,7,7,5],dtype=np.int32)

        for i in range(4):
            batch = np.roll(batch,1)
            flow = np.zeros((batch[0],2,6,12)).astype(dtype)
            rotation = np.zeros((batch[1],3)).astype(dtype)
            translation = np.zeros((batch[2],3)).astype(dtype)
            intrinsics = np.zeros((batch[3],4)).astype(dtype)

            with self.assertRaises(ValueError) as cm:
                computed_depth = ops.flow_to_depth2(
                    flow=flow, 
                    intrinsics=intrinsics,
                    rotation=rotation, 
                    translation=translation,)
            self.assertStartsWith(str(cm.exception), 'Dimensions must be equal')
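
# The shape tests above are methods of a test case; a minimal sketch of the
# enclosing class, assuming it derives from tf.test.TestCase (which provides
# assertAllClose and assertShapeEqual). The name FlowToDepth2Test is
# hypothetical.
import tensorflow as tf

class FlowToDepth2Test(tf.test.TestCase):
    pass  # the test methods shown above would live here

if __name__ == '__main__':
    tf.test.main()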
Example #6
def depthmotion_block(image_pair,
                      image2_2,
                      prev_flow2,
                      prev_flowconf2,
                      prev_rotation=None,
                      prev_translation=None,
                      intrinsics=None,
                      data_format='channels_first',
                      kernel_regularizer=None):
    """Creates a depth and motion network
    
    image_pair: Tensor
        Image pair concatenated along the channel axis.
        The tensor format is NCHW with C == 6.

    image2_2: Tensor
        Second image at resolution level 2

    prev_flow2: Tensor
        The output of the flow network. Contains only the flow (2 channels)

    prev_flowconf2: Tensor
        The output of the flow network. Contains flow and flow confidence (4 channels)

    prev_rotation: Tensor
        The previously predicted rotation.
        
    prev_translation: Tensor
        The previously predicted translation.

    intrinsics: Tensor
        Tensor with the intrinsic camera parameters
        Only required if prev_rotation and prev_translation are not None.
        
    Returns a dictionary with the predictions for depth, normals and motion
    """
    conv_params = {
        'data_format': data_format,
        'kernel_regularizer': kernel_regularizer
    }
    fc_params = {}

    # contracting part
    conv1 = convrelu2(name='conv1',
                      inputs=image_pair,
                      num_outputs=(24, 32),
                      kernel_size=9,
                      stride=2,
                      **conv_params)

    conv2 = convrelu2(name='conv2',
                      inputs=conv1,
                      num_outputs=32,
                      kernel_size=7,
                      stride=2,
                      **conv_params)
    # create extra inputs
    if data_format == 'channels_first':
        image2_2_warped = sops.warp2d(image2_2,
                                      prev_flow2,
                                      normalized=True,
                                      border_mode='value')
    else:
        prev_flow2_nchw = convert_NHWC_to_NCHW(prev_flow2)
        image2_2_warped = convert_NCHW_to_NHWC(
            sops.warp2d(convert_NHWC_to_NCHW(image2_2),
                        prev_flow2_nchw,
                        normalized=True,
                        border_mode='value'))

    extra_inputs = [image2_2_warped, prev_flowconf2]
    if (prev_rotation is not None) and (prev_translation is not None) and (
            intrinsics is not None):
        if data_format == 'channels_first':
            depth_from_flow = sops.flow_to_depth2(
                flow=prev_flow2,
                intrinsics=intrinsics,
                rotation=prev_rotation,
                translation=prev_translation,
                normalized_flow=True,
                inverse_depth=True,
            )
        else:
            depth_from_flow = convert_NCHW_to_NHWC(
                sops.flow_to_depth2(
                    flow=prev_flow2_nchw,
                    intrinsics=intrinsics,
                    rotation=prev_rotation,
                    translation=prev_translation,
                    normalized_flow=True,
                    inverse_depth=True,
                ))
        depth_from_flow = tf.clip_by_value(depth_from_flow, 0.0, 50.0)

        extra_inputs.append(depth_from_flow)

    concat_extra_inputs = tf.stop_gradient(
        tf.concat(extra_inputs,
                  axis=1 if data_format == 'channels_first' else 3))
    conv_extra_inputs = convrelu2(name='conv2_extra_inputs',
                                  inputs=concat_extra_inputs,
                                  num_outputs=32,
                                  kernel_size=3,
                                  stride=1,
                                  **conv_params)
    conv2_concat = tf.concat((conv2, conv_extra_inputs),
                             axis=1 if data_format == 'channels_first' else 3)
    conv2_1 = convrelu2(name='conv2_1',
                        inputs=conv2_concat,
                        num_outputs=64,
                        kernel_size=3,
                        stride=1,
                        **conv_params)

    conv3 = convrelu2(name='conv3',
                      inputs=conv2_1,
                      num_outputs=(96, 128),
                      kernel_size=5,
                      stride=2,
                      **conv_params)
    conv3_1 = convrelu2(name='conv3_1',
                        inputs=conv3,
                        num_outputs=128,
                        kernel_size=3,
                        stride=1,
                        **conv_params)

    conv4 = convrelu2(name='conv4',
                      inputs=conv3_1,
                      num_outputs=(192, 256),
                      kernel_size=5,
                      stride=2,
                      **conv_params)
    conv4_1 = convrelu2(name='conv4_1',
                        inputs=conv4,
                        num_outputs=256,
                        kernel_size=3,
                        stride=1,
                        **conv_params)

    conv5 = convrelu2(name='conv5',
                      inputs=conv4_1,
                      num_outputs=384,
                      kernel_size=3,
                      stride=2,
                      **conv_params)
    conv5_1 = convrelu2(name='conv5_1',
                        inputs=conv5,
                        num_outputs=384,
                        kernel_size=3,
                        stride=1,
                        **conv_params)

    dense_slice_shape = conv5_1.get_shape().as_list()
    if data_format == 'channels_first':
        dense_slice_shape[1] = 96
    else:
        dense_slice_shape[-1] = 96
    units = 1
    for i in range(1, len(dense_slice_shape)):
        units *= dense_slice_shape[i]
    dense5 = tf.layers.dense(tf.contrib.layers.flatten(
        tf.slice(conv5_1, [0, 0, 0, 0], dense_slice_shape)),
                             units=units,
                             activation=myLeakyRelu,
                             kernel_initializer=default_weights_initializer(),
                             kernel_regularizer=kernel_regularizer,
                             name='dense5')
    conv5_1_dense5 = tf.concat(
        (conv5_1, tf.reshape(dense5, dense_slice_shape)),
        axis=1 if data_format == 'channels_first' else 3)

    # motion prediction part
    motion_conv3 = convrelu2(name='motion_conv3',
                             inputs=conv2_1,
                             num_outputs=64,
                             kernel_size=5,
                             stride=2,
                             **conv_params)
    motion_conv4 = convrelu2(name='motion_conv4',
                             inputs=motion_conv3,
                             num_outputs=64,
                             kernel_size=5,
                             stride=2,
                             **conv_params)
    motion_conv5a = convrelu2(name='motion_conv5a',
                              inputs=motion_conv4,
                              num_outputs=64,
                              kernel_size=3,
                              stride=2,
                              **conv_params)

    motion_conv5b = convrelu(
        name='motion_conv5b',
        inputs=conv5_1_dense5,
        num_outputs=64,
        kernel_size=3,
        strides=1,
        **conv_params,
    )
    motion_conv5_1 = tf.concat(
        (motion_conv5a, motion_conv5b),
        axis=1 if data_format == 'channels_first' else 3)

    if data_format == 'channels_last':
        motion_conv5_1 = convert_NHWC_to_NCHW(motion_conv5_1)
    motion_fc1 = tf.layers.dense(
        name='motion_fc1',
        inputs=tf.contrib.layers.flatten(motion_conv5_1),
        units=1024,
        activation=myLeakyRelu,
        kernel_regularizer=kernel_regularizer,
        **fc_params,
    )
    motion_fc2 = tf.layers.dense(
        name='motion_fc2',
        inputs=motion_fc1,
        units=128,
        activation=myLeakyRelu,
        kernel_regularizer=kernel_regularizer,
        **fc_params,
    )
    predict_motion_scale = tf.layers.dense(
        name='motion_fc3',
        inputs=motion_fc2,
        units=7,
        activation=None,
        kernel_regularizer=kernel_regularizer,
        **fc_params,
    )

    predict_rotation, predict_translation, predict_scale = tf.split(
        value=predict_motion_scale, num_or_size_splits=[3, 3, 1], axis=1)

    # expanding part
    with tf.variable_scope('refine4'):
        concat4 = _refine(
            inp=conv5_1,
            num_outputs=256,
            features_direct=conv4_1,
            **conv_params,
        )

    with tf.variable_scope('refine3'):
        concat3 = _refine(
            inp=concat4,
            num_outputs=128,
            features_direct=conv3_1,
            **conv_params,
        )

    with tf.variable_scope('refine2'):
        concat2 = _refine(
            inp=concat3,
            num_outputs=64,
            features_direct=conv2_1,
            **conv_params,
        )

    with tf.variable_scope('predict_depthnormal2'):
        predict_depth2, predict_normal2 = _predict_depthnormal(
            concat2, predicted_scale=predict_scale, **conv_params)

    return {
        'predict_depth2': predict_depth2,
        'predict_normal2': predict_normal2,
        'predict_rotation': predict_rotation,
        'predict_translation': predict_translation,
        'predict_scale': predict_scale,
    }
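
# Hedged usage sketch for depthmotion_block with TF1-style placeholders in the
# default channels_first layout, assuming the helper functions referenced
# above (convrelu2, _refine, _predict_depthnormal, ...) are available. The
# fixed batch size and the 192x256 input resolution (so that the level-2
# tensors are 48x64 after two stride-2 convolutions) are assumptions.
import tensorflow as tf

image_pair = tf.placeholder(tf.float32, (8, 6, 192, 256))    # two RGB images stacked
image2_2 = tf.placeholder(tf.float32, (8, 3, 48, 64))        # second image at level 2
prev_flow2 = tf.placeholder(tf.float32, (8, 2, 48, 64))      # flow from the flow network
prev_flowconf2 = tf.placeholder(tf.float32, (8, 4, 48, 64))  # flow + confidence

predictions = depthmotion_block(image_pair, image2_2, prev_flow2, prev_flowconf2)
rotation = predictions['predict_rotation']        # (8, 3) angle-axis
translation = predictions['predict_translation']  # (8, 3)
depth2 = predictions['predict_depth2']            # depth prediction at level 2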