Example #1
def SLoss(q_true, q_pred, M, n_classes, no_of_points):
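    # Shape-match style pose loss: for each of the n_classes quaternions, rotate
    # the model points M by both the ground-truth and the estimated rotation and
    # accumulate the minimum squared distance between the two rotated point sets
    # (the min over points makes the loss tolerant to symmetric objects).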
    losses = []
    M = tf.transpose(M, [0, 2, 1])
    for i in range(0, 4 * n_classes, 4):
        GTR = tfq.Quaternion(tf.slice(q_true, [0, i], [1, 4])).as_rotation_matrix()
        ESTR = tfq.Quaternion(tf.slice(q_pred, [0, i], [1, 4])).as_rotation_matrix()
        ALL_ESTR = tf.matmul(ESTR[0], M[0])
        ALL_GTR = tf.matmul(GTR[0], M[0])
        for k in range(1, no_of_points):
            ALL_ESTR = tf.concat([ALL_ESTR, tf.matmul(ESTR[0], M[k])], axis=0)
            ALL_GTR = tf.concat([ALL_GTR, tf.matmul(GTR[0], M[k])], axis=0)
        totloss = []
        if no_of_points > 1:
            for j in range(no_of_points):
                subloss = tf.multiply(tf.norm(tf.subtract(ALL_ESTR[j], ALL_GTR[0])), tf.norm(tf.subtract(ALL_ESTR[j], ALL_GTR[0])))
                for n in range(no_of_points):
                    subloss = tf.minimum(tf.multiply(tf.norm(tf.subtract(ALL_ESTR[j], ALL_GTR[n])), tf.norm(tf.subtract(ALL_ESTR[j], ALL_GTR[n]))), subloss)
                totloss.append(subloss)
        else:
            subloss = tf.multiply(tf.norm(tf.subtract(ALL_ESTR, ALL_GTR)), tf.norm(tf.subtract(ALL_ESTR, ALL_GTR)))
            totloss.append(subloss)
        # stack the scalar per-point sub-losses and normalise by the point count
        retloss = tf.stack(totloss)
        loss = tf.divide(tf.reduce_sum(retloss), 2 * no_of_points)
        losses.append(loss)
    # stack the per-class losses; the mean below is then taken over classes
    finalLoss = tf.stack(losses)
    return tf.reduce_mean(finalLoss, name="pose_loss")
Example #2
def rotate_quat(q1, q2):
    """
    Applies the rotation described by q2 to q1

    :param q1: Initial quaternion
    :param q2: Rotation quaternion
    :return: The rotated quaternion
    """

    if any([isinstance(q1, tf.Tensor), isinstance(q2, tf.Tensor)]):
        return tfq.Quaternion(q2) * tfq.Quaternion(q1)

    if len(np.shape(q1)) == 2:
        if len(np.shape(q2)) == 2:
            return np.array([(Quaternion(q2_i) * Quaternion(q1_i)).elements
                             for q1_i, q2_i in zip(q1, q2)])
        elif len(np.shape(q2)) == 1 and len(q2) == len(q1):
            return np.array([(Quaternion(q2) * Quaternion(q1_i)).elements
                             for q1_i in q1])
        else:
            raise TypeError(
                "If the initial quaternion is a matrix, there must only be 1 rotation quaternion, "
                "or exactly as many rotation quaternions as initial quaternions"
            )

    elif len(np.shape(q1)) == 1:
        if len(np.shape(q2)) == 1:
            return Quaternion(q2) * Quaternion(q1)
        else:
            raise TypeError(
                "If there is only an initial quaternion, there must only be one rotation quaternion"
            )
    else:
        raise TypeError(
            "The initial quaternion must be a vector or a 2D array")
Example #3
 def test___ne__(self):
     ref = tf.constant(np.arange(4), dtype=tf.float32)
     ref2 = tf.constant(np.arange(2, 6), dtype=tf.float32)
     ref_q = tfq.Quaternion(ref)
     with self.test_session():
         self.assertAllFalse(ref_q != ref)
         self.assertAllFalse(ref_q != ref_q)
         self.assertAllFalse(ref_q != tfq.Quaternion(ref))
         self.assertAllTrue(ref_q != ref2)
Example #4
def frob_loss3r(y_true, y_pred):
    y_true = K.reshape(y_true, [-1, y_pred.shape[-1]])
    y_true = tfq.Quaternion(y_true)
    m_true = y_true.as_rotation_matrix()

    y_pred = tfq.Quaternion(y_pred)
    m_pred = y_pred.as_rotation_matrix()
    return settings.BETA * tf.norm(
        (m_true - m_pred), ord='fro', axis=[-2, -1], keepdims=True)
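A minimal sketch of calling frob_loss3r on a single quaternion pair (it assumes settings.BETA and the Keras backend alias K are defined as in the original module; the values are illustrative and the result is a TF tensor to be evaluated in a session):

import numpy as np
import tensorflow as tf

y_true = tf.constant([[1.0, 0.0, 0.0, 0.0]], dtype=tf.float32)      # identity rotation
y_pred = tf.constant([[np.cos(np.pi / 8), 0.0, 0.0, np.sin(np.pi / 8)]],
                     dtype=tf.float32)                               # 45 deg about z
loss = frob_loss3r(y_true, y_pred)  # BETA * ||R_true - R_pred||_F, shape (1, 1, 1)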
Example #5
 def test___init__(self):
     ref = tfq.Quaternion([1.0, 0.0, 0.0, 0.0])
     val = (1.0, 0.0, 0.0, 0.0)
     variable = tf.Variable(val)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         #default constructor
         self.assertAllEqual(tfq.Quaternion(), ref)
         # from variable
         self.assertAllEqual(tfq.Quaternion(variable), ref)
         # from constant
         self.assertAllEqual(tfq.Quaternion(tf.constant(val)), ref)
         # from np.array
         self.assertAllEqual(tfq.Quaternion(np.array(val)), ref)
         # from list
         self.assertAllEqual(tfq.Quaternion([1.0, 0.0, 0.0, 0.0]), ref)
         self.assertAllEqual(tfq.Quaternion([1, 0, 0, 0]), ref)
         # from tuple
         self.assertAllEqual(tfq.Quaternion(val), ref)
         # from Quaternion
         self.assertAllEqual(tfq.Quaternion(ref), ref)
         # wrong shape
         self.assertRaises(ValueError, tfq.Quaternion, [[1, 0]])
         # wrong type
         self.assertRaises(TypeError, tfq.Quaternion, val, dtype=tf.int32)
         self.assertRaises(TypeError,
                           tfq.Quaternion, [1, 0, 0, 0],
                           dtype=tf.int32)
Example #6
def SLoss_accuracy(q_true, q_pred, n_classes):
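    # Converts each ground-truth/estimated quaternion pair to rotation matrices,
    # compares them element-wise with tf.equal, and sums the per-class mean of
    # exact matches over all classes.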
    losses = []
    for i in range(0, 4 * n_classes, 4):
        GTR = tfq.Quaternion(tf.slice(q_true, [0, i], [1, 4])).as_rotation_matrix()
        ESTR = tfq.Quaternion(tf.slice(q_pred, [0, i], [1, 4])).as_rotation_matrix()
        equals = tf.equal(GTR, ESTR)
        losses.append(tf.reduce_mean(tf.cast(equals, dtype=tf.float32)))
    acc = losses[0]
    for l in range(1, len(losses)):
        acc = tf.add(acc, losses[l])
    return acc
Example #7
 def test_abs(self):
     delta = 0.00001
     q1 = tfq.Quaternion((1, 2, 3, 4))
     q2 = tfq.Quaternion((0, 0, 0, 0))
     with self.test_session(use_gpu=True):
         self.assertAlmostEqual(q1.abs(), [5.47722], delta=delta)
         self.assertAlmostEqual((q1 * -1.0).abs(), [5.47722], delta=delta)
         self.assertAllEqual(q2.abs(), [0.0])
         self.assertAllClose(tfq.Quaternion([q1.value(),
                                             q2.value()]).abs(),
                             [[5.47722], [0.0]],
                             atol=delta)
Example #8
 def test_quaternion_multiply(self):
     qs_np, qs_tf = get_quaternions()
     with self.test_session() as sess:
         for q in qs_tf[QS_FINITE]:
             self.assertAllEqual(q * qs_tf[Q_1], q)
             self.assertAllEqual(q * 1.0, q)
             self.assertAllEqual(1.0 * q, q)
             self.assertAllEqual(0.0 * q, np.zeros(q.shape))
             self.assertAllEqual(0.0 * q, q * 0.0)
         # Check multiplication with scalar
         for s in [-3., -2.3, -1.2, -1., 0., 1.0, 1.2, 2.3, 3.]:
             for q in qs_tf[QS_FINITE]:
                 q_w, q_x, q_y, q_z = tf.unstack(q, axis=-1)
                 s_times_q = [s * q_w, s * q_x, s * q_y, s * q_z]
                 s_times_q = tfq.Quaternion(tf.stack(s_times_q, axis=-1))
                 self.assertAllEqual(q * s, s_times_q)
                 self.assertAllEqual(s * q, q * s)
         # Check linearity (use placeholders to speed this up)
         pl1, pl2, pl3 = [
             tf.placeholder(tf.float32, (None, 4)) for i in range(3)
         ]
         q1, q2, q3 = [tfq.Quaternion(pl) for pl in [pl1, pl2, pl3]]
         ops = [
             q1 * (q2 + q3),
             (q1 * q2) + (q1 * q3),  # check 1
             (q1 + q2) * q3,
             (q1 * q3) + (q2 * q3)
         ]  # check 2
         triplets = itertools.permutations(qs_np[QS_FINITENONMULTI], 3)
         for q1np, q2np, q3np in triplets:
             q1np = q1np.reshape(1, 4) if q1np.shape == (4, ) else q1np
             q2np = q2np.reshape(1, 4) if q2np.shape == (4, ) else q2np
             q3np = q3np.reshape(1, 4) if q3np.shape == (4, ) else q3np
             fd = {pl1: q1np, pl2: q2np, pl3: q3np}
             result = sess.run(ops, feed_dict=fd)
             # checks q1 * (q2 + q3) == (q1 * q2) + (q1 * q3),
             self.assertAllClose(result[0], result[1])
             # checks (q1 + q2) * q3 == (q1 * q3) + (q2 * q3)
             self.assertAllClose(result[2], result[3])
         # Check the multiplication table
         for q in [qs_tf[Q_1], qs_tf[X], qs_tf[Y], qs_tf[Z]]:
             self.assertAllEqual(qs_tf[Q_1] * q, q)
             self.assertAllEqual(q * qs_tf[Q_1], q)
         self.assertAllEqual(qs_tf[X] * qs_tf[X], -qs_tf[Q_1])
         self.assertAllEqual(qs_tf[X] * qs_tf[Y], qs_tf[Z])
         self.assertAllEqual(qs_tf[X] * qs_tf[Z], -qs_tf[Y])
         self.assertAllEqual(qs_tf[Y] * qs_tf[X], -qs_tf[Z])
         self.assertAllEqual(qs_tf[Y] * qs_tf[Y], -qs_tf[Q_1])
         self.assertAllEqual(qs_tf[Y] * qs_tf[Z], qs_tf[X])
         self.assertAllEqual(qs_tf[Z] * qs_tf[X], qs_tf[Y])
         self.assertAllEqual(qs_tf[Z] * qs_tf[Y], -qs_tf[X])
         self.assertAllEqual(qs_tf[Z] * qs_tf[Z], -qs_tf[Q_1])
Example #9
def rotate_vec(v, q):
    if any([isinstance(q, tf.Tensor), isinstance(v, tf.Tensor)]):
        return tf.map_fn(lambda x: tf.squeeze(tf.matmul(
            tfq.Quaternion(x[1]).as_rotation_matrix(),
            tf.expand_dims(x[0], axis=1)),
                                              axis=1), (v, q),
                         dtype=tf.float32)
    if len(np.shape(v)) == 2:
        if len(np.shape(q)) == 2:
            return np.array(
                [Quaternion(q_i).unit.rotate(v_i) for q_i, v_i in zip(q, v)])
        elif len(np.shape(q)) == 1 and len(q) == len(v):
            return np.array([Quaternion(q).unit.rotate(v_i) for v_i in v])
        else:
            raise TypeError(
                "If the vector is a matrix, there must only be 1 quaternion, "
                "or exactly as many quaternions as vectors")

    elif len(np.shape(v)) == 1:
        if len(np.shape(q)) == 1:
            return Quaternion(q).unit.rotate(v)
        else:
            raise TypeError(
                "If there is only a vector, there must only be one quaternion")
    else:
        raise TypeError("v should be a vector or a 2D array")
Example #10
    def tf_scatter(self, v):
        """
        Scatter the given direction tensor v.

        Parameters
        ----------
        v : TF tensor, shape(?, 3)
            Direction vectors of the photons which are being scattered.

        Returns
        -------
        The scattered direction tensor of shape(?, 3).
        """
        # sample cos(theta)
        cosTs = 2 * self._uni_pdf.sample(tf.shape(v)[0])**(1 / 19) - 1
        cosT2s = tf.sqrt((cosTs + 1) / 2)
        sinT2s = tf.sqrt((1 - cosTs) / 2)
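        # cosT2s and sinT2s are cos(theta/2) and sin(theta/2) via the half-angle
        # identities; the quaternion built below, (cos(theta/2), sin(theta/2) * n),
        # rotates a vector by theta about the unit axis n.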

        ns = tf.transpose(
            self.tf_sample_normal_vectors(v) * tf.expand_dims(sinT2s, axis=-1))
        # ignore the fact that n could be parallel to v, what's the probability
        # of that happening?

        q = tfq.Quaternion(tf.transpose([cosT2s, ns[0], ns[1], ns[2]]))
        return tfq.rotate_vector_by_quaternion(q, v)
Example #11
 def true_transform(self, q, pos):
     rot = tfq.Quaternion(q).as_rotation_matrix()
     r1 = tf.concat([rot[:, 0], pos[:, 0:1]], axis=-1)
     r2 = tf.concat([rot[:, 1], pos[:, 1:2]], axis=-1)
     r3 = tf.concat([rot[:, 2], pos[:, 2:3]], axis=-1)
     r4 = [[0, 0, 0, 1] for _ in range(self.batch_size)]
     trans = tf.stack([r1, r2, r3, r4], axis=-1)
     return trans
Example #12
 def test___le__(self):
     ref = tf.constant(np.arange(4), dtype=tf.float32)
     ref2 = tf.constant(np.arange(2, 6), dtype=tf.float32)
     ref3 = tf.constant(np.arange(-2, 2), dtype=tf.float32)
     ref_q = tfq.Quaternion(ref)
     with self.test_session():
         self.assertAllTrue(ref <= ref)
         self.assertAllTrue(ref <= ref2)
         self.assertAllTrue(ref3 <= ref)
Example #13
 def augment(points, segment_ids, training_batch_size):
     """Rotate a full scene around the z axis and scale on each axis."""
     scene_ids = segment_ids // 2
     bs = training_batch_size * 3
     rand_angle = tf.random_uniform([bs], maxval=(2 * pi))
     rot = tf.stack([[1.] * bs, [0.] * bs, [0.] * bs, rand_angle], axis=1)
     rotations = tfq.Quaternion(tf.gather(rot, scene_ids))
     points = tfq.rotate_vector_by_quaternion(rotations, points, 2, 2)
     return points
Example #14
def camera_loss(landmark, pred_landmark, pose, data_dict):

    visibility = data_dict['visibility']

    quaternion = data_dict['quaternion']
    translation = data_dict['translation']
    depth = data_dict["depth"]


    quat_est = tfq.Quaternion(pose[:,0:4])
    quat_gt = tfq.Quaternion(quaternion)

    #Get gt and estimate landmark locations
    pred_lm_coord = tf.reverse(argmax_2d(pred_landmark),[1])
    gt_lm_coord = tf.reverse(argmax_2d(landmark),[1])
    
    #import pdb;pdb.set_trace()

    batch_index = tf.tile(tf.expand_dims(tf.range(tf.shape(pred_landmark)[0]), 1), [1, tf.shape(pred_landmark)[3]])
    index_gt = tf.concat([tf.expand_dims(batch_index,axis=2), tf.transpose(tf.reverse(gt_lm_coord,[1]),[0,2,1])], axis=2)
    index_pred = tf.concat([tf.expand_dims(batch_index,axis=2), tf.transpose(tf.reverse(pred_lm_coord,[1]),[0,2,1])], axis=2)


    gt_depth_val = tf.gather_nd(depth[:,:,:,0],index_gt)*100.0
    pred_depth_val = tf.gather_nd(depth[:,:,:,0],index_pred)*100.0
    
    ones = tf.ones([tf.shape(pred_landmark)[0], 1, tf.shape(pred_landmark)[3]])
    pred_lm_coord = tf.concat([tf.cast(pred_lm_coord,tf.float32),ones],axis=1)
    gt_lm_coord = tf.concat([tf.cast(gt_lm_coord,tf.float32),ones],axis=1)

    gt_cam_coord = pixel2cam(gt_depth_val,gt_lm_coord,data_dict["matK"])
    pred_cam_coord = pixel2cam(pred_depth_val,pred_lm_coord,data_dict["matK"])

    #import pdb;pdb.set_trace()
    gt_lm_3D = tf.matmul(quat_gt.as_rotation_matrix(), gt_cam_coord)+tf.tile(tf.expand_dims(translation[:,0:3]*tf.expand_dims(translation[:,-1],axis=1),axis=2),[1,1,tf.shape(gt_cam_coord)[2]])#  +tf.tile(tf.expand_dims(translation[:,0:3]*translation[:,3],axis=2),[1,1,tf.shape(gt_cam_coord)[2]])
    pred_lm_3D = tf.matmul(quat_est.as_rotation_matrix(), pred_lm_coord)+tf.tile(tf.expand_dims(pose[:,4:-1]*tf.expand_dims(pose[:,-1],axis=1),axis=2),[1,1,tf.shape(gt_cam_coord)[2]])#   +tf.tile(tf.expand_dims(pose[:,4:-1]*translation[:,-1],axis=2),[1,1,tf.shape(pred_lm_coord)[2]])

    lm3d_weights = tf.clip_by_value(visibility,0.0,1.5)
    lm3d_weights = tf.tile(tf.expand_dims(lm3d_weights,axis=1),[1,3,1])
    gt_lm_3D = gt_lm_3D*lm3d_weights
    #import pdb;pdb.set_trace()
    transformation_loss = l2loss(gt_lm_3D, pred_lm_3D, lm3d_weights)
    return transformation_loss
Example #15
    def test_quaternion_divide(self):
        qs_np, qs_tf = get_quaternions()
        with self.test_session() as sess:
            # Check scalar division
            for q in qs_tf[QS_FINITENONZERO]:
                # use + np.zeros to broadcast qs_np[Q_1] to the multidim shape
                self.assertAllClose(q / q, qs_np[Q_1] + np.zeros(q.shape))
                self.assertRaises(TypeError, tfq.quaternion_divide, 1, q)
                self.assertAllClose(1.0 / q, q.inverse())
                self.assertAllClose([1.0] / q, q.inverse())
                self.assertAllClose(0.0 / q, qs_np[Q_0] + np.zeros(q.shape))
                for s in [-3., -2.3, -1.2, -1., 0., 1.0, 1.2, 2.3, 3.]:
                    self.assertAllClose(s / q, s * (q.inverse()))
            for q in qs_tf[QS_NONNAN]:
                self.assertAllClose(q / 1.0, q)
                for s in [-3., -2.3, -1.2, -1., 0., 1.0, 1.2, 2.3, 3.]:
                    # use np.array(1) to allow division by zero
                    gt = q * (np.array(1.0) / s).astype(np.float32)
                    self.assertAllClose(q / s, gt)

            # Check linearity
            pl1, pl2, pl3 = [
                tf.placeholder(tf.float32, (None, 4)) for i in range(3)
            ]
            q1, q2, q3 = [tfq.Quaternion(pl) for pl in [pl1, pl2, pl3]]
            ops = [(q1 + q2) / q3, (q1 / q3) + (q2 / q3)]
            for q1np, q2np in itertools.permutations(qs_np[QS_FINITE], 2):
                for q3np in qs_np[QS_FINITENONZERO]:
                    q1np = q1np.reshape(1, 4) if q1np.shape == (4, ) else q1np
                    q2np = q2np.reshape(1, 4) if q2np.shape == (4, ) else q2np
                    q3np = q3np.reshape(1, 4) if q3np.shape == (4, ) else q3np
                    fd = {pl1: q1np, pl2: q2np, pl3: q3np}
                    a, b = sess.run(ops, feed_dict=fd)
                    # checks (q1 + q2) / q3 == (q1 / q3) + (q2 / q3)
                    self.assertAllClose(a, b)

            # Check the multiplication table
            for q in [qs_tf[Q_1], qs_tf[X], qs_tf[Y], qs_tf[Z]]:
                self.assertAllClose(qs_tf[Q_1] / q, q.conj())
                self.assertAllClose(q / qs_tf[Q_1], q)
            self.assertAllClose(qs_tf[X] / qs_tf[X], qs_tf[Q_1])
            self.assertAllClose(qs_tf[X] / qs_tf[Y], -qs_tf[Z])
            self.assertAllClose(qs_tf[X] / qs_tf[Z], qs_tf[Y])
            self.assertAllClose(qs_tf[Y] / qs_tf[X], qs_tf[Z])
            self.assertAllClose(qs_tf[Y] / qs_tf[Y], qs_tf[Q_1])
            self.assertAllClose(qs_tf[Y] / qs_tf[Z], -qs_tf[X])
            self.assertAllClose(qs_tf[Z] / qs_tf[X], -qs_tf[Y])
            self.assertAllClose(qs_tf[Z] / qs_tf[Y], qs_tf[X])
            self.assertAllClose(qs_tf[Z] / qs_tf[Z], qs_tf[Q_1])
Example #16
    def _input_fn(self, obj_ids, translations, rotations, train_batch_size,
                  num_objs, do_augmentation):
        """The input function 's part that is shared.

        This function creates the scene point clouds from scene descriptions.

        Returns: Two tf.Tensors, the first contains all points of the
            objects in the batch with shape (N, 3) and the second contains the
            corresponding segment ids, the shape is (N,).
        """
        batch_size = tf.shape(obj_ids)[0]
        # flatten all inputs
        obj_ids = tf.reshape(obj_ids, (-1, ))
        translations = tf.reshape(translations, (-1, 3))
        rotations = tf.reshape(rotations, (-1, 4))
        clouds_num_points = (self.cloud_slice_indices[1:] -
                             self.cloud_slice_indices[:-1])
        # vector with the number of points of each cloud
        num_points = tf.gather(clouds_num_points, obj_ids)
        # vector with a range where each number i is num_points[i] repeated
        segment_ids = self._repeat(tf.range(tf.shape(num_points)[0]),
                                   tf.to_int32(num_points), batch_size,
                                   num_objs)
        segment_ids = tf.to_int32(segment_ids)
        # repeat translations[i] and rotations[i] num_points[i] times
        translations = tf.gather(translations, segment_ids)
        rotations = tf.gather(rotations, segment_ids)
        rotations = tfq.Quaternion(rotations)
        obj_ids = tf.gather(tf.to_float(obj_ids), segment_ids)
        # indices of points consist of the start index plus range(num_points)
        start = tf.gather(self.cloud_slice_indices, tf.to_int32(obj_ids))
        ranges = tf.cond(
            tf.equal(batch_size, 1),
            lambda: tf.concat([tf.range(num_points[i]) for i in range(2)], axis=0),
            lambda: tf.concat([tf.range(num_points[i]) for i in range(num_objs)],
                              axis=0))
        point_ids = tf.to_int32(start + ranges)
        points = tf.gather(self.clouds_tensor, point_ids)
        # Rotate objects. Note that the quaternions are relative to the object
        # clouds' origins, so no centering using the mean is required.
        points = tfq.rotate_vector_by_quaternion(rotations, points, 2, 2)
        points = tf.squeeze(points) + translations
        # if we're training, randomly rotate around the z_axis
        if do_augmentation:
            points = augment.pointcloud(points, segment_ids, batch_size,
                                        train_batch_size)
        return points, tf.to_float(segment_ids)
Example #17
def get_quaternions():
    """ Returns two np.arrays of testing quaternions as np.array and
        Quaternion.
    """
    q_nan1 = np.array([np.nan, 0., 0., 0.], dtype=np.float32)
    q_inf1 = np.array([np.inf, 0., 0., 0.], dtype=np.float32)
    q_minf1 = np.array([-np.inf, 0., 0., 0.], dtype=np.float32)
    q_0 = np.array([0., 0., 0., 0.], dtype=np.float32)
    q_1 = np.array([1., 0., 0., 0.], dtype=np.float32)
    x = np.array([0., 1., 0., 0.], dtype=np.float32)
    y = np.array([0., 0., 1., 0.], dtype=np.float32)
    z = np.array([0., 0., 0., 1.], dtype=np.float32)
    q = np.array([1.1, 2.2, 3.3, 4.4], dtype=np.float32)
    qneg = np.array([-1.1, -2.2, -3.3, -4.4], dtype=np.float32)
    qbar = np.array([1.1, -2.2, -3.3, -4.4], dtype=np.float32)
    qnormalized = np.array([
        0.18257418583505537115232326093360, 0.36514837167011074230464652186720,
        0.54772255750516611345696978280080, 0.73029674334022148460929304373440
    ],
                           dtype=np.float32)
    qlog = np.array([
        1.7959088706354, 0.515190292664085, 0.772785438996128, 1.03038058532817
    ],
                    dtype=np.float32)
    qexp = np.array([
        2.81211398529184, -0.392521193481878, -0.588781790222817,
        -0.785042386963756
    ],
                    dtype=np.float32)
    qmultidim = np.array([[1.1, 2.2, 3.3, 4.4], [1.1, 2.2, 3.3, 4.4]],
                         dtype=np.float32)
    np_quats = np.array([
        q_nan1, q_inf1, q_minf1, q_0, q_1, x, y, z, q, qneg, qbar, qnormalized,
        qlog, qexp, qmultidim
    ],
                        dtype=object)
    tf_quats = np.array([tfq.Quaternion(q_np) for q_np in np_quats])
    return np_quats, tf_quats
Example #18
 def test_quaternion_to_tensor(self):
     ref = tfq.Quaternion([1.0, 0.0, 0.0, 0.0])
     with self.test_session():
         self.assertAllEqual(type(tf.convert_to_tensor(ref)), tf.Tensor)
         # reduce sum internally calls tf.convert_to_tensor
         self.assertAllEqual(tf.reduce_sum(ref), 1.0)
Example #19
def quat_mult_error(y_true, y_pred):
    q_hat = tfq.Quaternion(tf.gather(y_true, [3, 0, 1, 2], axis=1))
    q = tfq.Quaternion(tf.gather(y_pred, [3, 0, 1, 2], axis=1)).normalized()
    q_prod = q * q_hat.conjugate()
    w, x, y, z = tf.split(q_prod, num_or_size_splits=4, axis=-1)
    return tf.abs(tf.multiply(2.0, tf.concat(values=[x, y, z], axis=-1)))
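A minimal sketch of quat_mult_error; note that the inputs are expected in (x, y, z, w) order per row, as implied by the tf.gather([3, 0, 1, 2]) reordering above (values illustrative; the result is a tensor to be evaluated in a session as in the other examples):

import numpy as np
import tensorflow as tf

y_true = tf.constant([[0.0, 0.0, 0.0, 1.0]], dtype=tf.float32)  # identity in (x, y, z, w)
y_pred = tf.constant([[0.0, 0.0, np.sin(0.05), np.cos(0.05)]],
                     dtype=tf.float32)                           # ~0.1 rad about z
err = quat_mult_error(y_true, y_pred)  # shape (1, 3), roughly [0., 0., 0.1]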
Example #20
 def test_norm(self):
     with self.test_session():
         self.assertAllEqual(tfq.Quaternion((1, 2, 3, 4)).norm(), [30.0])
         self.assertAllEqual(
             tfq.Quaternion((-1, -2, -3, -4)).norm(), [30.0])
         self.assertAllEqual(tfq.Quaternion((0, 0, 0, 0)).norm(), [0.0])
Example #21
def compute_loss(output, data_dict, FLAGS):

    if FLAGS.model == "pose":
        pose = output[2]

    if FLAGS.model == "hourglass":
        pre_landmark_init = output[0][1]
        pred_landmark = output[1][1]
    else:
        pred = output[0]
        pred_landmark = output[1]

    #=======
    #Depth loss
    #=======
    total_loss = 0
    depth_loss = 0
    landmark_loss = 0
    vis_loss = 0
    transformation_loss = 0

    depth_weight = 10000
    landmark_weight = 1
    vis_weight = 10
    translation_weight = 100
    quaternion_weight = 5000
    scale_weight = 0.1

    label_batch = data_dict['label']
    landmark = data_dict['points2D']
    visibility = data_dict['visibility']

    quaternion = data_dict['quaternion']
    translation = data_dict['translation']
    depth = data_dict["depth"]

    if FLAGS.with_seg:
        #Segmentation loss
        for s in range(FLAGS.num_scales):
            curr_label = tf.image.resize_area(label_batch, [
                int(FLAGS.img_height / (2**s)),
                int(FLAGS.img_width / (2**s))
            ])
            depth_loss += l2loss(curr_label, pred[s]) / (2**s)
        depth_loss = depth_weight * depth_loss

    if FLAGS.model == "pose":
        quat_est = tfq.Quaternion(pose[:, 0:4])
        quat_gt = tfq.Quaternion(quaternion)

        #Get gt and estimate landmark locations
        pred_lm_coord = tf.reverse(argmax_2d(pred_landmark), [1])
        gt_lm_coord = tf.reverse(argmax_2d(landmark), [1])

        #import pdb;pdb.set_trace()

        batch_index = tf.tile(
            tf.expand_dims(tf.range(tf.shape(pred_landmark)[0]), 1),
            [1, tf.shape(pred_landmark)[3]])
        index_gt = tf.concat([
            tf.expand_dims(batch_index, axis=2),
            tf.transpose(tf.reverse(gt_lm_coord, [1]), [0, 2, 1])
        ],
                             axis=2)
        index_pred = tf.concat([
            tf.expand_dims(batch_index, axis=2),
            tf.transpose(tf.reverse(pred_lm_coord, [1]), [0, 2, 1])
        ],
                               axis=2)

        gt_depth_val = tf.gather_nd(depth[:, :, :, 0], index_gt) * 100.0
        pred_depth_val = tf.gather_nd(depth[:, :, :, 0], index_pred) * 100.0

        ones = tf.ones(
            [tf.shape(pred_landmark)[0], 1,
             tf.shape(pred_landmark)[3]])
        pred_lm_coord = tf.concat([tf.cast(pred_lm_coord, tf.float32), ones],
                                  axis=1)
        gt_lm_coord = tf.concat([tf.cast(gt_lm_coord, tf.float32), ones],
                                axis=1)

        gt_cam_coord = pixel2cam(gt_depth_val, gt_lm_coord, data_dict["matK"])
        pred_cam_coord = pixel2cam(pred_depth_val, pred_lm_coord,
                                   data_dict["matK"])

        #import pdb;pdb.set_trace()
        gt_lm_3D = tf.matmul(quat_gt.as_rotation_matrix(
        ), gt_cam_coord) + tf.tile(
            tf.expand_dims(translation[:, 0:3] *
                           tf.expand_dims(translation[:, -1], axis=1),
                           axis=2), [1, 1, tf.shape(gt_cam_coord)[2]]
        )  #  +tf.tile(tf.expand_dims(translation[:,0:3]*translation[:,3],axis=2),[1,1,tf.shape(gt_cam_coord)[2]])
        pred_lm_3D = tf.matmul(quat_est.as_rotation_matrix(
        ), pred_lm_coord) + tf.tile(
            tf.expand_dims(pose[:, 4:-1] * tf.expand_dims(pose[:, -1], axis=1),
                           axis=2), [1, 1, tf.shape(gt_cam_coord)[2]]
        )  #   +tf.tile(tf.expand_dims(pose[:,4:-1]*translation[:,-1],axis=2),[1,1,tf.shape(pred_lm_coord)[2]])

        lm3d_weights = tf.clip_by_value(visibility, 0.0, 1.5)
        lm3d_weights = tf.tile(tf.expand_dims(lm3d_weights, axis=1), [1, 3, 1])
        gt_lm_3D = gt_lm_3D * lm3d_weights
        #import pdb;pdb.set_trace()
        transformation_loss = l2loss(gt_lm_3D, pred_lm_3D, lm3d_weights)

    if FLAGS.model == "multiscale":
        for s in range(FLAGS.num_scales):
            curr_landmark = tf.image.resize_area(landmark, [
                int(FLAGS.img_height / (2**s)),
                int(FLAGS.img_width / (2**s))
            ])
            landmark_loss += l2loss(
                curr_landmark, pred_landmark[s]) / (2**s) * landmark_weight

    elif FLAGS.model == "hourglass":
        landmark_loss = l2loss(data_dict["landmark_init"],
                               pre_landmark_init) * landmark_weight
        landmark_loss = l2loss(landmark,
                               pred_landmark) * landmark_weight + landmark_loss

    else:
        landmark_loss = l2loss(landmark, pred_landmark) * landmark_weight

    total_loss = depth_loss + landmark_loss + vis_loss + transformation_loss

    return total_loss, depth_loss, landmark_loss, vis_loss, transformation_loss
Example #22
 def test___sub__(self):
     a = tfq.Quaternion([1, 2, 3, 4])
     b = tfq.Quaternion([5, 6, 7, 8])
     result = np.array([-4, -4, -4, -4], dtype=np.float32)
     with self.test_session():
         self.assertAllEqual(a - b, result)
Example #23
 def test___add__(self):
     a = tfq.Quaternion([1, 2, 3, 4])
     b = tfq.Quaternion([5, 6, 7, 8])
     result = np.array([6, 8, 10, 12], dtype=np.float32)
     with self.test_session():
         self.assertAllEqual(a + b, result)
Example #24
 def test_value(self):
     val = [1.0, 2.0, 3.0, 4.0]
     q = tfq.Quaternion(val)
     with self.test_session():
         self.assertAllEqual(q, tf.constant(val))
Example #25
# Assumed imports for this excerpt (not shown in the original snippet); `sources`
# and `PI` are presumably defined elsewhere in the original script.
import itertools
import numpy as np
import tensorflow as tf
import pyvista as pv
import tfquaternion as tfq
import tfrt.distributions as distributions
import tfrt.drawing as drawing
from tfrt.spectrumRGB import rgb
import tfrt.geometry as geometry

angular_size = itertools.cycle(
    np.array([PI / 2, PI / 3, PI / 4, PI / 8, PI / 12, PI / 48],
             dtype=np.float64))
sample_count = itertools.cycle([100, 500, 2500])
center = itertools.cycle([(0, 0, 0), (3, 0, 0), (0, 3, 0)])
central_angle = tf.constant((1.0, 0.0, 0.0), dtype=tf.float64)

angle_step_size = PI / 12
a = np.cos(angle_step_size / 2.0)
b = np.sin(angle_step_size / 2.0)
x_rotation = tfq.Quaternion((a, b, 0.0, 0.0), dtype=tf.float64)
y_rotation = tfq.Quaternion((a, 0.0, b, 0.0), dtype=tf.float64)
z_rotation = tfq.Quaternion((a, 0.0, 0.0, b), dtype=tf.float64)

# build the source rays
angles = distributions.StaticUniformSphere(next(angular_size),
                                           next(sample_count))
source = sources.PointSource(3,
                             next(center),
                             central_angle,
                             angles, [drawing.YELLOW],
                             dense=True)

plot = pv.Plotter()
plot.add_axes()
drawer = drawing.RayDrawer3D(plot)