Example #1
    def _build_model(self):

        # For determining the runtime shape
        # x_shp = tf.shape(self.x_in)

        # -------------------- Network architecture --------------------
        # Import the correct build_graph function for this architecture
        from archs.arch import build_graph
        print("Building Graph")
        self.logits, self.R_hat, self.t_hat = build_graph(self.x_in, self.is_training, self.config)
        # ---------------------------------------------------------------

        self.weights = tf.nn.relu(tf.tanh(self.logits))

        # tf_matrix_from_quaternion is needed by the 'quat' branch below;
        # it is assumed to live in ops alongside tf_skew_symmetric.
        from ops import tf_skew_symmetric, tf_matrix_from_quaternion

        if self.config.representation == 'lie':
            # so(3) -> SO(3): reshape the Lie-algebra vector into a
            # skew-symmetric matrix and take the matrix exponential.
            self.skew = tf_skew_symmetric(self.R_hat)
            self.R_hat = tf.reshape(self.skew, [-1, 3, 3])
            self.R_hat = tf.linalg.expm(self.R_hat)

        elif self.config.representation == 'quat':
            # Quaternion -> rotation matrix.
            self.R_hat = tf_matrix_from_quaternion(self.R_hat)
            self.R_hat = tf.reshape(self.R_hat, [-1, 3, 3])

        elif self.config.representation == 'linear':
            # Raw 9-vector interpreted directly as a 3x3 matrix.
            self.R_hat = tf.reshape(self.R_hat, [-1, 3, 3])

        else:
            print("'{}' is not a valid representation".format(
                self.config.representation))
            exit(10)

    def _build_loss(self):
        """Build our cross entropy loss."""

        with tf.variable_scope("EigFreeLoss", reuse=tf.AUTO_REUSE):

            x_shp = tf.shape(self.x_in)
            gt_geod_d = self.y_in[:, :, 0]
            # recall and precision
            is_pos = tf.to_float(gt_geod_d < self.config.obj_geod_th)
            is_neg = tf.to_float(gt_geod_d >= self.config.obj_geod_th)
            precision = tf.reduce_mean(
                tf.reduce_sum(tf.to_float(self.logits > 0) * is_pos, axis=1) /
                (tf.reduce_sum(tf.to_float(self.logits > 0) *
                               (is_pos + is_neg),
                               axis=1) + 1.e-6))
            tf.summary.scalar("precision", precision)
            recall = tf.reduce_mean(
                tf.reduce_sum(tf.to_float(self.logits > 0) * is_pos, axis=1) /
                (tf.reduce_sum(is_pos, axis=1) + 1.e-6))
            tf.summary.scalar("recall", recall)
            # Get groundtruth Essential matrix
            e_gt_unnorm = tf.reshape(
                tf.matmul(
                    tf.reshape(tf_skew_symmetric(self.t_in), (x_shp[0], 3, 3)),
                    tf.reshape(self.R_in, (x_shp[0], 3, 3))), (x_shp[0], 9))
            e_gt = e_gt_unnorm / tf.norm(e_gt_unnorm, axis=1, keepdims=True)

            # build loss
            p2_e_gt = tf.matmul(
                tf.transpose(tf.matrix_inverse(self.p2), (0, 2, 1)),
                tf.reshape(e_gt, (x_shp[0], 3, 3)))
            e_gt = tf.reshape(tf.matmul(p2_e_gt, tf.matrix_inverse(self.p1)),
                              (x_shp[0], 9))
            e_gt /= tf.norm(e_gt, axis=1, keepdims=True)

            p = tf.expand_dims(e_gt, axis=-1)
            p_t = tf.transpose(p, [0, 2, 1])
            data_term = tf.matmul(tf.matmul(p_t, self.XwX), p)

            p_hat = tf.eye(9, batch_shape=[x_shp[0]],
                           dtype=tf.float32) - tf.matmul(p, p_t)
            # p_hat_t = tf.transpose(p_hat, [0, 2, 1])
            # XwX_e_neg = tf.matmul(tf.matmul(p_hat_t, self.XwX), p_hat)
            XwX_e_neg = tf.matmul(self.XwX, p_hat)
            trace = tf.trace(XwX_e_neg)
            norm_term = tf.exp(-1.e-3 * trace)
            # self.loss = tf.reduce_mean(data_term)
            self.loss = tf.reduce_mean(data_term + 10. * norm_term)
            tf.summary.scalar("data_term", tf.reduce_mean(data_term))
            tf.summary.scalar("norm_term", tf.reduce_mean(norm_term))
            tf.summary.scalar("loss", self.loss)
Example #3
    def _build_loss(self):
        """Build our cross entropy loss."""

        with tf.variable_scope("Loss", reuse=tf.AUTO_REUSE):

            x_shp = tf.shape(self.x_in)
            # The groundtruth epi sqr
            gt_geod_d = self.y_in[:, :, 0]
            # tf.summary.histogram("gt_geod_d", gt_geod_d)

            # Get groundtruth Essential matrix
            e_gt_unnorm = tf.reshape(
                tf.matmul(
                    tf.reshape(tf_skew_symmetric(self.t_in), (x_shp[0], 3, 3)),
                    tf.reshape(self.R_in, (x_shp[0], 3, 3))), (x_shp[0], 9))
            e_gt = e_gt_unnorm / tf.norm(e_gt_unnorm, axis=1, keepdims=True)

            # e_hat = tf.reshape(tf.matmul(
            #     tf.reshape(t_hat, (-1, 3, 3)),
            #     tf.reshape(r_hat, (-1, 3, 3))
            # ), (-1, 9))

            # Essential matrix loss
            essential_loss = tf.reduce_mean(
                tf.minimum(tf.reduce_sum(tf.square(self.e_hat - e_gt), axis=1),
                           tf.reduce_sum(tf.square(self.e_hat + e_gt),
                                         axis=1)))
            tf.summary.scalar("essential_loss", essential_loss)

            # Classification loss
            is_pos = tf.to_float(gt_geod_d < self.config.obj_geod_th)
            is_neg = tf.to_float(gt_geod_d >= self.config.obj_geod_th)
            c = is_pos - is_neg
            # log_sigmoid is the numerically stable form of log(sigmoid(x))
            classif_losses = -tf.log_sigmoid(c * self.logits)

            # balance
            num_pos = tf.nn.relu(tf.reduce_sum(is_pos, axis=1) - 1.0) + 1.0
            num_neg = tf.nn.relu(tf.reduce_sum(is_neg, axis=1) - 1.0) + 1.0
            classif_loss_p = tf.reduce_sum(classif_losses * is_pos, axis=1)
            classif_loss_n = tf.reduce_sum(classif_losses * is_neg, axis=1)
            classif_loss = tf.reduce_mean(classif_loss_p * 0.5 / num_pos +
                                          classif_loss_n * 0.5 / num_neg)
            tf.summary.scalar("classif_loss", classif_loss)
            tf.summary.scalar("classif_loss_p",
                              tf.reduce_mean(classif_loss_p * 0.5 / num_pos))
            tf.summary.scalar("classif_loss_n",
                              tf.reduce_mean(classif_loss_n * 0.5 / num_neg))
            # 1e-6 guards against batches with no predicted or no true inliers
            precision = tf.reduce_mean(
                tf.reduce_sum(tf.to_float(self.logits > 0) * is_pos, axis=1) /
                (tf.reduce_sum(tf.to_float(self.logits > 0) *
                               (is_pos + is_neg),
                               axis=1) + 1.e-6))
            tf.summary.scalar("precision", precision)
            recall = tf.reduce_mean(
                tf.reduce_sum(tf.to_float(self.logits > 0) * is_pos, axis=1) /
                (tf.reduce_sum(is_pos, axis=1) + 1.e-6))
            tf.summary.scalar("recall", recall)

            # L2 loss
            for var in tf.trainable_variables():
                if "weights" in var.name:
                    tf.add_to_collection("l2_losses", tf.reduce_sum(var**2))
            l2_loss = tf.add_n(tf.get_collection("l2_losses"))
            tf.summary.scalar("l2_loss", l2_loss)

            # Check global_step and add essential loss
            self.loss = self.config.loss_decay * l2_loss
            if self.config.loss_essential > 0:
                self.loss += (self.config.loss_essential * essential_loss *
                              tf.to_float(self.global_step >= tf.to_int64(
                                  self.config.loss_essential_init_iter)))
            if self.config.loss_classif > 0:
                self.loss += self.config.loss_classif * classif_loss

            tf.summary.scalar("loss", self.loss)
Example #4
    def _build_loss(self, e_hat, logit, x_in, y_in, weights, name=""):
        """Build our cross entropy loss."""

        with tf.variable_scope("Loss_{}".format(name), reuse=tf.AUTO_REUSE):
            x_shp = tf.shape(x_in)
            # The groundtruth epi sqr
            gt_geod_d = y_in[:, :, 0]
            # tf.summary.histogram("gt_geod_d", gt_geod_d)

            # Get groundtruth Essential matrix
            e_gt_unnorm = tf.reshape(
                tf.matmul(
                    tf.reshape(tf_skew_symmetric(self.t_in), (x_shp[0], 3, 3)),
                    tf.reshape(self.R_in, (x_shp[0], 3, 3))), (x_shp[0], 9))
            e_gt = e_gt_unnorm / tf.norm(e_gt_unnorm, axis=1, keepdims=True)

            # e_hat = tf.reshape(tf.matmul(
            #     tf.reshape(t_hat, (-1, 3, 3)),
            #     tf.reshape(r_hat, (-1, 3, 3))
            # ), (-1, 9))

            # Essential matrix loss
            essential_loss = tf.reduce_mean(
                tf.minimum(tf.reduce_sum(tf.square(e_hat - e_gt), axis=1),
                           tf.reduce_sum(tf.square(e_hat + e_gt), axis=1)))

            tf.summary.scalar("essential_loss", essential_loss)

            # Classification loss
            is_pos = tf.to_float(gt_geod_d < self.config.obj_geod_th)
            is_neg = tf.to_float(gt_geod_d >= self.config.obj_geod_th)
            c = is_pos - is_neg

            # Stable form, matching tf.log_sigmoid in the multi-logit branch
            classif_losses = -tf.log_sigmoid(c * logit)

            # balance
            num_pos = tf.nn.relu(tf.reduce_sum(is_pos, axis=1) - 1.0) + 1.0
            num_neg = tf.nn.relu(tf.reduce_sum(is_neg, axis=1) - 1.0) + 1.0
            classif_loss_p = tf.reduce_sum(classif_losses * is_pos, axis=1)
            classif_loss_n = tf.reduce_sum(classif_losses * is_neg, axis=1)

            classif_loss = tf.reduce_mean(classif_loss_p * 0.5 / num_pos +
                                          classif_loss_n * 0.5 / num_neg)
            tf.summary.scalar("classif_loss", classif_loss)
            tf.summary.scalar("classif_loss_p",
                              tf.reduce_mean(classif_loss_p * 0.5 / num_pos))
            tf.summary.scalar("classif_loss_n",
                              tf.reduce_mean(classif_loss_n * 0.5 / num_neg))
            # 1e-6 guards against batches with no predicted or no true inliers
            precision = tf.reduce_mean(
                tf.reduce_sum(tf.to_float(logit > 0) * is_pos, axis=1) /
                (tf.reduce_sum(tf.to_float(logit > 0) * (is_pos + is_neg),
                               axis=1) + 1.e-6))
            tf.summary.scalar("precision", precision)
            recall = tf.reduce_mean(
                tf.reduce_sum(tf.to_float(logit > 0) * is_pos, axis=1) /
                (tf.reduce_sum(is_pos, axis=1) + 1.e-6))
            tf.summary.scalar("recall", recall)
            self.precision = precision
            self.recall = recall

            loss = 0

            if self.config.loss_essential > 0:
                loss += (self.config.loss_essential * essential_loss *
                         tf.to_float(self.global_step >= tf.to_int64(
                             self.config.loss_essential_init_iter)))
            if self.config.loss_classif > 0:
                loss += self.config.loss_classif * classif_loss

            if self.config.loss_multi_logit > 0:
                # init value is 0
                th_logit = self.config.th_logit
                classif_multi_logit = []
                logit_attentions = tf.get_collection(
                    "logit_attention")  # logits from the local attention layers
                for logit_attention in logit_attentions:
                    print("attention : {}".format(logit_attention.name))
                    logit_i = tf.squeeze(logit_attention - th_logit, [1, 3])
                    gt_geod_d = y_in[:, :, 0]
                    is_pos = tf.to_float(gt_geod_d < self.config.obj_geod_th)
                    is_neg = tf.to_float(gt_geod_d >= self.config.obj_geod_th)
                    c = is_pos - is_neg
                    classif_losses = -tf.log_sigmoid(c * logit_i)
                    num_pos = tf.nn.relu(tf.reduce_sum(is_pos, axis=1) -
                                         1.0) + 1.0
                    num_neg = tf.nn.relu(tf.reduce_sum(is_neg, axis=1) -
                                         1.0) + 1.0
                    classif_loss_p = tf.reduce_sum(classif_losses * is_pos,
                                                   axis=1)
                    classif_loss_n = tf.reduce_sum(classif_losses * is_neg,
                                                   axis=1)
                    classif_loss = tf.reduce_mean(
                        classif_loss_p * 0.5 / num_pos +
                        classif_loss_n * 0.5 / num_neg)
                    classif_multi_logit += [classif_loss]
                classif_multi_logit = tf.reduce_mean(
                    tf.stack(classif_multi_logit))
                loss += classif_multi_logit * self.config.loss_multi_logit

            tf.summary.scalar("loss", loss)
            return loss
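All of the examples build the ground-truth essential matrix the same way: E = [t]_x R, flattened and unit-normalized, and compare it to the prediction only up to sign, since E and -E describe the same epipolar geometry. A NumPy sketch of that construction for sanity-checking the TF graphs (function names are mine):

import numpy as np

def essential_from_rt(R, t):
    # E = [t]_x R, flattened row-major and normalized to unit norm,
    # mirroring e_gt in the graphs above.
    t_x = np.array([[0.0, -t[2], t[1]],
                    [t[2], 0.0, -t[0]],
                    [-t[1], t[0], 0.0]])
    e = (t_x @ R).reshape(9)
    return e / np.linalg.norm(e)

def essential_loss(e_hat, e_gt):
    # Sign-invariant squared distance: min over E and -E.
    return min(np.sum((e_hat - e_gt) ** 2), np.sum((e_hat + e_gt) ** 2))

When e_hat is also unit-normalized (as e_gt is above), this loss equals 2 - 2|<e_hat, e_gt>|, so it vanishes exactly when the two essential matrices agree up to the global sign.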