Example #1
    def _build_model(self):
        """Build our MLP network."""

        with tf.variable_scope("Matchnet", reuse=tf.AUTO_REUSE):
            # For determining the runtime shape
            x_shp = tf.shape(self.x_in)

            # -------------------- Network architecture --------------------
            # Import correct build_graph function
            from archs.cvpr2018 import build_graph
            # Build graph
            print("Building Graph")
            self.logits = build_graph(self.x_in, self.is_training, self.config)
            # ---------------------------------------------------------------

            # Turn into weights for each sample
            self.w = tf.nn.relu(tf.tanh(self.logits))  # bs, n, 2
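The relu(tanh(.)) mapping above turns unbounded logits into per-correspondence weights in [0, 1): negative logits (likely outliers) are clamped to zero, while confident inliers saturate towards one. A tiny standalone check of that mapping (illustrative only, not part of the example):

import numpy as np

logits = np.array([-3.0, -0.1, 0.0, 0.5, 4.0])
weights = np.maximum(0.0, np.tanh(logits))  # same as relu(tanh(logits))
print(weights)  # roughly [0, 0, 0, 0.46, 0.999]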
Example #2
    def _build_model(self):
        """Build our MLP network."""

        with tf.variable_scope("Matchnet", reuse=tf.AUTO_REUSE):
            # For determining the runtime shape
            x_shp = tf.shape(self.x_in)

            # -------------------- Network architecture --------------------
            # Import correct build_graph function
            arch = self.config.net_arch
            if arch == "cvpr_2018":
                import archs.cvpr2018 as arch
            elif arch == "nips_2018_nl":
                import archs.nips2018_nl as arch
            else:
                raise ValueError("Unknown net_arch: {}".format(arch))
            # Build graph
            print("Building Graph")
            self.logits = arch.build_graph(self.x_in, self.is_training, self.config)
            # ---------------------------------------------------------------

            # Turn into weights for each sample
            weights = tf.nn.relu(tf.tanh(self.logits))

            # Make input data (num_img_pair x num_corr x 4), then transpose
            # to (num_img_pair x 4 x num_corr) for the constraint matrix
            xx = tf.transpose(tf.reshape(
                self.x_in, (x_shp[0], x_shp[2], 4)), (0, 2, 1))

            # Create the matrix to be used for the eight-point algorithm
            X = tf.transpose(tf.stack([
                xx[:, 2] * xx[:, 0], xx[:, 2] * xx[:, 1], xx[:, 2],
                xx[:, 3] * xx[:, 0], xx[:, 3] * xx[:, 1], xx[:, 3],
                xx[:, 0], xx[:, 1], tf.ones_like(xx[:, 0])
            ], axis=1), (0, 2, 1))
            print("X shape = {}".format(X.shape))
            wX = tf.reshape(weights, (x_shp[0], x_shp[2], 1)) * X
            print("wX shape = {}".format(wX.shape))
            XwX = tf.matmul(tf.transpose(X, (0, 2, 1)), wX)
            print("XwX shape = {}".format(XwX.shape))

            # Recover essential matrix from self-adjoint eigendecomposition
            e, v = tf.self_adjoint_eig(XwX)
            self.e_hat = tf.reshape(v[:, :, 0], (x_shp[0], 9))
            # Make unit norm just in case
            self.e_hat /= tf.norm(self.e_hat, axis=1, keepdims=True)
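For reference, the weighted eight-point step in this example can be written compactly in NumPy: each row of X encodes the epipolar constraint x2^T E x1 = 0 for one correspondence, and the estimated essential matrix is the eigenvector of X^T diag(w) X with the smallest eigenvalue. The sketch below only mirrors the TensorFlow ops above under assumed shapes (x is (n, 4) with columns x1, y1, x2, y2; w is (n,) weights); it is not part of the original code.

import numpy as np

def weighted_eight_point(x, w):
    x1, y1, x2, y2 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]
    # Same column order as the tf.stack above
    X = np.stack([x2 * x1, x2 * y1, x2,
                  y2 * x1, y2 * y1, y2,
                  x1, y1, np.ones_like(x1)], axis=1)  # (n, 9)
    XwX = X.T @ (w[:, None] * X)                      # (9, 9)
    # np.linalg.eigh returns eigenvalues in ascending order, like
    # tf.self_adjoint_eig, so column 0 is the smallest-eigenvalue eigenvector
    _, v = np.linalg.eigh(XwX)
    e_hat = v[:, 0]
    return e_hat / np.linalg.norm(e_hat)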
Example #3
    def _build_model(self):
        """Build our MLP network."""

        with tf.variable_scope("Matchnet", reuse=tf.AUTO_REUSE):
            # For determining the runtime shape
            x_shp = tf.shape(self.x_in)

            # -------------------- Network architecture --------------------
            # Import correct build_graph function
            from archs.cvpr2018 import build_graph
            # Build graph
            print("Building Graph")
            self.logits = build_graph(self.x_in, self.is_training, self.config)
            # ---------------------------------------------------------------

            # Turn into weights for each sample
            weights = tf.nn.relu(tf.tanh(self.logits))
            # Move to Richard Hartley's normalized coordinate system
            # Normalization matrix for modifying GT E
            x_mean = tf.reduce_mean(self.x_in, axis=2, keepdims=True)
            x_norm = self.x_in - x_mean
            x_mss = tf.square(x_norm)
            root2 = tf.sqrt(tf.constant(2., dtype=tf.float32))
            x_rms1 = root2 / tf.reduce_mean(tf.sqrt(
                tf.reduce_sum(x_mss[:, :, :, :2], axis=-1, keepdims=True)),
                                            axis=2,
                                            keepdims=True)
            x_rms2 = root2 / tf.reduce_mean(tf.sqrt(
                tf.reduce_sum(x_mss[:, :, :, 2:], axis=-1, keepdims=True)),
                                            axis=2,
                                            keepdims=True)
            x_norm *= tf.concat([x_rms1, x_rms1, x_rms2, x_rms2], axis=-1)

            scale1 = x_rms1[:, 0, 0, 0]
            dx1 = -x_mean[:, 0, 0, 0] * scale1
            dy1 = -x_mean[:, 0, 0, 1] * scale1
            scale2 = x_rms2[:, 0, 0, 0]
            dx2 = -x_mean[:, 0, 0, 2] * scale2
            dy2 = -x_mean[:, 0, 0, 3] * scale2
            zeros = tf.zeros_like(scale1)
            ones = tf.ones_like(scale1)
            self.p1 = tf.reshape(
                tf.stack([
                    scale1, zeros, dx1, zeros, scale1, dy1, zeros, zeros, ones
                ],
                         axis=1), (x_shp[0], 3, 3))
            self.p2 = tf.reshape(
                tf.stack([
                    scale2, zeros, dx2, zeros, scale2, dy2, zeros, zeros, ones
                ],
                         axis=1), (x_shp[0], 3, 3))
            xx = tf.transpose(tf.reshape(x_norm, (x_shp[0], x_shp[2], 4)),
                              (0, 2, 1))

            # # Make input data (num_img_pair x num_corr x 4)
            # xx = tf.transpose(tf.reshape(
            #     self.x_in, (x_shp[0], x_shp[2], 4)), (0, 2, 1))

            # Create the matrix to be used for the eight-point algorithm
            X = tf.transpose(
                tf.stack([
                    xx[:, 2] * xx[:, 0], xx[:, 2] * xx[:, 1], xx[:, 2],
                    xx[:, 3] * xx[:, 0], xx[:, 3] * xx[:, 1], xx[:, 3],
                    xx[:, 0], xx[:, 1],
                    tf.ones_like(xx[:, 0])
                ],
                         axis=1), (0, 2, 1))
            print("X shape = {}".format(X.shape))
            wX = tf.reshape(weights, (x_shp[0], x_shp[2], 1)) * X
            print("wX shape = {}".format(wX.shape))
            self.XwX = tf.matmul(tf.transpose(X, (0, 2, 1)), wX)
            print("XwX shape = {}".format(self.XwX.shape))