Code Example #1
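# Imports assumed by this snippet (TF 1.x graph API; `tfutils` is a project-local
# helper module providing batch_matmul and related wrappers):
import numpy as np
import tensorflow as tf
import tfutils
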
def get_geometric_loss(opts, sample, output_sim, name='geo_loss'):
  """Get geometric loss funcion (epipolar constraints)
  Input:
  - opts (options) - object with all relevant options stored
  - sample (dict) - sample output from dataset
  - output_sim (tf.Tensor) - output similarity from network
  - name (string, optional) - name prefix for tensorflow scoping (default geo_loss)
  Output: loss (tf.Tensor) - scalar mean epipolar residual weighted by output_sim
  """
  b = opts.batch_size
  v = opts.dataset_params.views[-1]
  p = opts.dataset_params.points[-1]
  # Build rotation matrices
  batch_size = tf.shape(sample['Rotations'])[0]
  R = tf.reshape(tf.tile(sample['Rotations'], [ 1, 1, p, 1 ]), [-1, v*p, 3, 3])
  T = tf.reshape(tf.tile(sample['Translations'], [ 1, 1, p ]), [-1, v*p, 3])
  X = tf.concat([ sample['InitEmbeddings'][...,-4:-2],
                  tf.tile(tf.ones((1,v*p,1)), [ batch_size, 1, 1 ]) ], axis=-1)
  # Compute absolute essential losses
  RX = tf.einsum('bvik,bvk->bvi',R,X)
  TcrossRX = tf.cross(T, RX)
  E_part = tfutils.batch_matmul(RX, tf.transpose(TcrossRX, perm=[0, 2, 1]))
  # Mask out same-view pairs so only cross-view (epipolar) terms contribute
  npmask = np.kron(1 - np.eye(v), np.ones((p, p))).reshape(1, v*p, v*p)
  npmask = npmask.astype(opts.dataset_params.dtype)
  mask = tf.convert_to_tensor(npmask, name='mask_{}'.format(name))
  # Compute symmetric part
  E = tf.multiply(tf.abs(E_part + tf.transpose(E_part, [0, 2, 1])), mask)
  if opts.full_tensorboard: # Add to tensorboard
    tf.summary.image('Geometric matrix {}'.format(name), tf.expand_dims(E, -1))
    tf.summary.histogram('Geometric matrix hist {}'.format(name), E)
    tf.summary.scalar('Geometric matrix norm {}'.format(name), tf.norm(E, ord=np.inf))
  return tf.reduce_mean(tf.multiply(output_sim, E), name=name)
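
Why the symmetrization in `E = |E_part + E_part^T|` yields an epipolar residual: writing a = R_i x_i and b = R_j x_j, the triple-product identity a.(T_j x b) + b.(T_i x a) = a.((T_j - T_i) x b) turns entry (i, j) of the sum into the coplanarity constraint between views i and j. That quantity is zero for a true correspondence under the convention that the rotations map normalized camera rays into the world frame and the translations are camera centres (that convention is an assumption here, not something the snippet confirms). A minimal NumPy sketch of the identity with synthetic cameras (all names below are illustrative):

import numpy as np

def rot_x(a):
    c, s = np.cos(a), np.sin(a)
    return np.array([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]])

def project(R, T, P):
    """Normalized homogeneous image coords of world point P (R: cam-to-world, T: centre)."""
    pc = R.T @ (P - T)
    return np.array([pc[0] / pc[2], pc[1] / pc[2], 1.0])

P = np.array([0.3, -0.2, 5.0])                      # a world point in front of both cameras
R1, T1 = np.eye(3), np.zeros(3)
R2, T2 = rot_x(0.1), np.array([1.0, 0.0, 0.0])
x1, x2 = project(R1, T1, P), project(R2, T2, P)

a, b = R1 @ x1, R2 @ x2
e_ij = a @ np.cross(T2, b)                          # E_part[i, j]
e_ji = b @ np.cross(T1, a)                          # E_part[j, i]
print(e_ij + e_ji)                                  # ~0 for a true correspondence
print(a @ np.cross(T2 - T1, b))                     # same residual, written directly
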
Code Example #2
 def get_geometric_loss(self, sample, output_sim, name='geo_loss'):
     b, v, p = self.tensor_sizes
     # Build rotation and cross product matrices
     R = tf.reshape(tf.tile(sample['rots'], [1, 1, p, 1]),
                    [-1, v * p, 3, 3])
     T = tf.reshape(tf.tile(sample['trans'], [1, 1, p]), [-1, v * p, 3])
     nodes = sample['graph'].nodes
     X = tf.concat([
         tf.reshape(nodes[..., -4:-2], [b, -1, 2]),
         tf.tile(tf.ones((1, v * p, 1)), [b, 1, 1])
     ], axis=-1)
     RX = tf.einsum('bvik,bvk->bvi', R, X)
     TcrossRX = tf.cross(T, RX)
     # Build final Essential matrix distance score
     E_part = tfutils.batch_matmul(RX, tf.transpose(TcrossRX,
                                                    perm=[0, 2, 1]))
     # npmask = np.kron(1-np.eye(v),np.ones((p,p)))
     # npmask = npmask.reshape(1,v*p,v*p).astype(self.dataset_params.dtype)
     # mask = tf.convert_to_tensor(npmask, name='mask_{}'.format(name))
     # E = tf.multiply(tf.abs(E_part + tf.transpose(E_part, [0, 2, 1])), mask)
     E = tf.abs(E_part + tf.transpose(E_part, [0, 2, 1]))
     # TensorBoard summaries
     tf.summary.image('Geometric matrix {}'.format(name),
                      tf.expand_dims(E, -1))
     tf.summary.histogram('Geometric matrix hist {}'.format(name), E)
     tf.summary.scalar('Geometric matrix norm {}'.format(name),
                       tf.norm(E, ord=np.inf))
     if isinstance(output_sim, tf.SparseTensor):
         # Only the index pairs stored in the sparse similarity contribute
         mul_vals = tf.gather_nd(E, output_sim.indices) * output_sim.values
         geom_loss = tf.reduce_mean(mul_vals, name=name)
     else:
         geom_loss = tf.reduce_mean(tf.multiply(output_sim, E), name=name)
     return geom_loss
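
The sparse branch mirrors the dense `tf.multiply(output_sim, E)`, except that only the index pairs actually stored in the SparseTensor contribute: `tf.gather_nd(E, output_sim.indices)` pulls out the matching entries of `E` and weights them by `output_sim.values`, so the mean is taken over the observed pairs only rather than over the full matrix. A toy standalone illustration (2-D instead of batched, TF 2.x eager assumed):

import tensorflow as tf

E = tf.constant([[0.0, 1.0, 2.0],
                 [1.0, 0.0, 3.0],
                 [2.0, 3.0, 0.0]])
sim = tf.SparseTensor(indices=[[0, 1], [1, 2], [2, 0]],
                      values=[0.5, 1.0, 0.25],
                      dense_shape=[3, 3])

masked = tf.gather_nd(E, sim.indices) * sim.values   # [0.5, 3.0, 0.5]
print(tf.reduce_mean(masked).numpy())                # 1.333... (mean over stored pairs only)
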
Code Example #3
def get_geometric_loss(opts, sample, output_sim, name='geo_loss'):
  b = opts.batch_size
  v = opts.dataset_params.views[-1]
  p = opts.dataset_params.points[-1]
  # Build rotation matrices
  batch_size = tf.shape(sample['Rotations'])[0]
  R = tf.reshape(tf.tile(sample['Rotations'], [ 1, 1, p, 1 ]), [-1, v*p, 3, 3])
  T = tf.reshape(tf.tile(sample['Translations'], [ 1, 1, p ]), [-1, v*p, 3])
  X = tf.concat([ sample['InitEmbeddings'][...,-4:-2],
                  tf.tile(tf.ones((1,v*p,1)), [ batch_size, 1, 1 ]) ], axis=-1)
  RX = tf.einsum('bvik,bvk->bvi',R,X)
  TcrossRX = tf.cross(T, RX)
  E_part = tfutils.batch_matmul(RX, tf.transpose(TcrossRX, perm=[0, 2, 1]))
  npmask = np.kron(1 - np.eye(v), np.ones((p, p))).reshape(1, v*p, v*p)
  npmask = npmask.astype(opts.dataset_params.dtype)
  mask = tf.convert_to_tensor(npmask, name='mask_{}'.format(name))
  E = tf.multiply(tf.abs(E_part + tf.transpose(E_part, [0, 2, 1])), mask)
  if opts.full_tensorboard:
    tf.summary.image('Geometric matrix {}'.format(name), tf.expand_dims(E, -1))
    tf.summary.histogram('Geometric matrix hist {}'.format(name), E)
    tf.summary.scalar('Geometric matrix norm {}'.format(name), tf.norm(E, ord=np.inf))
  return tf.reduce_mean(tf.multiply(output_sim, E), name=name)
Code Example #4
    def _build(self, laplacian, inputs):
        input_shape = tuple(inputs.get_shape().as_list())
        if len(input_shape) != 3:
            raise snt.IncompatibleShapeError(
                "{}: rank of shape must be 3 not: {}".format(
                    self.scope_name, len(input_shape)))

        if input_shape[2] is None:
            raise snt.IncompatibleShapeError(
                "{}: Input size must be specified at module build time".format(
                    self.scope_name))

        if input_shape[1] is None:
            raise snt.IncompatibleShapeError(
                "{}: Number of nodes must be specified at module build time".
                format(self.scope_name))

        if self._input_shape is not None and \
            (input_shape[2] != self._input_shape[2] or \
             input_shape[1] != self._input_shape[1]):
            raise snt.IncompatibleShapeError(
                "{}: Input shape must be [batch_size, {}, {}] not: [batch_size, {}, {}]"
                .format(self.scope_name, self._input_shape[1],
                        self._input_shape[2], input_shape[1], input_shape[2]))

        self._input_shape = input_shape
        dtype = inputs.dtype

        for k, s in self.weight_keys:
            if k not in self._initializers:
                self._initializers[k] = tfutils.create_linear_initializer(
                    self._input_shape[2], s, dtype)

        if self._use_bias:
            for k, s in self.bias_keys:
                if k not in self._initializers:
                    self._initializers[k] = tfutils.create_bias_initializer(
                        self._input_shape[2], s, dtype)

        for k, s in self.weight_keys:
            weight_shape = (self._input_shape[2], s)
            self.weights[k] = tf.get_variable(
                k,
                shape=weight_shape,
                dtype=dtype,
                initializer=self._initializers[k],
                partitioner=self._partitioners.get(k, None),
                regularizer=self._regularizers.get(k, None))
            if self.weights[k] not in tf.get_collection('weights'):
                tf.add_to_collection('weights', self.weights[k])

        if self._use_bias:
            for k, s in self.bias_keys:
                bias_shape = (s, )
                self.weights[k] = tf.get_variable(
                    k,
                    shape=bias_shape,
                    dtype=dtype,
                    initializer=self._initializers[k],
                    partitioner=self._partitioners.get(k, None),
                    regularizer=self._regularizers.get(k, None))
                if self.weights[k] not in tf.get_collection('biases'):
                    tf.add_to_collection('biases', self.weights[k])

        # Transformed node features that will be aggregated by attention
        preactiv_ = tfutils.matmul(inputs, self.weights["w"])
        # Per-node attention terms; pairwise logit (i, j) is f1_i + f2_j
        f1_ = tfutils.matmul(inputs, self.weights["f1"])
        f2_ = tfutils.matmul(inputs, self.weights["f2"])
        if self._use_bias:
            f1_ += self.weights["d1"]
            f2_ += self.weights["d2"]
        preattn_mat_ = f1_ + tf.transpose(f2_, [0, 2, 1])
        if self._sparse:
            # Sparse graph: mask the logits elementwise with the laplacian
            preattn_mat = self._attn_activ(preattn_mat_) * laplacian
        else:
            # Dense graph: add the laplacian to the logits as a bias
            preattn_mat = self._attn_activ(preattn_mat_) + laplacian
        # Row-wise softmax gives attention weights; aggregate transformed features
        attn_mat = tf.nn.softmax(preattn_mat, axis=-1)
        preactiv = tfutils.batch_matmul(attn_mat, preactiv_)
        # Linear skip connection from the raw inputs
        skip = tfutils.matmul(inputs, self.weights["u"])

        if self._use_bias:
            preactiv += self.weights["b"]
            skip += self.weights["c"]

        activ = self._activ(preactiv) + skip

        return activ
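
Stripped of the Sonnet and `tfutils` plumbing, this `_build` is a graph-attention update: `f1` and `f2` produce per-node attention terms whose sum `f1_i + f2_j` is a pairwise logit matrix, the `laplacian` either masks (sparse case) or biases (dense case) those logits, a row-wise softmax turns them into attention weights, and the attention-weighted sum of the `w`-transformed features plus a linear skip connection through `u` gives the output. A minimal dense sketch of the same flow in plain TensorFlow (TF 2.x eager; names, shapes, and initialization are illustrative, biases are omitted, and it mirrors the sparse-masking branch):

import tensorflow as tf

def graph_attention_layer(inputs, adj, out_size,
                          activ=tf.nn.relu, attn_activ=tf.nn.leaky_relu):
    """Toy dense version of the attention update in Code Example #4 (no biases)."""
    feat = int(inputs.shape[-1])
    w  = tf.Variable(0.1 * tf.random.normal([feat, out_size]))   # feature transform ("w")
    f1 = tf.Variable(0.1 * tf.random.normal([feat, 1]))          # attention term ("f1")
    f2 = tf.Variable(0.1 * tf.random.normal([feat, 1]))          # attention term ("f2")
    u  = tf.Variable(0.1 * tf.random.normal([feat, out_size]))   # skip connection ("u")

    values = tf.einsum('bnf,fo->bno', inputs, w)
    logits = (tf.einsum('bnf,fo->bno', inputs, f1)
              + tf.transpose(tf.einsum('bnf,fo->bno', inputs, f2), [0, 2, 1]))
    attn = tf.nn.softmax(attn_activ(logits) * adj, axis=-1)      # mask by graph, normalize rows
    aggregated = tf.matmul(attn, values)                         # attention-weighted neighbors
    return activ(aggregated) + tf.einsum('bnf,fo->bno', inputs, u)

x = tf.random.normal([2, 4, 8])
adj = tf.ones([2, 4, 4])
print(graph_attention_layer(x, adj, 16).shape)                   # (2, 4, 16)
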
Code Example #5
    def _build(self, laplacian, inputs):
        input_shape = tuple(inputs.get_shape().as_list())
        if len(input_shape) != 3:
            raise snt.IncompatibleShapeError(
                "{}: rank of shape must be 3 not: {}".format(
                    self.scope_name, len(input_shape)))

        if input_shape[2] is None:
            raise snt.IncompatibleShapeError(
                "{}: Input size must be specified at module build time".format(
                    self.scope_name))

        if input_shape[1] is None:
            raise snt.IncompatibleShapeError(
                "{}: Number of nodes must be specified at module build time".
                format(self.scope_name))

        if self._input_shape is not None and \
            (input_shape[2] != self._input_shape[2] or \
             input_shape[1] != self._input_shape[1]):
            raise snt.IncompatibleShapeError(
                "{}: Input shape must be [batch_size, {}, {}] not: [batch_size, {}, {}]"
                .format(self.scope_name, self._input_shape[1],
                        self._input_shape[2], input_shape[1], input_shape[2]))

        self._input_shape = input_shape
        dtype = inputs.dtype

        if "w" not in self._initializers:
            self._initializers["w"] = tfutils.create_linear_initializer(
                self._input_shape[2], self._output_size, dtype)
        if "u" not in self._initializers:
            self._initializers["u"] = tfutils.create_linear_initializer(
                self._input_shape[2], self._output_size, dtype)

        if "b" not in self._initializers and self._use_bias:
            self._initializers["b"] = tfutils.create_bias_initializer(
                self._input_shape[2], self._output_size, dtype)
        if "c" not in self._initializers and self._use_bias:
            self._initializers["c"] = tfutils.create_bias_initializer(
                self._input_shape[2], self._output_size, dtype)

        weight_shape = (self._input_shape[2], self.output_size)
        self._w = tf.get_variable(
            "w",
            shape=weight_shape,
            dtype=dtype,
            initializer=self._initializers["w"],
            partitioner=self._partitioners.get("w", None),
            regularizer=self._regularizers.get("w", None))
        if self._w not in tf.get_collection('weights'):
            tf.add_to_collection('weights', self._w)
        self._u = tf.get_variable(
            "u",
            shape=weight_shape,
            dtype=dtype,
            initializer=self._initializers["u"],
            partitioner=self._partitioners.get("u", None),
            regularizer=self._regularizers.get("u", None))
        if self._u not in tf.get_collection('weights'):
            tf.add_to_collection('weights', self._u)
        # Graph convolution: propagate the transformed features with the laplacian
        preactiv_ = tfutils.matmul(inputs, self._w)
        preactiv = tfutils.batch_matmul(laplacian, preactiv_)
        # Linear skip connection from the raw inputs
        skip = tfutils.matmul(inputs, self._u)

        if self._use_bias:
            bias_shape = (self.output_size, )
            self._b = tf.get_variable(
                "b",
                shape=bias_shape,
                dtype=dtype,
                initializer=self._initializers["b"],
                partitioner=self._partitioners.get("b", None),
                regularizer=self._regularizers.get("b", None))
            if self._b not in tf.get_collection('biases'):
                tf.add_to_collection('biases', self._b)
            self._c = tf.get_variable(
                "c",
                shape=bias_shape,
                dtype=dtype,
                initializer=self._initializers["c"],
                partitioner=self._partitioners.get("c", None),
                regularizer=self._regularizers.get("c", None))
            if self._c not in tf.get_collection('biases'):
                tf.add_to_collection('biases', self._c)
            preactiv += self._b
            skip += self._c

        activ = self._activ(preactiv) + skip

        return activ
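
For comparison with Code Example #4, this `_build` has no attention: it is a single graph-convolution step, output = activ(laplacian @ (inputs @ w) + b) + (inputs @ u + c), i.e. features are linearly transformed, propagated by the fixed `laplacian`, passed through the activation, and added to a linear skip path. A toy dense sketch in plain TensorFlow (TF 2.x eager; names and initialization are illustrative, and `tfutils.matmul` / `tfutils.batch_matmul` are assumed to be ordinary batched matrix multiplies):

import tensorflow as tf

def graph_conv_layer(laplacian, inputs, out_size, activ=tf.nn.relu):
    """Toy dense version of the propagation in Code Example #5."""
    feat = int(inputs.shape[-1])
    w = tf.Variable(0.1 * tf.random.normal([feat, out_size]))   # feature transform ("w")
    u = tf.Variable(0.1 * tf.random.normal([feat, out_size]))   # skip connection ("u")
    b = tf.Variable(tf.zeros([out_size]))                       # bias ("b")
    c = tf.Variable(tf.zeros([out_size]))                       # skip bias ("c")
    propagated = tf.matmul(laplacian, tf.einsum('bnf,fo->bno', inputs, w)) + b
    skip = tf.einsum('bnf,fo->bno', inputs, u) + c
    return activ(propagated) + skip

x = tf.random.normal([2, 4, 8])
lap = tf.eye(4, batch_shape=[2])                                # stand-in graph operator
print(graph_conv_layer(lap, x, 16).shape)                       # (2, 4, 16)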