Example #1
def derotation(src_img, trt_img, rotation, input_fov, output_fov, output_shape,
               derotate_both):
    """Transform a pair of images to cancel out the rotation.

  Args:
    src_img: [BATCH, HEIGHT, WIDTH, CHANNEL] input source images.
    trt_img: [BATCH, HEIGHT, WIDTH, CHANNEL] input target images.
    rotation: [BATCH, 3, 3] relative rotations between src_img and trt_img.
    input_fov: [BATCH] a 1-D tensor (float32) of input field of view in degrees.
    output_fov: (float) output field of view in degrees.
    output_shape: a 2-D list of output dimension [height, width].
    derotate_both: Derotate both input images to an intermediate frame using
      half of the relative rotation between them.

  Returns:
    transformed images [BATCH, height, width, CHANNELS].
  """
    batch = src_img.shape.as_list()[0]
    if derotate_both:
        half_derotation = half_rotation(rotation)
        transformed_src = transformation.rotate_image_in_3d(
            src_img, tf.matrix_transpose(half_derotation), input_fov,
            output_fov, output_shape)

        transformed_trt = transformation.rotate_image_in_3d(
            trt_img, half_derotation, input_fov, output_fov, output_shape)
    else:
        transformed_src = transformation.rotate_image_in_3d(
            src_img, tf.eye(3, batch_shape=[batch]), input_fov, output_fov,
            output_shape)

        transformed_trt = transformation.rotate_image_in_3d(
            trt_img, rotation, input_fov, output_fov, output_shape)

    return (transformed_src, transformed_trt)
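The `half_rotation` helper comes from elsewhere in the source module. As a hedged illustration of what such a helper plausibly computes (half the axis-angle of each rotation; the name and exact behavior here are assumptions, not the module's verified implementation), a NumPy/SciPy sketch:

import numpy as np
from scipy.spatial.transform import Rotation

def half_rotation_np(rotation):
    # rotation: [BATCH, 3, 3] -> [BATCH, 3, 3] with half the rotation angle.
    rotvec = Rotation.from_matrix(rotation).as_rotvec()  # axis * angle per matrix
    return Rotation.from_rotvec(0.5 * rotvec).as_matrix()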
Example #2
    def decode_line(line):
        """Decode text lines."""
        DataPair = collections.namedtuple(
            'DataPair',
            ['src_img', 'trt_img', 'fov', 'rotation', 'translation'])

        # Each line holds 10 space-separated fields: a directory name, two
        # image names, and the flattened fov/r1/t1/r2/t2/sampled_r1/sampled_r2
        # values parsed below.
        splitted = tf.decode_csv(line, [''] * 10, field_delim=' ')

        img1 = load_single_image(pano_data_dir + splitted[0] + '/' +
                                 splitted[1] + '.jpeg')
        img2 = load_single_image(pano_data_dir + splitted[0] + '/' +
                                 splitted[2] + '.jpeg')
        fov = string_to_matrix(splitted[3], [1])
        r1 = string_to_matrix(splitted[4], [3, 3])
        t1 = string_to_matrix(splitted[5], [3])
        r2 = string_to_matrix(splitted[6], [3, 3])
        t2 = string_to_matrix(splitted[7], [3])
        sampled_r1 = string_to_matrix(splitted[8], [3, 3])
        sampled_r2 = string_to_matrix(splitted[9], [3, 3])

        r_c2_to_c1 = tf.matmul(sampled_r1, sampled_r2, transpose_a=True)
        t_c1 = tf.squeeze(
            tf.matmul(sampled_r1,
                      tf.expand_dims(tf.nn.l2_normalize(t2 - t1), -1),
                      transpose_a=True))

        sampled_rotation = tf.matmul(tf.stack([sampled_r1, sampled_r2], 0),
                                     tf.stack([r1, r2], 0),
                                     transpose_a=True)

        sampled_views = transformation.rectilinear_projection(
            tf.stack([img1, img2], 0), [output_height, output_width], fov,
            tf.matrix_transpose(sampled_rotation))
        src_img, trt_img = sampled_views[0], sampled_views[1]
        return DataPair(src_img, trt_img, fov, r_c2_to_c1, t_c1)
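As a plain NumPy restatement of the pose algebra in decode_line (the world-to-camera convention is an assumption inferred from the transposes): the rotation taking camera-2 coordinates to camera-1 coordinates is r1ᵀ r2, and the unit baseline is the normalized translation difference expressed in camera-1 coordinates.

import numpy as np

def relative_pose(r1, t1, r2, t2):
    r_c2_to_c1 = r1.T @ r2                               # matmul(r1, r2, transpose_a=True)
    t_c1 = r1.T @ ((t2 - t1) / np.linalg.norm(t2 - t1))  # unit baseline in camera 1
    return r_c2_to_c1, t_c1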
Example #3
    def _scaled_square_dist(self, X, X2):
        """
        Returns ((X - X2ᵀ)/lengthscales)².
        Due to the implementation and floating-point imprecision, the
        result may actually be very slightly negative for entries very
        close to each other.
        """
        X = X / self.lengthscales
        Xs = tf.reduce_sum(tf.square(X), axis=-1, keepdims=True)

        if X2 is None:
            dist = -2 * tf.matmul(X, X, transpose_b=True)
            dist += Xs + tf.matrix_transpose(Xs)
            return dist

        X2 = X2 / self.lengthscales
        X2s = tf.reduce_sum(tf.square(X2), axis=-1, keepdims=True)
        dist = -2 * tf.matmul(X, X2, transpose_b=True)
        dist += Xs + tf.matrix_transpose(X2s)
        return dist
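The method relies on the expansion ‖x − y‖² = ‖x‖² + ‖y‖² − 2xᵀy, assembling the full pairwise matrix from row norms plus a single matmul. A small NumPy check of that identity (illustrative only):

import numpy as np

X = np.random.randn(5, 3)
X2 = np.random.randn(7, 3)
Xs = (X ** 2).sum(-1, keepdims=True)    # [5, 1] squared row norms of X
X2s = (X2 ** 2).sum(-1, keepdims=True)  # [7, 1] squared row norms of X2
dist = Xs + X2s.T - 2 * X @ X2.T        # [5, 7] pairwise squared distances
ref = ((X[:, None, :] - X2[None, :, :]) ** 2).sum(-1)
assert np.allclose(dist, ref)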
Example #4
    def forward(self, x, **kwargs):
        """Implementation Reference:
       [1] https://stackoverflow.com/questions/38235555/tensorflow-matmul-of-input-matrix-with-batch-data
    """
        assert isinstance(x, tf.Tensor) and len(x.shape) == 3
        dim1, dim2 = self.dim1, self.dim2
        xd1, xd2 = x.shape.as_list()[1:]

        # TODO: to be encapsulated into hyper
        # Get weights
        W1 = tf.get_variable('W1',
                             shape=[dim1, xd1],
                             dtype=th.dtype,
                             initializer=self._weight_initializer,
                             constraint=self.constraint)
        W2 = tf.get_variable('W2',
                             shape=[xd2, dim2],
                             dtype=th.dtype,
                             initializer=self._weight_initializer,
                             constraint=self.constraint)
        # Do bilinear calculation [1]: per batch element, y = W1 @ x @ W2
        XT = tf.matrix_transpose(x)  # [batch, xd2, xd1]
        XTW1T = tf.matmul(tf.reshape(XT, [-1, xd1]), W1, transpose_b=True)  # [batch*xd2, dim1]
        W1X = tf.matrix_transpose(tf.reshape(XTW1T, [-1, xd2, dim1]))  # [batch, dim1, xd2]
        y = tf.reshape(tf.reshape(W1X, [-1, xd2]) @ W2, [-1, dim1, dim2])

        # Add bias if necessary
        if self._use_bias:
            bias = tf.get_variable('bias',
                                   shape=[dim2],
                                   dtype=th.dtype,
                                   initializer=self._bias_initializer)
            y = tf.nn.bias_add(y, bias)

        # Apply activation if provided
        if self._activation: y = self._activation(y)

        return y
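The reshape/transpose dance above computes, per batch element, y = W1 · x · W2. Assuming the TensorFlow version in use supports multi-operand einsum, the same contraction can be written in one line; a sketch, not the layer's actual implementation:

# W1: [dim1, xd1], x: [batch, xd1, xd2], W2: [xd2, dim2] -> y: [batch, dim1, dim2]
y = tf.einsum('ij,bjk,kl->bil', W1, x, W2)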
Example #5
def svd_orthogonalize(m):
    """Convert 9D representation to SO(3) using SVD orthogonalization.

  Args:
    m: [BATCH, 3, 3] 3x3 matrices.

  Returns:
    [BATCH, 3, 3] SO(3) rotation matrices.
  """
    m_transpose = tf.matrix_transpose(tf.math.l2_normalize(m, axis=-1))
    _, u, v = tf.svd(m_transpose)
    det = tf.linalg.det(tf.matmul(v, u, transpose_b=True))
    # Check orientation reflection.
    r = tf.matmul(tf.concat(
        [v[:, :, :-1], v[:, :, -1:] * tf.reshape(det, [-1, 1, 1])], 2),
                  u,
                  transpose_b=True)
    return r
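For reference, this is the special orthogonal Procrustes projection: with m = U S Vᵀ, the nearest rotation is U diag(1, 1, det(UVᵀ)) Vᵀ, where the determinant factor rules out reflections. A hedged NumPy analogue for a single matrix (the row normalization in the TF version is a conditioning step and is omitted here):

import numpy as np

def svd_orthogonalize_np(m):
    u, _, vt = np.linalg.svd(m)
    d = np.linalg.det(u @ vt)  # +1 for a rotation, -1 for a reflection
    u[:, -1] *= d              # flip the last column to force det(r) = +1
    return u @ vt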
Example #6
File: lkj.py Project: kiwtir/probability
 def _validate_correlationness(self, x):
   if not self.validate_args or self.input_output_cholesky:
     return x
   checks = [
       assert_util.assert_less_equal(
           dtype_util.as_numpy_dtype(x.dtype)(-1),
           x,
           message='Correlations must be >= -1.'),
       assert_util.assert_less_equal(
           x,
           dtype_util.as_numpy_dtype(x.dtype)(1),
           message='Correlations must be <= 1.'),
       assert_util.assert_near(
           tf.linalg.diag_part(x),
           dtype_util.as_numpy_dtype(x.dtype)(1),
           message='Self-correlations must be = 1.'),
       assert_util.assert_near(
           x,
           tf1.matrix_transpose(x),
           message='Correlation matrices must be symmetric')
   ]
   with tf.control_dependencies(checks):
     return tf.identity(x)
Example #7
	def __init__(self, log_unnormalized_prob, gmm=None, k=10, loc=0., std=1., ndim=None, loc_tril=None,
				 samples=20, temp=1., cov_type='diag', loc_scale=1., priors_scale=1e1):
		"""

		:param log_unnormalized_prob:	Unnormalized log density to estimate
		:type log_unnormalized_prob: 	a tensorflow function that takes [batch_size, ndim]
			as input and returns [batch_size]
		:param gmm:		an optional pre-built GMM distribution; if None, one is constructed
		:param k:		number of components for GMM approximation
		:param loc:		for initialization, mean
		:param std:		for initialization, standard deviation
		:param ndim:	dimensionality of x; required if gmm is None
		"""
		self.log_prob = log_unnormalized_prob
		self.ndim = ndim
		self.temp = temp

		if gmm is None:
			assert ndim is not None, "If no gmm is defined, should give the shape of x"

			if cov_type == 'diag':
				_log_priors_var = tf.Variable(1. / priors_scale * log_normalize(tf.ones(k)))
				log_priors = priors_scale * _log_priors_var

				if isinstance(loc, tf.Tensor) and loc.shape.ndims == 2:
					_locs_var = tf.Variable(1. / loc_scale * loc)
					locs = loc_scale * _locs_var
				else:
					_locs_var = tf.Variable(
						1. / loc_scale * tf.random.normal((k, ndim), loc, std))
					locs = loc_scale * _locs_var

				log_std_diags = tf.Variable(tf.math.log(std/k * tf.ones((k, ndim))))

				self._opt_params = [_log_priors_var, _locs_var, log_std_diags]

				gmm = _distributions.MixtureSameFamily(
					mixture_distribution=_distributions.Categorical(logits=log_priors),
					components_distribution=_distributions.MultivariateNormalDiag(
						loc=locs, scale_diag=tf.math.exp(log_std_diags)
					)
				)

			elif cov_type == 'full':
				_log_priors_var = tf.Variable(1./priors_scale * log_normalize(tf.ones(k)))
				log_priors = priors_scale * _log_priors_var

				if isinstance(loc, tf.Tensor) and loc.shape.ndims == 2:
					_locs_var = tf.Variable(1. / loc_scale * loc)
					locs = loc_scale * _locs_var
				else:
					_locs_var = tf.Variable(1./loc_scale * tf.random.normal((k, ndim), loc, std))
					locs = loc_scale * _locs_var


				loc_tril = loc_tril if loc_tril is not None else std/k
				# tril_cov = tf.Variable(loc_tril ** 2 * tf.eye(ndim, batch_shape=(k, )))

				tril_cov = tf.Variable(tf1.log(loc_tril) * tf.eye(ndim, batch_shape=(k, )))

				covariance = tf.linalg.expm(tril_cov + tf1.matrix_transpose(tril_cov))
				self._opt_params = [_log_priors_var, _locs_var, tril_cov]

				gmm = _distributions.MixtureSameFamily(
					mixture_distribution=_distributions.Categorical(logits=log_priors),
					components_distribution=_distributions.MultivariateNormalFullCovariance(
						loc=locs, covariance_matrix=covariance
					)
				)

			else:
				raise ValueError("Unrecognized covariance type")

		self.k = k
		self.num_samples = samples
		self.gmm = gmm
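A note on the 'full' covariance parametrization: for any square matrix A, expm(A + Aᵀ) is symmetric positive definite (symmetric exponent, eigenvalues exp(λ) > 0), so `covariance` remains a valid covariance for an unconstrained `tril_cov`. A quick NumPy/SciPy check:

import numpy as np
from scipy.linalg import expm

A = np.random.randn(3, 3)
S = expm(A + A.T)
assert np.allclose(S, S.T)                 # symmetric
assert np.all(np.linalg.eigvalsh(S) > 0)   # strictly positive eigenvalues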
Example #8
def _matmul(x, y):
    if hub.sparse_gam:
        # sparse_tensor_dense_matmul needs the sparse operand (y) first, so
        # compute y^T x^T = (x y)^T and transpose the result back.
        return tf.matrix_transpose(
            tf.sparse_tensor_dense_matmul(y, x, True, True))
    return tf.matmul(x, y)
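The sparse branch works around sparse_tensor_dense_matmul expecting the sparse operand on the left: it computes yᵀxᵀ = (x y)ᵀ and transposes back. The identity, checked in NumPy:

import numpy as np

x = np.random.randn(3, 4)
y = np.random.randn(4, 5)
assert np.allclose((y.T @ x.T).T, x @ y)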
Example #9
    def density(self, xi, t=0):
        x, dx = xi[:, :self._u_dim], xi[:, self._u_dim:]

        ys = [f(x) for f in self.fs]  # transform state
        js = [j(x) for j in self.js]  # get jacobians

        pinv_js_trsp = []
        for j in js:
            if isinstance(j, tf.linalg.LinearOperatorIdentity):
                pinv_js_trsp += [j]
            else:
                pinv_js_trsp += [
                    tf.linalg.LinearOperatorFullMatrix(
                        # damped right pseudo-inverse of J^T (1e-5 damping)
                        damped_pinv_right(tf1.matrix_transpose(j._matrix),
                                          1e-5))
                ]

        dys = [j.matvec(dx) for j in js]  # get velocities in transformed space

        # "velocities" in transformed space from the different policies
        fys_locs_covs = [
            self.pis[i](ys[i], dys[i], t) for i in range(self.n_experts)
        ]

        # separate locs and covs
        fys_locs = [_y[0] for _y in fys_locs_covs]
        fys_covs = [_y[1] for _y in fys_locs_covs]

        # expert precisions in the transformed space
        fys_precs = [tf.linalg.inv(fys_covs[i]) for i in range(self.n_experts)]

        # fxs_eta = [tf.linalg.LinearOperatorFullMatrix(pinv_js_trsp[i].matmul(
        # 	fys_precs[i], adjoint=True)).matvec(
        # 	fys_locs[i]) for i in range(self.n_experts)]

        fxs_mus = [
            js[i].matvec(fys_locs[i], adjoint=True)
            for i in range(self.n_experts)
        ]

        # fxs_precs = [
        # 	tf.linalg.inv(
        # 		matquad(js[i], fys_covs[i]) + self._reg ** 2 * tf.eye(self.product_size))
        # 	for i in range(self.n_experts)
        # ]

        fxs_precs = [
            matquad(pinv_js_trsp[i], fys_precs[i])
            for i in range(self.n_experts)
        ]

        # fxs_precs = tf.Print(fxs_precs, [fxs_precs])
        fxs_eta = [
            tf.linalg.matvec(fxs_precs[i], fxs_mus[i])
            for i in range(self.n_experts)
        ]
        # compute product of Gaussian policies
        precs = tf.reduce_sum(fxs_precs, axis=0)

        covs = tf.linalg.inv(precs)

        etas = tf.reduce_sum(fxs_eta, axis=0)
        locs = tf.linalg.LinearOperatorFullMatrix(covs).matvec(etas)

        return ds.MultivariateNormalTriL(locs, tf.linalg.cholesky(covs))
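The final aggregation is the standard product of Gaussian experts: precisions add, natural parameters η = Λμ add, and the product mean is recovered as μ = Λ⁻¹ Σᵢ Λᵢ μᵢ. A NumPy illustration:

import numpy as np

# Two expert Gaussians N(m_i, inv(P_i)) over a 2-D state.
P1, P2 = np.diag([2.0, 1.0]), np.diag([1.0, 3.0])
m1, m2 = np.array([1.0, 0.0]), np.array([0.0, 1.0])

P = P1 + P2                                 # summed precisions
mu = np.linalg.solve(P, P1 @ m1 + P2 @ m2)  # mean from summed etas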
Example #10
    def __init__(self,
                 loc=None,
                 covariance_matrix=None,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="MultivariateNormalFullCovariance"):
        """Construct Multivariate Normal distribution on `R^k`.

        The `batch_shape` is the broadcast shape between `loc` and
        `covariance_matrix` arguments.

        The `event_shape` is given by last dimension of the matrix implied by
        `covariance_matrix`. The last dimension of `loc` (if provided) must
        broadcast with this.

        A non-batch `covariance_matrix` matrix is a `k x k` symmetric positive
        definite matrix.  In other words it is (real) symmetric with all
        eigenvalues strictly positive.

        Additional leading dimensions (if any) will index batches.

        Args:
          loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
            implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]`
            where `b >= 0` and `k` is the event size.
          covariance_matrix: Floating-point, symmetric positive definite
            `Tensor` of same `dtype` as `loc`.  The strict upper triangle of
            `covariance_matrix` is ignored, so if `covariance_matrix` is not
            symmetric no error will be raised (unless `validate_args is
            True`).  `covariance_matrix` has shape `[B1, ..., Bb, k, k]` where
            `b >= 0` and `k` is the event size.
          validate_args: Python `bool`, default `False`. When `True`
            distribution parameters are checked for validity despite possibly
            degrading runtime performance. When `False` invalid inputs may
            silently render incorrect outputs.
          allow_nan_stats: Python `bool`, default `True`. When `True`,
            statistics (e.g., mean, mode, variance) use the value "`NaN`" to
            indicate the result is undefined. When `False`, an exception is
            raised if one or more of the statistic's batch members are
            undefined.
          name: Python `str` name prefixed to Ops created by this class.

        Raises:
          ValueError: if neither `loc` nor `covariance_matrix` are specified.
        """
        parameters = dict(locals())

        # Convert the covariance_matrix up to a scale_tril and call MVNTriL.
        with tf.name_scope(name) as name:
            with tf.name_scope("init"):
                dtype = dtype_util.common_dtype([loc, covariance_matrix],
                                                tf.float32)
                loc = loc if loc is None else tf.convert_to_tensor(
                    value=loc, name="loc", dtype=dtype)
                if covariance_matrix is None:
                    scale_tril = None
                else:
                    covariance_matrix = tf.convert_to_tensor(
                        value=covariance_matrix,
                        name="covariance_matrix",
                        dtype=dtype)
                    if validate_args:
                        covariance_matrix = distribution_util.with_dependencies(
                            [
                                assert_util.assert_near(
                                    covariance_matrix,
                                    tf1.matrix_transpose(covariance_matrix),
                                    message="Matrix was not symmetric")
                            ], covariance_matrix)
                    # No need to validate that covariance_matrix is non-singular.
                    # LinearOperatorLowerTriangular has an assert_non_singular method that
                    # is called by the Bijector.
                    # However, cholesky() ignores the upper triangular part, so we do need
                    # to separately assert symmetric.
                    scale_tril = tf.linalg.cholesky(covariance_matrix)
                super(MultivariateNormalFullCovariance,
                      self).__init__(loc=loc,
                                     scale_tril=scale_tril,
                                     validate_args=validate_args,
                                     allow_nan_stats=allow_nan_stats,
                                     name=name)
        self._parameters = parameters
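A minimal usage sketch for the class above (assuming it is exposed as tfp.distributions.MultivariateNormalFullCovariance):

import tensorflow_probability as tfp

tfd = tfp.distributions
mvn = tfd.MultivariateNormalFullCovariance(
    loc=[1., 2.],
    covariance_matrix=[[1., 0.5],
                       [0.5, 2.]])  # symmetric positive definite
samples = mvn.sample(3)             # shape [3, 2]
log_p = mvn.log_prob(samples)       # shape [3]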