def _apply_blur(image):
    image_ = tf.expand_dims(image, axis=0)

    # Identity over the three colour channels: shape [1, 1, 3, 3], i.e.
    # [filter_height, filter_width, in_channels, out_channels] for conv2d.
    kernel_ = tf.expand_dims(tf.expand_dims(tf.eye(3), 0), 0)

    # Horizontal blur with distance sampled from beta distribution
    horizontal_blur_ = tf.random_gamma([], horizontal_blur_alpha_)
    horizontal_blur_ = horizontal_blur_ / (
        horizontal_blur_ + tf.random_gamma([], horizontal_blur_beta_))
    horizontal_blur_ = tf.cast(horizontal_blur_ * horizontal_blur_max_,
                               tf.int32) + 1

    horizontal_blur_kernel_ = tf.tile(kernel_, (1, horizontal_blur_, 1, 1))
    horizontal_blur_kernel_ = horizontal_blur_kernel_ / tf.cast(
        horizontal_blur_, tf.float32)

    image_ = tf.nn.conv2d(image_, horizontal_blur_kernel_, [1, 1, 1, 1],
                          'SAME')

    # Vertical blur with distance sampled from beta distribution
    vertical_blur_ = tf.random_gamma([], vertical_blur_alpha_)
    vertical_blur_ = vertical_blur_ / (
        vertical_blur_ + tf.random_gamma([], vertical_blur_beta_))
    vertical_blur_ = tf.cast(vertical_blur_ * vertical_blur_max_, tf.int32) + 1

    vertical_blur_kernel_ = tf.tile(kernel_, (vertical_blur_, 1, 1, 1))
    vertical_blur_kernel_ = vertical_blur_kernel_ / tf.cast(
        vertical_blur_, tf.float32)

    image_ = tf.nn.conv2d(image_, vertical_blur_kernel_, [1, 1, 1, 1], 'SAME')

    return image_[0]
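A minimal usage sketch; the `horizontal_blur_*_` and `vertical_blur_*_` module globals are not defined in the snippet, so the values below are assumptions:

# Hypothetical Beta/Gamma parameters assumed by _apply_blur; a Beta(alpha,
# beta) draw scaled by *_max_ gives the blur length in pixels.
horizontal_blur_alpha_ = 2.0
horizontal_blur_beta_ = 5.0
horizontal_blur_max_ = 10
vertical_blur_alpha_ = 2.0
vertical_blur_beta_ = 5.0
vertical_blur_max_ = 10

image = tf.random_uniform([64, 64, 3])  # HWC float image
blurred = _apply_blur(image)
with tf.Session() as sess:
    print(sess.run(blurred).shape)  # (64, 64, 3)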
Example No. 2
    def test_conv1d_weighted_gram(self,
                                  input_size,
                                  kernel_size,
                                  output_size,
                                  padding,
                                  strides,
                                  dtype=tf.float64,
                                  atol=1.e-9,
                                  block_size=0):
        num_targets = 3
        batch_size = 2
        input_channels = 13
        output_channels = 11

        w = tf.random_normal(
            dtype=dtype, shape=[kernel_size, input_channels, output_channels])
        d = tf.random_gamma(
            alpha=1,
            dtype=dtype,
            shape=[num_targets, batch_size, output_size, output_channels])
        beta = tf.random_gamma(
            alpha=1,
            dtype=dtype,
            shape=[num_targets, batch_size, input_size, input_channels])

        proj = gram_calcs.conv_weighted_gram_abs_projection(
            w, d, beta, padding, strides, block_size=block_size)
        proj_slow = conv_weighted_gram_abs_projection_slow(
            w, d, beta, padding, strides)

        with tf.Session() as session:
            proj_val, proj_slow_val = session.run((proj, proj_slow))
            self.assertAllClose(proj_val, proj_slow_val, atol=atol)
Example No. 3
    def sample(self):
        alpha_sample = tf.random_gamma(shape=(), alpha=self.alpha)
        beta_sample = tf.random_gamma(shape=(), alpha=self.beta)

        sample = alpha_sample / tf.maximum(x=(alpha_sample + beta_sample),
                                           y=1e-6)

        return self.min_value + sample * (self.max_value - self.min_value)
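The two `tf.random_gamma` draws implement the standard identity Beta(a, b) = G_a / (G_a + G_b) for independent G_a ~ Gamma(a, 1) and G_b ~ Gamma(b, 1). A standalone sketch (the parameter values are arbitrary):

import tensorflow as tf

a, b = 2.0, 5.0
g_a = tf.random_gamma([10000], alpha=a)
g_b = tf.random_gamma([10000], alpha=b)
beta_samples = g_a / (g_a + g_b)  # distributed as Beta(2, 5)
with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(beta_samples)))  # ~ a / (a + b) = 2/7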
Example No. 4
def q_theta_x(name, lay, x, reuse=False):
    with tf.variable_scope('q_theta_x' + name, reuse=reuse):
        # if lay == 1:
        #     bb = sio.loadmat('C:/Users/yulai/Dropbox/[201709]Variance Reduction/Python Code/data/tmpmat5111.mat')
        #     theta1 = tf.constant(bb['theta1'], dtype=tf.float32)
        #     theta_alpha1 = tf.constant(bb['theta_alpha1'], dtype=tf.float32)
        #     theta_beta1 = tf.constant(bb['theta_beta1'], dtype=tf.float32)
        # else:
        # h1 = x
        h1 = tf.layers.batch_normalization(x)
        h1 = tf.nn.relu(
            tf.layers.dense(h1, units=h_dim[lay], kernel_initializer=tf.random_normal_initializer(0, 0.001)))
        h1 = tf.nn.relu(
            tf.layers.dense(h1, units=h_dim[lay], kernel_initializer=tf.random_normal_initializer(0, 0.001)))

        theta_Ralpha1 = tf.layers.dense(h1, units=K[lay], kernel_initializer=tf.random_normal_initializer(0, 0.001))
        theta_Ralpha1 = max_m_grad(min_theta_alpha_rate, theta_Ralpha1)
        theta_alpha1 = tf.nn.softplus(theta_Ralpha1)

        theta_Rbeta1 = tf.layers.dense(h1, units=K[lay], kernel_initializer=tf.random_normal_initializer(0, 0.001))
        theta_Rbeta1 = max_m_grad(min_theta_beta_rate, theta_Rbeta1)
        theta_beta1 = tf.nn.softplus(theta_Rbeta1)

        theta_hat1s = tf.random_gamma([1], tf.stop_gradient(theta_alpha1), 1.)
        theta_hat1s = tf.minimum(theta_hat1s,
                                 theta_alpha1 + tf.maximum(3., 2. * tf.log(theta_alpha1)) * tf.sqrt(theta_alpha1))
        theta_hat1s = tf.maximum(min_theta, tf.squeeze(theta_hat1s, 0))
        Grad_theta_alpha1 = GO_Gamma_v2(tf.stop_gradient(theta_hat1s), tf.stop_gradient(theta_alpha1))
        theta_hat1 = theta_alpha1 * tf.stop_gradient(Grad_theta_alpha1) - \
                     tf.stop_gradient(theta_alpha1 * Grad_theta_alpha1) + \
                     tf.stop_gradient(theta_hat1s)

        theta1 = theta_hat1 / theta_beta1

        # Next Layer - c_j
        # h2 = theta1
        h2 = tf.layers.batch_normalization(theta1)
        # h2 = tf.nn.relu(tf.layers.dense(h2, units=128, kernel_initializer=tf.random_normal_initializer(0, 0.001)))

        c_alpha2 = tf.layers.dense(h2, units=1, kernel_initializer=tf.random_normal_initializer(0, 0.001))
        c_alpha2 = tf.nn.softplus(max_m_grad(min_theta_alpha_rate, c_alpha2))

        c_beta2 = tf.layers.dense(h2, units=1, kernel_initializer=tf.random_normal_initializer(0, 0.001))
        c_beta2 = tf.nn.softplus(max_m_grad(min_theta_beta_rate, c_beta2))

        c_hat2s = tf.random_gamma([1], tf.stop_gradient(c_alpha2), 1.)
        c_hat2s = tf.minimum(c_hat2s, c_alpha2 + tf.maximum(3., 2. * tf.log(c_alpha2)) * tf.sqrt(c_alpha2))
        c_hat2s = tf.maximum(min_theta, tf.squeeze(c_hat2s, 0))
        Grad_c_alpha2 = GO_Gamma_v2(tf.stop_gradient(c_hat2s), tf.stop_gradient(c_alpha2))
        c_hat2 = c_alpha2 * tf.stop_gradient(Grad_c_alpha2) - \
                 tf.stop_gradient(c_alpha2 * Grad_c_alpha2) + \
                 tf.stop_gradient(c_hat2s)

        c2 = 0.1 + c_hat2 / c_beta2
        # c2 = max_m_grad(0.3, c2)

        return theta1, theta_alpha1, theta_beta1, c2, c_alpha2, c_beta2
Example No. 5
    def sample(self):
        # Non-deterministic: sample action using gamma distribution
        alpha_sample = tf.random_gamma(shape=(), alpha=self.alpha)
        beta_sample = tf.random_gamma(shape=(), alpha=self.beta)

        sampled = beta_sample / tf.maximum(x=(alpha_sample + beta_sample),
                                           y=1e-6)

        return sampled
Example No. 6
 def testShape(self):
     # Fully known shape.
     rnd = tf.random_gamma([150], 2.0)
     self.assertEqual([150], rnd.get_shape().as_list())
     rnd = tf.random_gamma([150], 2.0, beta=[3.0, 4.0])
     self.assertEqual([150, 2], rnd.get_shape().as_list())
     rnd = tf.random_gamma([150], tf.ones([1, 2, 3]))
     self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
     rnd = tf.random_gamma([20, 30], tf.ones([1, 2, 3]))
     self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
     rnd = tf.random_gamma([123], tf.placeholder(tf.float32, shape=(2, )))
     self.assertEqual([123, 2], rnd.get_shape().as_list())
     # Partially known shape.
     rnd = tf.random_gamma(tf.placeholder(tf.int32, shape=(1, )),
                           tf.ones([7, 3]))
     self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
     rnd = tf.random_gamma(tf.placeholder(tf.int32, shape=(3, )),
                           tf.ones([9, 6]))
     self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
     # Unknown shape.
     rnd = tf.random_gamma(tf.placeholder(tf.int32),
                           tf.placeholder(tf.float32))
     self.assertIs(None, rnd.get_shape().ndims)
     rnd = tf.random_gamma([50], tf.placeholder(tf.float32))
     self.assertIs(None, rnd.get_shape().ndims)
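The rule this test exercises: the output's static shape is the requested `shape` prefixed onto the broadcast of `alpha` and `beta`. A one-off illustration (shapes chosen arbitrarily):

rnd = tf.random_gamma([5], alpha=tf.ones([2, 3]), beta=tf.ones([3]))
print(rnd.get_shape().as_list())  # [5, 2, 3]: [5] + broadcast([2, 3], [3])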
Example No. 7
    def sample(self):
        deterministic = self.mean

        alpha_sample = tf.random_gamma(shape=(), alpha=self.alpha)
        beta_sample = tf.random_gamma(shape=(), alpha=self.beta)

        sample = beta_sample / tf.maximum(x=(alpha_sample + beta_sample),
                                          y=util.epsilon)

        return self.min_value + tf.where(condition=self.deterministic, x=deterministic, y=sample) * \
                                (self.max_value - self.min_value)
Example No. 8
 def _sample_n(self, n, seed=None):
     seed = seed_stream.SeedStream(seed, "gamma_gamma")
     rate = tf.random_gamma(shape=[n],
                            alpha=self.mixing_concentration,
                            beta=self.mixing_rate,
                            dtype=self.dtype,
                            seed=seed())
     return tf.random_gamma(shape=[],
                            alpha=self.concentration,
                            beta=rate,
                            dtype=self.dtype,
                            seed=seed())
Example No. 9
  def testNoCSE(self):
    """CSE = constant subexpression eliminator.

    SetIsStateful() should prevent two identical random ops from getting
    merged.
    """
    for dtype in tf.float16, tf.float32, tf.float64:
      for use_gpu in [False, True]:
        with self.test_session(use_gpu=use_gpu):
          rnd1 = tf.random_gamma([24], 2.0, dtype=dtype)
          rnd2 = tf.random_gamma([24], 2.0, dtype=dtype)
          diff = rnd2 - rnd1
          self.assertGreater(np.linalg.norm(diff.eval()), 0.1)
Example No. 10
 def _sample_n(self, n, seed=None):
   seed = seed_stream.SeedStream(seed, "gamma_gamma")
   rate = tf.random_gamma(
       shape=[n],
       alpha=self.mixing_concentration,
       beta=self.mixing_rate,
       dtype=self.dtype,
       seed=seed())
   return tf.random_gamma(
       shape=[],
       alpha=self.concentration,
       beta=rate,
       dtype=self.dtype,
       seed=seed())
Example No. 11
    def tf_sample(self, distr_params, deterministic):
        alpha, beta, alpha_beta, _ = distr_params

        # Deterministic: mean as action
        definite = beta / alpha_beta

        # Non-deterministic: sample action using gamma distribution
        alpha_sample = tf.random_gamma(shape=(), alpha=alpha)
        beta_sample = tf.random_gamma(shape=(), alpha=beta)

        sampled = beta_sample / tf.maximum(x=(alpha_sample + beta_sample), y=util.epsilon)

        return self.min_value + (self.max_value - self.min_value) * \
            tf.where(condition=deterministic, x=definite, y=sampled)
Example No. 12
 def _sample_n(self, n, seed=None):
     expanded_concentration1 = tf.ones_like(
         self.total_concentration, dtype=self.dtype) * self.concentration1
     expanded_concentration0 = tf.ones_like(
         self.total_concentration, dtype=self.dtype) * self.concentration0
     gamma1_sample = tf.random_gamma(shape=[n],
                                     alpha=expanded_concentration1,
                                     dtype=self.dtype,
                                     seed=seed)
     gamma2_sample = tf.random_gamma(shape=[n],
                                     alpha=expanded_concentration0,
                                     dtype=self.dtype,
                                     seed=util.gen_new_seed(seed, "beta"))
     beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
     return beta_sample
Example No. 13
def q_W(name, V, K, reuse=False):
    with tf.variable_scope('q_W' + name, reuse=reuse):
        W_aW = tf.get_variable("W_aW", [V, K], tf.float32,
                               tf.random_uniform_initializer(0.1, 10))
        RW_aW = max_m_grad(min_W_alpha_rate, W_aW)
        # RW_aW = tf.maximum(min_W_alpha_rate, W_aW)
        W_alpha = tf.nn.softplus(RW_aW)

        W_bW = tf.get_variable("W_bW", [V, K], tf.float32,
                               tf.random_uniform_initializer(0.1, 10))
        RW_bW = max_m_grad(min_W_beta_rate, W_bW)
        # RW_bW = tf.maximum(min_W_beta_rate, W_bW)
        W_beta = tf.nn.softplus(RW_bW)
        # W_beta = tf.nn.softplus(W_bW)
        # W_beta = min_m_grad(W_alpha / min_mean, W_beta)

        if MethodName == 'GO':
            W_hat1s = tf.random_gamma([1], tf.stop_gradient(W_alpha), 1.)
            W_hat1s = tf.maximum(min_W, tf.squeeze(W_hat1s, 0))
            Grad_W_alpha1 = GO_Gamma_v2(tf.stop_gradient(W_hat1s), tf.stop_gradient(W_alpha))
            W_hat1 = W_alpha * tf.stop_gradient(Grad_W_alpha1) - \
                     tf.stop_gradient(W_alpha * Grad_W_alpha1) + \
                     tf.stop_gradient(W_hat1s)
            W1_Fcorr = tf.zeros([1])

        if MethodName == 'GRep':
            posi0 = tf.polygamma(tf.constant(0,dtype=tf.float32),W_alpha)
            posi1 = tf.polygamma(tf.constant(1,dtype=tf.float32),W_alpha)
            W_hat1s = tf.random_gamma([1], tf.stop_gradient(W_alpha), 1.)
            W_hat1s = tf.maximum(min_W, tf.squeeze(W_hat1s, 0))
            epsilo = tf.stop_gradient( (tf.log(W_hat1s)-posi0)/tf.maximum((tf.pow(posi1,0.5)),1e-8) )
            log_W_hat1 = epsilo*tf.pow(posi1,0.5)+posi0
            W_hat1 = tf.exp( log_W_hat1 )
            W1_Fcorr = tf.reduce_sum(
                    - tf.lgamma(W_alpha) + (W_alpha-1.)*log_W_hat1 - W_hat1
                    + log_W_hat1 + 0.5 * tf.log( posi1 )
                    ) 

        if MethodName == 'RSVI':
            lambda_W1 = tf.squeeze(tf.random_gamma([1], W_alpha + Bf, 1.), 0)
            lambda_W1 = tf.stop_gradient(tf.maximum(min_W, lambda_W1))
            W_hat1, W1_Fcorr = reject_h_boosted(lambda_W1, W_alpha)

        W = W_hat1 / W_beta
        # W = tf.maximum(min_W, W)
        W = max_m_grad(min_W, W)

        return W, W_alpha, W_beta, W1_Fcorr
Example No. 14
def q_z_x(name, x, K, reuse=False):
    with tf.variable_scope('q_z_x' + name, reuse=reuse):
        # h1 = tf.nn.relu(tf.layers.dense(x, units=h_dim[1]))
        h1 = x

        z_Ralpha1 = tf.layers.dense(h1, units=K, kernel_initializer=tf.random_normal_initializer(0, 0.01))
        z_Ralpha1 = max_m_grad(min_z_alpha_rate, z_Ralpha1)
        # z_Ralpha1 = tf.maximum(min_z_alpha_rate, z_Ralpha1)
        z_alpha1 = tf.nn.softplus(z_Ralpha1)

        z_Rbeta1 = tf.layers.dense(h1, units=K, kernel_initializer=tf.random_normal_initializer(0, 0.01))
        z_Rbeta1 = max_m_grad(min_z_beta_rate, z_Rbeta1)
        # z_Rbeta1 = tf.maximum(min_z_beta_rate, z_Rbeta1)
        z_beta1 = tf.nn.softplus(z_Rbeta1)
        # z_beta1 = min_m_grad(z_alpha1 / min_mean, z_beta1)

        if MethodName == 'GO':
            z_hat1s = tf.random_gamma([1], tf.stop_gradient(z_alpha1), 1.)
            z_hat1s = tf.maximum(min_z, tf.squeeze(z_hat1s, 0))
            Grad_z_alpha1 = GO_Gamma_v2(tf.stop_gradient(z_hat1s), tf.stop_gradient(z_alpha1))
            z_hat1 = z_alpha1 * tf.stop_gradient(Grad_z_alpha1) - \
                     tf.stop_gradient(z_alpha1 * Grad_z_alpha1) + \
                     tf.stop_gradient(z_hat1s)
            z1_Fcorr = tf.zeros([1])

        if MethodName == 'GRep':
            posi0 = tf.polygamma(tf.constant(0,dtype=tf.float32),z_alpha1)
            posi1 = tf.polygamma(tf.constant(1,dtype=tf.float32),z_alpha1)
            z_hat1s = tf.random_gamma([1], tf.stop_gradient(z_alpha1), 1.)
            z_hat1s = tf.maximum(min_z, tf.squeeze(z_hat1s, 0))
            epsilo = tf.stop_gradient( (tf.log(z_hat1s)-posi0)/tf.maximum((tf.pow(posi1,0.5)),1e-5) )
            log_z_hat1 = epsilo*tf.pow(posi1,0.5)+posi0
            z_hat1 = tf.exp( log_z_hat1 )
            z1_Fcorr = tf.reduce_sum(
                    - tf.lgamma(z_alpha1) + (z_alpha1-1.)*log_z_hat1 - z_hat1
                    + log_z_hat1 + 0.5 * tf.log( posi1 )
                    ) 

        if MethodName == 'RSVI':
            lambda_z1 = tf.squeeze(tf.random_gamma([1], z_alpha1 + Bf, 1.), 0)
            lambda_z1 = tf.stop_gradient(tf.maximum(min_z, lambda_z1))
            z_hat1, z1_Fcorr = reject_h_boosted(lambda_z1, z_alpha1)

        z1 = z_hat1 / z_beta1
        # z1 = tf.maximum(min_z, z1)
        z1 = max_m_grad(min_z, z1)

        return z1, z_alpha1, z_beta1, z1_Fcorr
Example No. 15
 def func():
   with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
     rng = tf.random_gamma([num], alpha, beta=beta, dtype=dtype, seed=seed)
     ret = np.empty([10, num])
     for i in xrange(10):
       ret[i, :] = sess.run(rng)
   return ret
Example No. 16
    def _sample_n(self, n, seed=None):
        # This implementation is equivalent to the one in tf.contrib.distributions.Wishart
        batch_shape = self.batch_shape_tensor()
        event_shape = self.event_shape_tensor()

        stream = seed_stream.SeedStream(seed=seed, salt="Wishart")

        shape = tf.concat([[n], batch_shape, event_shape], 0)

        # Sample a normal full matrix
        x = tf.random_normal(shape=shape, dtype=self.dtype, seed=stream())

        # Sample the diagonal
        g = tf.random_gamma(shape=[n],
                            alpha=self._multi_gamma_sequence(
                                0.5 * self.df, self.p),
                            beta=0.5,
                            dtype=self.dtype,
                            seed=stream())

        # Discard the upper triangular part
        x = tf.matrix_band_part(x, -1, 0)

        # Set the diagonal
        x = tf.matrix_set_diag(x, tf.sqrt(g))

        # Scale with the Scale matrix, equivalent to matmul(sqrt(diag_scale), x)
        x *= tf.sqrt(tf.exp(self.log_diag_scale[tf.newaxis, :, :, tf.newaxis]))

        return x
Example No. 17
def random():
    """
    Draw random values from several distributions.
    :return:
    """
    tf.set_random_seed(100)

    # Normally distributed random arrays of shape [M, N] with mean (default 0.0) and stddev (default 1.0):
    n = tf.random_normal(shape=(3, 3), mean=0.0, stddev=1.0)
    tn = tf.truncated_normal(shape=(3, 3), mean=0.0, stddev=1.0)

    # Uniformly distributed random array of shape [M, N] over [minval (default 0), maxval):
    u = tf.random_uniform(shape=(3, 3), minval=1, maxval=10)

    # Gamma-distributed random array of shape [M, N] with shape parameter alpha:
    g = tf.random_gamma(shape=(3, 3), alpha=1)

    # Randomly crop the data
    crop = tf.random_crop(g, size=(3, 2))

    # Shuffle the data
    sg = tf.random_shuffle(g)
    with tf.Session() as sess:
        logger.info("random_normal\n %s" % sess.run(n))
        logger.info("truncated_normal\n %s" % sess.run(tn))
        logger.info("random_uniform\n %s" % sess.run(u))
        logger.info("random_gamma\n %s" % sess.run(g))
        logger.info("random_crop\n %s" % sess.run(corp))
        logger.info("random_shuffle\n %s" % sess.run(sg))
Example No. 18
    def _sample_n(self, n, seed):
        batch_shape = self.batch_shape_tensor()
        event_shape = self.event_shape_tensor()
        batch_ndims = tf.shape(batch_shape)[0]

        ndims = batch_ndims + 3  # sample_ndims=1, event_ndims=2
        shape = tf.concat([[n], batch_shape, event_shape], 0)
        stream = seed_stream.SeedStream(seed, salt="Wishart")

        # Complexity: O(nbk**2)
        x = tf.random_normal(shape=shape,
                             mean=0.,
                             stddev=1.,
                             dtype=self.dtype,
                             seed=stream())

        # Complexity: O(nbk)
        # This parametrization is equivalent to Chi2, i.e.,
        # ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
        expanded_df = self.df * tf.ones(
            self.scale_operator.batch_shape_tensor(),
            dtype=self.df.dtype.base_dtype)

        g = tf.random_gamma(shape=[n],
                            alpha=self._multi_gamma_sequence(
                                0.5 * expanded_df, self.dimension),
                            beta=0.5,
                            dtype=self.dtype,
                            seed=stream())

        # Complexity: O(nbk**2)
        x = tf.matrix_band_part(x, -1, 0)  # Tri-lower.

        # Complexity: O(nbk)
        x = tf.matrix_set_diag(x, tf.sqrt(g))

        # Make batch-op ready.
        # Complexity: O(nbk**2)
        perm = tf.concat([tf.range(1, ndims), [0]], 0)
        x = tf.transpose(x, perm)
        shape = tf.concat([batch_shape, [event_shape[0]], [-1]], 0)
        x = tf.reshape(x, shape)

        # Complexity: O(nbM) where M is the complexity of the operator solving a
        # vector system. For LinearOperatorLowerTriangular, each matmul is O(k^3) so
        # this step has complexity O(nbk^3).
        x = self.scale_operator.matmul(x)

        # Undo make batch-op ready.
        # Complexity: O(nbk**2)
        shape = tf.concat([batch_shape, event_shape, [n]], 0)
        x = tf.reshape(x, shape)
        perm = tf.concat([[ndims - 1], tf.range(0, ndims - 1)], 0)
        x = tf.transpose(x, perm)

        if not self.input_output_cholesky:
            # Complexity: O(nbk**3)
            x = tf.matmul(x, x, adjoint_b=True)

        return x
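The `ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)` identity used for the diagonal can be checked in isolation; a small sketch (the df value is arbitrary):

df = 7.0
chi2 = tf.random_gamma([100000], alpha=0.5 * df, beta=0.5)
with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(chi2)))  # ~ df, the Chi-squared mean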
Example No. 19
 def _sample_n(self, n, seed=None):
   gamma_sample = tf.random_gamma(
       shape=[n],
       alpha=self.concentration,
       dtype=self.dtype,
       seed=seed)
   return gamma_sample / tf.reduce_sum(gamma_sample, -1, keepdims=True)
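This sampler uses the fact that independent Gamma(alpha_i, 1) draws normalized by their sum are Dirichlet(alpha) distributed; a standalone sketch:

import tensorflow as tf

concentration = tf.constant([1.0, 2.0, 3.0])
g = tf.random_gamma([4], alpha=concentration)  # 4 draws of shape [3]
dirichlet = g / tf.reduce_sum(g, axis=-1, keepdims=True)
with tf.Session() as sess:
    print(sess.run(tf.reduce_sum(dirichlet, axis=-1)))  # each row sums to 1.0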
Example No. 20
 def _sample_n(self, n, seed=None):
   return 1. / tf.random_gamma(
       shape=[n],
       alpha=self.concentration,
       beta=self.rate,
       dtype=self.dtype,
       seed=seed)
Example No. 21
 def _sample_n(self, n, seed=None):
   gamma_sample = tf.random_gamma(
       shape=[n],
       alpha=self.concentration,
       dtype=self.dtype,
       seed=seed)
   return gamma_sample / tf.reduce_sum(gamma_sample, -1, keepdims=True)
Example No. 22
    def _get_histogram_var_by_type(self,
                                   histogram_type,
                                   shape,
                                   name=None,
                                   **kwargs):
        with tf.name_scope(name, "get_hist_{}".format(histogram_type)):
            if histogram_type == "normal":
                # Make a normal distribution, with a shifting mean
                mean = tf.Variable(kwargs['mean'])
                stddev = tf.Variable(kwargs['stddev'])
                return tf.random_normal(
                    shape=shape, mean=mean, stddev=stddev), [mean, stddev]
            elif histogram_type == "gamma":
                # Add a gamma distribution
                alpha = tf.Variable(kwargs['alpha'])
                return tf.random_gamma(shape=shape, alpha=alpha), [alpha]
            elif histogram_type == "poisson":
                lam = tf.Variable(kwargs['lam'])
                return tf.random_poisson(shape=shape, lam=lam), [lam]
            elif histogram_type == "uniform":
                # Add a uniform distribution
                maxval = tf.Variable(kwargs['maxval'])
                return tf.random_uniform(shape=shape, maxval=maxval), [maxval]

            raise Exception('histogram type error %s' % histogram_type,
                            'builtin type', self._histogram_distribute_list)
Example No. 23
 def sample_pi(self, alpha, beta):
     Gam = tf.random_gamma([1],
                           alpha=alpha,
                           beta=beta,
                           name='Gam',
                           seed=None)[0]
     return self.G_inv(Gam, alpha, beta)
Example No. 24
 def _sample_n(self, n, seed=None):
   return 1. / tf.random_gamma(
       shape=[n],
       alpha=self.concentration,
       beta=self.rate,
       dtype=self.dtype,
       seed=seed)
Example No. 25
 def sample_tf(self, x, training=None):
     alphas = self.__call__(x, training)
     alphas_sample = tf.random_gamma(shape=(), alpha=alphas)
     alphas_sample_sum = utils.sum_keep_shape(alphas_sample, axis=-1)
     alphas_sample_sum = tf.maximum(x=alphas_sample_sum, y=utils.epsilon)
     sampled = alphas_sample / alphas_sample_sum
     return self.min_value + (self.max_value - self.min_value) * sampled
Example No. 26
def norm_posterior(dim, std0):
    """Initialise a posterior (diagonal) Normal distribution.

    Parameters
    ----------
    dim : tuple or list
        the dimension of this distribution.
    std0 : float
        the initial (unoptimized) standard deviation of this distribution.

    Returns
    -------
    Q : tf.distributions.Normal
        the initialised posterior Normal object.

    Note
    ----
    This will make tf.Variables on the randomly initialised mean and standard
    deviation of the posterior. The initialisation of the mean is from a Normal
    with zero mean, and ``std0`` standard deviation, and the initialisation of
    the standard deviation is from a gamma distribution with an alpha of
    ``std0`` and a beta of 1.

    """
    mu_0 = tf.random_normal(dim, stddev=std0, seed=next(seedgen))
    mu = tf.Variable(mu_0, name="W_mu_q")

    std_0 = tf.random_gamma(alpha=std0, shape=dim, seed=next(seedgen))
    std = pos(tf.Variable(std_0, name="W_std_q"))

    Q = tf.distributions.Normal(loc=mu, scale=std)
    return Q
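A usage sketch. `seedgen` and `pos` come from the surrounding library (a seed generator and a positivity transform), so plausible stand-ins are substituted here:

import itertools
import tensorflow as tf

seedgen = itertools.count(42)  # stand-in for the library's seed generator
pos = tf.nn.softplus           # stand-in for its positivity constraint

Q = norm_posterior(dim=(5, 3), std0=0.1)
theta = Q.sample()  # one draw from the (5, 3) posterior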
Example No. 27
def ApplyDepthImageDistortions(depth_images,
                               random_noise_level=0.05,
                               random_noise_apply_probability=0.5,
                               scaling_noise=True,
                               gamma_shape=1000.0,
                               gamma_scale_inverse=1000.0,
                               min_depth_allowed=0.25,
                               max_depth_allowed=2.5):
    """Apply photometric distortions to the input depth images.

  Args:
    depth_images: Tensor of shape [batch_size, h, w, 1] containing a batch of
      depth images to apply the random photometric distortions to.
    random_noise_level: The standard deviation of the Gaussian distribution for
      the noise that is applied to the depth image. When 0.0, then no noise is
      applied.
    random_noise_apply_probability: Probability of applying additive random
      noise to the images.
    scaling_noise: If True; sample a random variable from a Gamma distribution
      to scale the depth image.
    gamma_shape: Float; shape parameter of a Gamma distribution.
    gamma_scale_inverse: Float; inverse of scale parameter of a Gamma
      distribution.
    min_depth_allowed: Float; minimum clip value for depth.
    max_depth_allowed: Float; max clip value for depth.

  Returns:
    depth_images: Tensor of shape [batch_size, h, w, 1] containing a
      batch of images resulting from applying random photometric distortions to
      the inputs.
  """
    assert depth_images[0].get_shape().as_list()[-1] == 1
    with tf.variable_scope('distortions_depth_images'):
        # Add random Gaussian noise.
        if random_noise_level:
            for i, image in enumerate(depth_images):
                img_shape = tf.shape(image)
                rnd_noise = tf.random_normal(img_shape,
                                             stddev=random_noise_level)

                def ReturnImageTensor(value):
                    return lambda: value

                if scaling_noise:
                    alpha = tf.random_gamma([], gamma_shape,
                                            gamma_scale_inverse)
                else:
                    alpha = 1.0  # no random scaling when scaling_noise=False
                image = tf.cond(
                    tf.reduce_all(
                        tf.greater(tf.random.uniform([1]),
                                   random_noise_apply_probability)),
                    ReturnImageTensor(image),
                    ReturnImageTensor(alpha * image + rnd_noise))
                depth_images[i] = tf.reshape(image, img_shape)

        # Clip to valid range.
        for i, image in enumerate(depth_images):
            depth_images[i] = tf.clip_by_value(image, min_depth_allowed,
                                               max_depth_allowed)
    return depth_images
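A usage sketch. Note the body iterates over `depth_images` and assigns per image, so despite the docstring it is treated here as a list of [h, w, 1] tensors:

batch = tf.random_uniform([4, 48, 64, 1], minval=0.3, maxval=2.0)
depth_list = tf.unstack(batch)  # list of [h, w, 1] depth images
distorted = ApplyDepthImageDistortions(depth_list)
distorted = tf.stack(distorted)  # back to [4, 48, 64, 1]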
Example No. 28
def restore():
    v = tf.Variable(tf.random_gamma(shape=(3, 3), alpha=10))
    initial_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(initial_op)
        logger.info(v)
        saver = tf.train.Saver()
        saver.restore(sess, save_path="./saver")
Example No. 29
    def _sample(self, alpha):

        # broadcast alpha from scalar to vector, if necessary
        alpha = alpha * tf.ones(shape=(self.shape[0]), dtype=self.dtype)

        gammas = tf.squeeze(tf.random_gamma(shape=(1, ), alpha=alpha, beta=1))
        sample = Normalize.transform(gammas)
        return sample
Example No. 30
def appx_remain_mass_mean_rate(n_samp, q_omega, q_beta, ii_tot_mass_m, q_w, su,
                               tu, a, b, K):
    """
    Monte Carlo approximation of the expectation of the leftover mass rate.
    E[leftover mass] = (mass_mean_rate)*size

    """
    theta_rem_samp = tf.random_gamma([n_samp, K], a, b)

    # we also need to draw samples from the total item mass
    # this is potentially quite expensive, so we just approximate the sum as a Gamma

    # compute the variance of the total item mass
    ii_tot_mass_v = tf.reduce_sum(
        q_beta.variance() * q_omega.variance() +
        q_beta.variance() * tf.square(q_omega.mean()) +
        tf.square(q_beta.mean()) * q_omega.variance(),
        axis=0,
        keep_dims=True)
    ii_tot_mass_v = tf.transpose(ii_tot_mass_v)

    # params for gamma dist approximation
    ii_tot_mass_beta = ii_tot_mass_m / ii_tot_mass_v
    ii_tot_mass_alpha = ii_tot_mass_m * ii_tot_mass_beta

    # sample total item mass
    i_tot_mass_sample = tf.squeeze(
        tf.random_gamma(
            [n_samp], alpha=ii_tot_mass_alpha, beta=ii_tot_mass_beta) +
        q_w.sample([n_samp]))

    # compute the estimate
    denom = tf.pow(
        tu + tf.reduce_sum(
            theta_rem_samp * i_tot_mass_sample, axis=1, keep_dims=True),
        1 - su)
    leftover_samps = theta_rem_samp / denom
    estimated_expect = tf.reduce_mean(leftover_samps, 0,
                                      keep_dims=True)  # monte carlo average

    # can also compute variance... but it'll turn out to be really tiny
    # var_samps = (1.-su) * tf.square(theta_rem_samp) / tf.pow(tu + tf.reduce_sum(theta_rem_samp*i_tot_mass_sample, axis=1, keep_dims=True), 2-su)
    # estimated_var = tf.reduce_mean(var_samps, 0, keep_dims=True)

    return tf.transpose(estimated_expect)
Example No. 31
def assert_random_gamma_has_gradients():
    """Check that TensorFlow supports tf.random_gamma gradients."""
    a = tf.constant(1.0)
    gradient = tf.gradients(tf.random_gamma([], a), a)[0]
    message = (
        "This example requires tf.random_gamma gradients, introduced in"
        " TensorFlow 1.10 RC0, to work correctly."
        " Please upgrade TensorFlow to a newer version, e.g. tf-nightly.")
    assert gradient is not None, message
Example No. 32
 def _sample_n(self, n, seed=None):
   seed = seed_stream.SeedStream(seed, "beta")
   expanded_concentration1 = tf.ones_like(
       self.total_concentration, dtype=self.dtype) * self.concentration1
   expanded_concentration0 = tf.ones_like(
       self.total_concentration, dtype=self.dtype) * self.concentration0
   gamma1_sample = tf.random_gamma(
       shape=[n],
       alpha=expanded_concentration1,
       dtype=self.dtype,
       seed=seed())
   gamma2_sample = tf.random_gamma(
       shape=[n],
       alpha=expanded_concentration0,
       dtype=self.dtype,
       seed=seed())
   beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
   return beta_sample
Example No. 33
  def _sample_n(self, n, seed):
    batch_shape = self.batch_shape_tensor()
    event_shape = self.event_shape_tensor()
    batch_ndims = tf.shape(batch_shape)[0]

    ndims = batch_ndims + 3  # sample_ndims=1, event_ndims=2
    shape = tf.concat([[n], batch_shape, event_shape], 0)
    stream = seed_stream.SeedStream(seed, salt="Wishart")

    # Complexity: O(nbk**2)
    x = tf.random_normal(
        shape=shape, mean=0., stddev=1., dtype=self.dtype, seed=stream())

    # Complexity: O(nbk)
    # This parametrization is equivalent to Chi2, i.e.,
    # ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
    expanded_df = self.df * tf.ones(
        self.scale_operator.batch_shape_tensor(),
        dtype=self.df.dtype.base_dtype)

    g = tf.random_gamma(
        shape=[n],
        alpha=self._multi_gamma_sequence(0.5 * expanded_df, self.dimension),
        beta=0.5,
        dtype=self.dtype,
        seed=stream())

    # Complexity: O(nbk**2)
    x = tf.matrix_band_part(x, -1, 0)  # Tri-lower.

    # Complexity: O(nbk)
    x = tf.matrix_set_diag(x, tf.sqrt(g))

    # Make batch-op ready.
    # Complexity: O(nbk**2)
    perm = tf.concat([tf.range(1, ndims), [0]], 0)
    x = tf.transpose(x, perm)
    shape = tf.concat([batch_shape, [event_shape[0]], [-1]], 0)
    x = tf.reshape(x, shape)

    # Complexity: O(nbM) where M is the complexity of the operator solving a
    # vector system. For LinearOperatorLowerTriangular, each matmul is O(k^3) so
    # this step has complexity O(nbk^3).
    x = self.scale_operator.matmul(x)

    # Undo make batch-op ready.
    # Complexity: O(nbk**2)
    shape = tf.concat([batch_shape, event_shape, [n]], 0)
    x = tf.reshape(x, shape)
    perm = tf.concat([[ndims - 1], tf.range(0, ndims - 1)], 0)
    x = tf.transpose(x, perm)

    if not self.input_output_cholesky:
      # Complexity: O(nbk**3)
      x = tf.matmul(x, x, adjoint_b=True)

    return x
Example No. 34
	def elbo_sample(self):
		"""Unconditional, unboosted sample to get estimate of ELBO.
		This is a separate function to avoid stepping on freeze/thaw cycle."""
		alpha = self.get_alpha()
		mean = self.get_mean()

		gamma_draw = tf.squeeze(tf.random_gamma([1], alpha, 1.0), [0]) * (mean / alpha)
		gamma_draw = tf.maximum(1e-300, gamma_draw)
		return gamma_draw
Example No. 35
def random_exponential(shape, rate=1.0, dtype=tf.float32, seed=None):
    """
  Helper function to sample from the exponential distribution, which is not
  included in core TensorFlow.
  """
    return tf.random_gamma(shape,
                           alpha=1,
                           beta=1. / rate,
                           dtype=dtype,
                           seed=seed)
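A quick sanity check of the parameterization (an Exponential with rate r has mean 1/r):

samples = random_exponential([100000], rate=4.0)
with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(samples)))  # ~ 1/4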
Example No. 36
def slice_sampler_one_dim(target_log_prob, x_initial, step_size=0.01,
                          max_doublings=30, seed=None, name=None):
  """For a given x position in each Markov chain, returns the next x.

  Applies the one dimensional slice sampling algorithm as defined in Neal (2003)
  to an input tensor x of shape (num_chains,) where num_chains is the number of
  simultaneous Markov chains, and returns the next tensor x of shape
  (num_chains,) when these chains are evolved by the slice sampling algorithm.

  Args:
    target_log_prob: Callable accepting a tensor like `x_initial` and returning
      a tensor containing the log density at that point of the same shape.
    x_initial: A tensor of any shape. The initial positions of the chains. This
      function assumes that all the dimensions of `x_initial` are batch
      dimensions (i.e. the event shape is `[]`).
    step_size: A tensor of shape and dtype compatible with `x_initial`. The min
      interval size in the doubling algorithm.
    max_doublings: Scalar tensor of dtype `tf.int32`. The maximum number of
      doublings to try to find the slice bounds.
    seed: (Optional) positive int. The random seed. If None, no seed is set.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., 'slice_sampler_one_dim').

  Returns:
    retval: A tensor of the same shape and dtype as `x_initial`. The next state
      of the Markov chain.
    next_target_log_prob: The target log density evaluated at `retval`.
    bounds_satisfied: A tensor of bool dtype and shape batch dimensions.
    upper_bounds: Tensor of the same shape and dtype as `x_initial`. The upper
      bounds for the slice found.
    lower_bounds: Tensor of the same shape and dtype as `x_initial`. The lower
      bounds for the slice found.
  """
  with tf.name_scope(name, 'slice_sampler_one_dim',
                     [x_initial, step_size, max_doublings]):
    x_initial = tf.convert_to_tensor(x_initial)
    # Obtain the input dtype of the array.
    dtype = x_initial.dtype.base_dtype
    # Select the height of the slice. Tensor of shape x_initial.shape.
    log_slice_heights = target_log_prob(x_initial) - tf.random_gamma(
        tf.shape(x_initial), alpha=1, dtype=dtype, seed=seed)
    # Given the above x and slice heights, compute the bounds of the slice for
    # each chain.
    upper_bounds, lower_bounds, bounds_satisfied = slice_bounds_by_doubling(
        x_initial, target_log_prob, log_slice_heights, max_doublings, step_size,
        seed=seed)
    retval = _sample_with_shrinkage(x_initial, target_log_prob=target_log_prob,
                                    log_slice_heights=log_slice_heights,
                                    step_size=step_size,
                                    lower_bounds=lower_bounds,
                                    upper_bounds=upper_bounds, seed=seed)
    return (retval, target_log_prob(retval), bounds_satisfied,
            upper_bounds, lower_bounds)
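A usage sketch, assuming the module's helpers (`slice_bounds_by_doubling`, `_sample_with_shrinkage`) are available; the target here is an unnormalized standard normal over 5 parallel chains:

def target_log_prob(x):
    return -0.5 * tf.square(x)  # log N(0, 1), up to a constant

x_initial = tf.zeros([5])  # 5 chains, all starting at 0
next_x, next_log_prob, ok, upper, lower = slice_sampler_one_dim(
    target_log_prob, x_initial, step_size=0.5, seed=17)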
Example No. 37
 def _sample_n(self, n, seed=None):
   # Here we use the fact that if:
   # lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs)
   # then X ~ Poisson(lam) is Negative Binomially distributed.
   rate = tf.random_gamma(
       shape=[n],
       alpha=self.total_count,
       beta=tf.exp(-self.logits),
       dtype=self.dtype,
       seed=seed)
   return tf.random_poisson(
       rate,
       shape=[],
       dtype=self.dtype,
       seed=distribution_util.gen_new_seed(seed, "negative_binom"))
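The Gamma-Poisson mixture in isolation: with lam ~ Gamma(total_count, (1 - p)/p), a Poisson(lam) draw is NegativeBinomial(total_count, p). A standalone sketch (parameter values arbitrary):

import tensorflow as tf

total_count, probs = 5.0, 0.3
lam = tf.random_gamma([100000], alpha=total_count, beta=(1. - probs) / probs)
nb = tf.random_poisson(lam, shape=[])
with tf.Session() as sess:
    # mean ~ total_count * probs / (1 - probs) = 5 * 0.3 / 0.7
    print(sess.run(tf.reduce_mean(nb)))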
Example No. 38
 def _sample_n(self, n, seed=None):
   # Here we use the fact that if:
   # lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs)
   # then X ~ Poisson(lam) is Negative Binomially distributed.
   stream = seed_stream.SeedStream(seed, salt="NegativeBinomial")
   rate = tf.random_gamma(
       shape=[n],
       alpha=self.total_count,
       beta=tf.exp(-self.logits),
       dtype=self.dtype,
       seed=stream())
   return tf.random_poisson(
       rate,
       shape=[],
       dtype=self.dtype,
       seed=stream())
Example No. 39
 def testShape(self):
   # Fully known shape.
   rnd = tf.random_gamma([150], 2.0)
   self.assertEqual([150], rnd.get_shape().as_list())
   rnd = tf.random_gamma([150], 2.0, beta=[3.0, 4.0])
   self.assertEqual([150, 2], rnd.get_shape().as_list())
   rnd = tf.random_gamma([150], tf.ones([1, 2, 3]))
   self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
   rnd = tf.random_gamma([20, 30], tf.ones([1, 2, 3]))
   self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
   rnd = tf.random_gamma([123], tf.placeholder(tf.float32, shape=(2,)))
   self.assertEqual([123, 2], rnd.get_shape().as_list())
   # Partially known shape.
   rnd = tf.random_gamma(tf.placeholder(tf.int32, shape=(1,)), tf.ones([7, 3]))
   self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
   rnd = tf.random_gamma(tf.placeholder(tf.int32, shape=(3,)), tf.ones([9, 6]))
   self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
   # Unknown shape.
   rnd = tf.random_gamma(tf.placeholder(tf.int32), tf.placeholder(tf.float32))
   self.assertIs(None, rnd.get_shape().ndims)
   rnd = tf.random_gamma([50], tf.placeholder(tf.float32))
   self.assertIs(None, rnd.get_shape().ndims)
Example No. 40
  def _sample_n(self, n, seed=None):
    seed = seed_stream.SeedStream(seed, "normal_gamma")
    shape = tf.concat([[n], self.batch_shape_tensor()], 0)

    precision = tf.random_gamma(
        shape=shape,
        alpha=self.concentration,
        beta=self.rate,
        dtype=self.dtype,
        seed=seed())

    scale = tf.sqrt(1 / (self._lambda * precision))
    mean = tf.random_normal(
        shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed())
    mean = mean * scale + self.loc

    return tf.concat((tf.expand_dims(mean, axis=-1),
                      tf.expand_dims(precision, axis=-1)), axis=-1)
Example No. 41
 def _sample_n(self, n, seed=None):
   # The sampling method comes from the fact that if:
   #   X ~ Normal(0, 1)
   #   Z ~ Chi2(df)
   #   Y = X / sqrt(Z / df)
   # then:
   #   Y ~ StudentT(df).
   seed = seed_stream.SeedStream(seed, "student_t")
   shape = tf.concat([[n], self.batch_shape_tensor()], 0)
   normal_sample = tf.random_normal(shape, dtype=self.dtype, seed=seed())
   df = self.df * tf.ones(self.batch_shape_tensor(), dtype=self.dtype)
   gamma_sample = tf.random_gamma(
       [n],
       0.5 * df,
       beta=0.5,
       dtype=self.dtype,
       seed=seed())
   samples = normal_sample * tf.rsqrt(gamma_sample / df)
   return samples * self.scale + self.loc  # Abs(scale) not wanted.
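The same construction in isolation: a normal draw divided by the square root of a scaled Chi-squared (Gamma(df/2, 1/2)) draw gives a standard Student-t:

import tensorflow as tf

df = 4.0
z = tf.random_normal([100000])
chi2 = tf.random_gamma([100000], alpha=0.5 * df, beta=0.5)
t_samples = z * tf.rsqrt(chi2 / df)  # distributed as StudentT(df)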
Example No. 42
def run_all(logdir, verbose=False):
  """Generate a bunch of histogram data, and write it to logdir."""
  del verbose

  tf.set_random_seed(0)

  k = tf.placeholder(tf.float32)

  # Make a normal distribution, with a shifting mean
  mean_moving_normal = tf.random_normal(shape=[1000], mean=(5*k), stddev=1)
  # Record that distribution into a histogram summary
  histogram_summary.op("normal/moving_mean",
                       mean_moving_normal,
                       description="A normal distribution whose mean changes "
                                   "over time.")

  # Make a normal distribution with shrinking variance
  shrinking_normal = tf.random_normal(shape=[1000], mean=0, stddev=1-(k))
  # Record that distribution too
  histogram_summary.op("normal/shrinking_variance", shrinking_normal,
                       description="A normal distribution whose variance "
                                   "shrinks over time.")

  # Let's combine both of those distributions into one dataset
  normal_combined = tf.concat([mean_moving_normal, shrinking_normal], 0)
  # We add another histogram summary to record the combined distribution
  histogram_summary.op("normal/bimodal", normal_combined,
                       description="A combination of two normal distributions, "
                                   "one with a moving mean and one with  "
                                   "shrinking variance. The result is a "
                                   "distribution that starts as unimodal and "
                                   "becomes more and more bimodal over time.")

  # Add a gamma distribution
  gamma = tf.random_gamma(shape=[1000], alpha=k)
  histogram_summary.op("gamma", gamma,
                       description="A gamma distribution whose shape "
                                   "parameter, α, changes over time.")

  # And a poisson distribution
  poisson = tf.random_poisson(shape=[1000], lam=k)
  histogram_summary.op("poisson", poisson,
                       description="A Poisson distribution, which only "
                                   "takes on integer values.")

  # And a uniform distribution
  uniform = tf.random_uniform(shape=[1000], maxval=k*10)
  histogram_summary.op("uniform", uniform,
                       description="A simple uniform distribution.")

  # Finally, combine everything together!
  all_distributions = [mean_moving_normal, shrinking_normal,
                       gamma, poisson, uniform]
  all_combined = tf.concat(all_distributions, 0)
  histogram_summary.op("all_combined", all_combined,
                       description="An amalgamation of five distributions: a "
                                   "uniform distribution, a gamma "
                                   "distribution, a Poisson distribution, and "
                                   "two normal distributions.")

  summaries = tf.summary.merge_all()

  # Setup a session and summary writer
  sess = tf.Session()
  writer = tf.summary.FileWriter(logdir)

  # Setup a loop and write the summaries to disk
  N = 400
  for step in xrange(N):
    k_val = step/float(N)
    summ = sess.run(summaries, feed_dict={k: k_val})
    writer.add_summary(summ, global_step=step)
Example No. 43
 def log_dirichlet(self, size, scale=1.0):
     mu = tf.random_gamma([1], scale * np.ones(size).astype(np.float32))
     mu = tf.log(mu / tf.reduce_sum(mu))
     return mu