def loop_fn(i):
  rates_i = array_ops.gather(rates, i)
  # Test both scalar and non-scalar params and shapes.
  return (random_ops.random_poisson(lam=rates_i[0, 0], shape=[]),
          random_ops.random_poisson(lam=rates_i, shape=[]),
          random_ops.random_poisson(lam=rates_i[0, 0], shape=[3]),
          random_ops.random_poisson(lam=rates_i, shape=[3]))
def testDTypeCombinationsV2(self):
  """Tests random_poisson_v2() for all supported dtype combinations."""
  with self.cached_session():
    for lam_dt in _SUPPORTED_DTYPES:
      for out_dt in _SUPPORTED_DTYPES:
        random_ops.random_poisson(
            constant_op.constant([1], dtype=lam_dt), [10],
            dtype=out_dt).eval()
def testNoCSE(self):
  """CSE = common subexpression elimination.

  SetIsStateful() should prevent two identical random ops from getting
  merged.
  """
  for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
    with self.cached_session():
      rnd1 = random_ops.random_poisson(2.0, [24], dtype=dtype)
      rnd2 = random_ops.random_poisson(2.0, [24], dtype=dtype)
      diff = rnd2 - rnd1
      # Since these are all positive integers, the norm will
      # be at least 1 if they are different.
      self.assertGreaterEqual(np.linalg.norm(diff.eval()), 1)
def testNoCSE(self):
  """CSE = common subexpression elimination.

  SetIsStateful() should prevent two identical random ops from getting
  merged.
  """
  for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
    with self.cached_session(use_gpu=True):
      rnd1 = random_ops.random_poisson(2.0, [24], dtype=dtype)
      rnd2 = random_ops.random_poisson(2.0, [24], dtype=dtype)
      diff = rnd2 - rnd1
      # Since these are all positive integers, the norm will
      # be at least 1 if they are different.
      self.assertGreaterEqual(np.linalg.norm(diff.eval()), 1)
def resample_at_rate(inputs, rates, scope=None, seed=None, back_prop=False):
  """Given `inputs` tensors, stochastically resamples each at a given rate.

  For example, if the inputs are `[[a1, a2], [b1, b2]]` and the rates tensor
  contains `[3, 1]`, then the return value may look like
  `[[a1, a2, a1, a1], [b1, b2, b1, b1]]`. However, many other outputs are
  possible, since this is stochastic -- averaged over many repeated calls,
  each set of inputs should appear in the output `rate` times the number of
  invocations.

  Args:
    inputs: A list of tensors, each of which has a shape of
      `[batch_size, ...]`.
    rates: A tensor of shape `[batch_size]` containing the resampling rates
      for each input.
    scope: Scope for the op.
    seed: Random seed to use.
    back_prop: Whether to allow back-propagation through this op.

  Returns:
    Selections from the input tensors.
  """
  with ops.name_scope(scope, default_name='resample_at_rate',
                      values=list(inputs) + [rates]):
    rates = ops.convert_to_tensor(rates, name='rates')
    # random_poisson does not support rates of size 0 (b/36076216)
    sample_counts = math_ops.cast(
        control_flow_ops.cond(
            array_ops.shape(rates)[0] > 0,
            lambda: random_ops.random_poisson(rates, (), rates.dtype,
                                              seed=seed),
            lambda: array_ops.zeros(shape=[0], dtype=rates.dtype)),
        dtypes.int32)
    sample_indices = _repeat_range(sample_counts)
    if not back_prop:
      sample_indices = array_ops.stop_gradient(sample_indices)
    return [array_ops.gather(x, sample_indices) for x in inputs]
def resample_at_rate(inputs, rates, scope=None, seed=None, back_prop=False):
  """Given `inputs` tensors, stochastically resamples each at a given rate.

  For example, if the inputs are `[[a1, a2], [b1, b2]]` and the rates tensor
  contains `[3, 1]`, then the return value may look like
  `[[a1, a2, a1, a1], [b1, b2, b1, b1]]`. However, many other outputs are
  possible, since this is stochastic -- averaged over many repeated calls,
  each set of inputs should appear in the output `rate` times the number of
  invocations.

  Args:
    inputs: A list of tensors, each of which has a shape of
      `[batch_size, ...]`.
    rates: A tensor of shape `[batch_size]` containing the resampling rates
      for each input.
    scope: Scope for the op.
    seed: Random seed to use.
    back_prop: Whether to allow back-propagation through this op.

  Returns:
    Selections from the input tensors.
  """
  with ops.name_scope(scope, default_name='resample_at_rate',
                      values=list(inputs) + [rates]):
    rates = ops.convert_to_tensor(rates, name='rates')
    sample_counts = math_ops.cast(
        random_ops.random_poisson(rates, (), rates.dtype, seed=seed),
        dtypes.int32)
    sample_indices = _repeat_range(sample_counts)
    if not back_prop:
      sample_indices = array_ops.stop_gradient(sample_indices)
    return [array_ops.gather(x, sample_indices) for x in inputs]
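# Illustrative sketch (not from the source): the resampling semantics above
# can be emulated in plain NumPy. Each row index i is repeated k_i times,
# where k_i ~ Poisson(rates[i]); averaged over runs, row i appears
# rates[i] times per invocation. The helper name is hypothetical.
import numpy as np

rng = np.random.default_rng(0)

def _np_resample_at_rate(inputs, rates):
  sample_counts = rng.poisson(rates)  # k_i ~ Poisson(rates[i]) per row
  sample_indices = np.repeat(np.arange(len(rates)), sample_counts)
  return [x[sample_indices] for x in inputs]

# Rows of the input are drawn at expected rates [3, 1] per invocation.
print(_np_resample_at_rate([np.array([[1., 2.], [3., 4.]])],
                           np.array([3.0, 1.0])))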
def _sample_n(self, n, seed=None):
  # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[], in
  # which case get ids as a [n]-shaped vector.
  batch_size = (np.prod(self.batch_shape.as_list(), dtype=np.int32)
                if self.batch_shape.is_fully_defined()
                else math_ops.reduce_prod(self.batch_shape_tensor()))
  ids = self._mixture_distribution.sample(
      sample_shape=concat_vectors(
          [n],
          distribution_util.pick_vector(
              self.is_scalar_batch(), np.int32([]), [batch_size])),
      seed=distribution_util.gen_new_seed(
          seed, "poisson_lognormal_quadrature_compound"))
  # Stride `quadrature_size` for `batch_size` number of times.
  offset = math_ops.range(start=0,
                          limit=batch_size * self._quadrature_size,
                          delta=self._quadrature_size,
                          dtype=ids.dtype)
  ids += offset
  rate = array_ops.gather(
      array_ops.reshape(self.distribution.rate, shape=[-1]), ids)
  rate = array_ops.reshape(
      rate, shape=concat_vectors([n], self.batch_shape_tensor()))
  return random_ops.random_poisson(
      lam=rate, shape=[], dtype=self.dtype, seed=seed)
def testSizeTooLarge(self):
  with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                              "overflow"):
    rate = constant_op.constant(1.0, shape=(4, 4, 4, 4, 4))
    self.evaluate(
        random_ops.random_poisson(shape=[46902, 51188, 34063, 59195],
                                  lam=rate))
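# Back-of-the-envelope check (illustrative, not from the source) of why the
# op must reject this request: the total element count, prod(shape) times
# prod(lam.shape), does not fit in a signed 64-bit integer.
total = 4 ** 5  # prod(lam.shape) for shape (4, 4, 4, 4, 4)
for dim in [46902, 51188, 34063, 59195]:
  total *= dim
print(total > 2 ** 63 - 1)  # True: the element count overflows int64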
def _sample_n(self, n, seed=None):
  # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[], in
  # which case get ids as a [n]-shaped vector.
  batch_size = (np.prod(self.batch_shape.as_list(), dtype=np.int32)
                if self.batch_shape.is_fully_defined()
                else math_ops.reduce_prod(self.batch_shape_tensor()))
  ids = self._mixture_distribution.sample(
      sample_shape=concat_vectors(
          [n],
          distribution_util.pick_vector(
              self.is_scalar_batch(), np.int32([]), [batch_size])),
      seed=distribution_util.gen_new_seed(
          seed, "poisson_lognormal_quadrature_compound"))
  # Stride `quadrature_size` for `batch_size` number of times.
  offset = math_ops.range(start=0,
                          limit=batch_size * self._quadrature_size,
                          delta=self._quadrature_size,
                          dtype=ids.dtype)
  ids += offset
  rate = array_ops.gather(
      array_ops.reshape(self.distribution.rate, shape=[-1]), ids)
  rate = array_ops.reshape(
      rate, shape=concat_vectors([n], self.batch_shape_tensor()))
  return random_ops.random_poisson(
      lam=rate, shape=[], dtype=self.dtype, seed=seed)
def poisson(lam=1.0, size=None):
  if size is None:
    size = ()
  elif np_utils.isscalar(size):
    size = (size,)
  return np_utils.tensor_to_ndarray(
      random_ops.random_poisson(shape=size, lam=lam, dtype=np_dtypes.int64))
def func():
  with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
    rng = random_ops.random_poisson(lam, [num], dtype=dtype, seed=seed)
    ret = np.empty([10, num])
    for i in xrange(10):
      ret[i, :] = self.evaluate(rng)
  return ret
def func():
  with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
    rng = random_ops.random_poisson(lam, [num], dtype=dtype, seed=seed)
    ret = np.empty([10, num])
    for i in xrange(10):
      ret[i, :] = sess.run(rng)
  return ret
def _sample_n(self, n, seed=None):
  # Here we use the fact that if:
  #   lam ~ Gamma(concentration=total_count, rate=(1 - probs) / probs)
  # then X ~ Poisson(lam) is Negative Binomially distributed.
  rate = random_ops.random_gamma(shape=[n],
                                 alpha=self.total_count,
                                 beta=math_ops.exp(-self.logits),
                                 dtype=self.dtype,
                                 seed=seed)
  return random_ops.random_poisson(rate,
                                   shape=[],
                                   dtype=self.dtype,
                                   seed=distribution_util.gen_new_seed(
                                       seed, "negative_binom"))
def _sample_n(self, n, seed=None):
  # Here we use the fact that if:
  #   lam ~ Gamma(concentration=total_count, rate=(1 - probs) / probs)
  # then X ~ Poisson(lam) is Negative Binomially distributed.
  rate = random_ops.random_gamma(
      shape=[n],
      alpha=self.total_count,
      beta=math_ops.exp(-self.logits),
      dtype=self.dtype,
      seed=seed)
  return random_ops.random_poisson(
      rate,
      shape=[],
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, "negative_binom"))
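# A minimal NumPy sketch (illustrative, not from the source) of the
# Gamma-Poisson compounding fact used above: drawing
#   lam ~ Gamma(concentration=total_count, rate=(1 - probs) / probs)
# and then X ~ Poisson(lam) yields negative-binomial samples. NumPy's
# gamma() takes scale = 1 / rate, and its negative_binomial() takes the
# per-trial success probability, which is 1 - probs here.
import numpy as np

rng = np.random.default_rng(0)
total_count, probs, n = 5.0, 0.3, 200_000
lam = rng.gamma(shape=total_count, scale=probs / (1.0 - probs), size=n)
x = rng.poisson(lam)                                    # compound samples
y = rng.negative_binomial(total_count, 1.0 - probs, n)  # direct samples
print(x.mean(), y.mean())  # both ~ total_count * probs / (1 - probs) ~ 2.14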
def testShape(self):
  # Fully known shape.
  rnd = random_ops.random_poisson(2.0, [150], seed=12345)
  self.assertEqual([150], rnd.get_shape().as_list())
  rnd = random_ops.random_poisson(
      lam=array_ops.ones([1, 2, 3]), shape=[150], seed=12345)
  self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
  rnd = random_ops.random_poisson(
      lam=array_ops.ones([1, 2, 3]), shape=[20, 30], seed=12345)
  self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
  rnd = random_ops.random_poisson(
      lam=array_ops.placeholder(dtypes.float32, shape=(2,)),
      shape=[12],
      seed=12345)
  self.assertEqual([12, 2], rnd.get_shape().as_list())
  # Partially known shape.
  rnd = random_ops.random_poisson(
      lam=array_ops.ones([7, 3]),
      shape=array_ops.placeholder(dtypes.int32, shape=(1,)),
      seed=12345)
  self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
  rnd = random_ops.random_poisson(
      lam=array_ops.ones([9, 6]),
      shape=array_ops.placeholder(dtypes.int32, shape=(3,)),
      seed=12345)
  self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
  # Unknown shape.
  rnd = random_ops.random_poisson(
      lam=array_ops.placeholder(dtypes.float32),
      shape=array_ops.placeholder(dtypes.int32),
      seed=12345)
  self.assertIs(None, rnd.get_shape().ndims)
  rnd = random_ops.random_poisson(
      lam=array_ops.placeholder(dtypes.float32), shape=[50], seed=12345)
  self.assertIs(None, rnd.get_shape().ndims)
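# The checks above encode random_poisson's shape rule: the result shape is
# `shape` concatenated with the shape of `lam`. A quick sketch of the same
# rule via the public TF2 API (assumed available; not from the source):
import tensorflow as tf

sample = tf.random.poisson(shape=[20, 30], lam=tf.ones([1, 2, 3]))
print(sample.shape)  # (20, 30, 1, 2, 3)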
def _sample_n(self, n, seed=None):
  # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[], in
  # which case get ids as a [n]-shaped vector.
  batch_size = self.batch_shape.num_elements()
  if batch_size is None:
    batch_size = math_ops.reduce_prod(self.batch_shape_tensor())
  # We need to "sample extra" from the mixture distribution if it doesn't
  # already specify a probs vector for each batch coordinate.
  # We only support this kind of reduced broadcasting, i.e., there is
  # exactly one probs vector for all batch dims or one for each.
  ids = self._mixture_distribution.sample(
      sample_shape=concat_vectors(
          [n],
          distribution_util.pick_vector(
              self.mixture_distribution.is_scalar_batch(),
              [batch_size],
              np.int32([]))),
      seed=distribution_util.gen_new_seed(
          seed, "poisson_lognormal_quadrature_compound"))
  # We need to flatten batch dims in case mixture_distribution has its own
  # batch dims.
  ids = array_ops.reshape(
      ids,
      shape=concat_vectors(
          [n],
          distribution_util.pick_vector(
              self.is_scalar_batch(), np.int32([]), np.int32([-1]))))
  # Stride `quadrature_size` for `batch_size` number of times.
  offset = math_ops.range(start=0,
                          limit=batch_size * self._quadrature_size,
                          delta=self._quadrature_size,
                          dtype=ids.dtype)
  ids += offset
  rate = array_ops.gather(
      array_ops.reshape(self.distribution.rate, shape=[-1]), ids)
  rate = array_ops.reshape(
      rate, shape=concat_vectors([n], self.batch_shape_tensor()))
  return random_ops.random_poisson(
      lam=rate, shape=[], dtype=self.dtype, seed=seed)
def _sample_n(self, n, seed=None):
  # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[], in
  # which case get ids as a [n]-shaped vector.
  batch_size = self.batch_shape.num_elements()
  if batch_size is None:
    batch_size = math_ops.reduce_prod(self.batch_shape_tensor())
  # We need to "sample extra" from the mixture distribution if it doesn't
  # already specify a probs vector for each batch coordinate.
  # We only support this kind of reduced broadcasting, i.e., there is
  # exactly one probs vector for all batch dims or one for each.
  ids = self._mixture_distribution.sample(
      sample_shape=concat_vectors(
          [n],
          distribution_util.pick_vector(
              self.mixture_distribution.is_scalar_batch(),
              [batch_size],
              np.int32([]))),
      seed=distribution_util.gen_new_seed(
          seed, "poisson_lognormal_quadrature_compound"))
  # We need to flatten batch dims in case mixture_distribution has its own
  # batch dims.
  ids = array_ops.reshape(
      ids,
      shape=concat_vectors(
          [n],
          distribution_util.pick_vector(
              self.is_scalar_batch(), np.int32([]), np.int32([-1]))))
  # Stride `quadrature_size` for `batch_size` number of times.
  offset = math_ops.range(start=0,
                          limit=batch_size * self._quadrature_size,
                          delta=self._quadrature_size,
                          dtype=ids.dtype)
  ids += offset
  rate = array_ops.gather(
      array_ops.reshape(self.distribution.rate, shape=[-1]), ids)
  rate = array_ops.reshape(
      rate, shape=concat_vectors([n], self.batch_shape_tensor()))
  return random_ops.random_poisson(
      lam=rate, shape=[], dtype=self.dtype, seed=seed)
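# A tiny NumPy sketch (illustrative, not from the source) of the strided
# gather above: with the rate table flattened to one block of
# `quadrature_size` rates per batch element, adding an offset that strides
# by `quadrature_size` turns per-batch component ids into flat indices.
import numpy as np

quadrature_size, batch_size = 3, 2
rates = np.arange(batch_size * quadrature_size, dtype=np.float32)
ids = np.array([2, 1])  # sampled quadrature component per batch element
offset = np.arange(0, batch_size * quadrature_size, quadrature_size)
print(rates[ids + offset])  # [2., 4.]: component 2 of batch 0, 1 of batch 1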
def MpcRandom_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None):
  dtype = dtype_check_and_set(dtype)
  # return random_ops.random_poisson(shape, mean, stddev, dtype, seed, name)
  return random_ops.random_poisson(lam, shape, dtype, seed, name)
def loop_fn(_):
  return random_ops.random_poisson(lam=[1.3], shape=[3])
def testZeroShape(self):
  with self.cached_session():
    rnd = random_ops.random_poisson([], [], seed=12345)
    self.assertEqual([0], rnd.get_shape().as_list())
    self.assertAllClose(np.array([], dtype=np.float32), rnd.eval())
def poisson(lam=1.0, size=None):
  if size is None:
    size = ()
  elif np_utils.isscalar(size):
    size = (size,)
  return random_ops.random_poisson(shape=size, lam=lam, dtype=np_dtypes.int_)
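# Illustrative usage (not from the source) of the NumPy-compatibility
# wrapper above through the public TF2 surface it backs, assumed to be
# tf.experimental.numpy.random.poisson:
import tensorflow.experimental.numpy as tnp

tnp.experimental_enable_numpy_behavior()  # recommended before using tnp
draws = tnp.random.poisson(lam=4.0, size=10)
print(draws.shape, draws.dtype)  # (10,) with the default integer dtype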
def _sample_n(self, n, seed=None):
  return random_ops.random_poisson(
      self.rate, [n], dtype=self.dtype, seed=seed)
def testZeroShape(self):
  with self.cached_session():
    rnd = random_ops.random_poisson([], [], seed=12345)
    self.assertEqual([0], rnd.get_shape().as_list())
    self.assertAllClose(np.array([], dtype=np.float32), self.evaluate(rnd))
def testInfRate(self):
  sample = random_ops.random_poisson(shape=[2], lam=np.inf)
  self.assertAllEqual([np.inf, np.inf], self.evaluate(sample))