def __iter__(self):
    """Yield each subsample index as a plain Python number.

    Outside a Pyro stack this is a simple pass over ``self.subsample``.
    Inside a stack, log-prob terms produced per iteration are rescaled by
    ``size / subsample_size`` and each iteration is marked as a separate
    (non-vectorized) conditionally independent context.
    """
    def as_number(value):
        # Convert dim-0 tensors to Python numerics, since functions like
        # torch.ones(*args) do not accept dim-0 torch.Tensor instances.
        return value if isinstance(value, numbers.Number) else value.item()

    if not am_i_wrapped():
        for element in self.subsample:
            yield as_number(element)
        return

    indep_context = poutine.indep(name=self.name, size=self.subsample_size)
    with poutine.scale(scale=self.size / self.subsample_size):
        for element in self.subsample:
            # Advance to a fresh independence frame before re-entering
            # the context for this iteration.
            indep_context.next_context()
            with indep_context:
                yield as_number(element)
def __enter__(self):
    """Enter the independence context and return the subsample indices.

    Allocates a tensor dimension for this named site via ``_DIM_ALLOCATOR``
    and, when a Pyro stack is active, installs a scale messenger (rescaling
    log-prob terms by ``size / subsample_size``) and an indep messenger
    (marking the allocated dim as conditionally independent).

    :return: ``self.subsample``, the indices for this context.
    """
    # Record whether a Pyro stack is active; presumably __exit__ consults
    # this to decide whether the messengers need tearing down — confirm.
    self._wrapped = am_i_wrapped()
    # Reserve a dimension for this named site (self.dim may be a
    # user-requested dim or None for auto-allocation).
    self.dim = _DIM_ALLOCATOR.allocate(self.name, self.dim)
    if self._wrapped:
        try:
            self._scale_messenger = poutine.scale(scale=self.size / self.subsample_size)
            self._indep_messenger = poutine.indep(name=self.name, size=self.subsample_size, dim=self.dim)
            # Enter scale first, then indep, so indep is innermost.
            self._scale_messenger.__enter__()
            self._indep_messenger.__enter__()
        except BaseException:
            # Release the reserved dimension on any failure, then re-raise.
            # NOTE(review): if _indep_messenger.__enter__ raises after
            # _scale_messenger was entered, the scale messenger is not
            # exited here — verify the messenger stack tolerates this.
            _DIM_ALLOCATOR.free(self.name, self.dim)
            raise
    return self.subsample
def irange(name, size, subsample_size=None, subsample=None, use_cuda=None):
    """
    Non-vectorized version of ``iarange``. See ``iarange`` for details.

    :param str name: A name that will be used for this site in a Trace.
    :param int size: The size of the collection being subsampled
        (like ``stop`` in builtin ``range``).
    :param int subsample_size: Size of minibatches used in subsampling.
        Defaults to ``size``.
    :param subsample: Optional custom subsample for user-defined subsampling
        schemes. If specified, then ``subsample_size`` will be set to
        ``len(subsample)``.
    :type subsample: Anything supporting ``len()``.
    :param bool use_cuda: Optional bool specifying whether to use cuda tensors
        for internal ``log_pdf`` computations. Defaults to
        ``torch.Tensor.is_cuda``.
    :return: A generator yielding a sequence of integers.

    Examples::

        >>> for i in irange('data', 100, subsample_size=10):
                if z[i]:  # Prevents vectorization.
                    observe('obs_{}'.format(i), normal, data[i], mu, sigma)

    See `SVI Part II <http://pyro.ai/examples/svi_part_ii.html>`_ for an
    extended discussion.
    """
    subsample, scale = _subsample(name, size, subsample_size, subsample, use_cuda)
    # Work with raw tensor data rather than a Variable wrapper.
    if isinstance(subsample, Variable):
        subsample = subsample.data

    # With no active Pyro stack there is nothing to rescale or mark
    # independent; just yield the indices.
    if not _PYRO_STACK:
        for index in subsample:
            yield index
        return

    indep_context = poutine.indep(None, name, vectorized=False)
    with poutine.scale(None, scale):
        for index in subsample:
            # Each iteration is its own (non-vectorized) independence frame.
            with indep_context:
                yield index
def iarange(name, size=None, subsample_size=None, subsample=None, use_cuda=None):
    """
    Context manager for conditionally independent ranges of variables.

    ``iarange`` is similar to ``torch.arange`` in that it yields an array of
    indices by which other tensors can be indexed. ``iarange`` differs from
    ``torch.arange`` in that it also informs inference algorithms that the
    variables being indexed are conditionally independent. To do this,
    ``iarange`` is a provided as context manager rather than a function, and
    users must guarantee that all computation within an ``iarange`` context
    is conditionally independent::

        with iarange("name", size) as ind:
            # ...do conditionally independent stuff with ind...

    Additionally, ``iarange`` can take advantage of the conditional
    independence assumptions by subsampling the indices and informing
    inference algorithms to scale various computed values. This is typically
    used to subsample minibatches of data::

        with iarange("data", len(data), subsample_size=100) as ind:
            batch = data[ind]
            assert len(batch) == 100

    By default ``subsample_size=False`` and this simply yields a
    ``torch.arange(0, size)``. If ``0 < subsample_size <= size`` this yields a
    single random batch of indices of size ``subsample_size`` and scales all
    log likelihood terms by ``size/batch_size``, within this context.

    .. warning::  This is only correct if all computation is conditionally
        independent within the context.

    :param str name: A unique name to help inference algorithms match
        ``iarange`` sites between models and guides.
    :param int size: Optional size of the collection being subsampled
        (like `stop` in builtin `range`).
    :param int subsample_size: Size of minibatches used in subsampling.
        Defaults to `size`.
    :param subsample: Optional custom subsample for user-defined subsampling
        schemes. If specified, then `subsample_size` will be set to
        `len(subsample)`.
    :type subsample: Anything supporting `len()`.
    :param bool use_cuda: Optional bool specifying whether to use cuda tensors
        for `subsample` and `log_pdf`. Defaults to `torch.Tensor.is_cuda`.
    :return: A context manager yielding a single 1-dimensional `torch.Tensor`
        of indices.

    Examples::

        # This version simply declares independence:
        >>> with iarange('data'):
                observe('obs', normal, data, mu, sigma)

        # This version subsamples data in vectorized way:
        >>> with iarange('data', 100, subsample_size=10) as ind:
                observe('obs', normal, data.index_select(0, ind), mu, sigma)

        # This wraps a user-defined subsampling method for use in pyro:
        >>> ind = my_custom_subsample
        >>> with iarange('data', 100, subsample=ind):
                observe('obs', normal, data.index_select(0, ind), mu, sigma)

    See `SVI Part II <http://pyro.ai/examples/svi_part_ii.html>`_ for an
    extended discussion.
    """
    subsample, scale = _subsample(name, size, subsample_size, subsample, use_cuda)

    # With no active Pyro stack, no rescaling or independence bookkeeping is
    # needed; just hand back the indices.
    if not _PYRO_STACK:
        yield subsample
        return

    with poutine.scale(None, scale):
        with poutine.indep(None, name, vectorized=True):
            yield subsample