Example #1
    def decode_where(self, input_coord_logits, input_attri_logits, input_pca_vectors, sample_mode):
        """
        Inputs:
            where_states containing
            - **coord_logits** (bsize, 1, grid_dim)
            - **attri_logits** (bsize, 1, scale_ratio_dim, grid_dim)
            - **pca_vectors**  (bsize, 1, pca_dim, grid_dim)
            sample_mode
              0: top 1, 1: multinomial

        Outputs
            - **sample_inds**   (bsize, 3)
            - **sample_vecs**   (bsize, pca_dim)
        """

        ##############################################################
        # Sampling locations
        ##############################################################

        coord_logits = input_coord_logits.squeeze(1)
        if sample_mode == 0:
            # greedy: take the argmax (the +1.0 offset does not change it)
            _, sample_coord_inds = torch.max(coord_logits + 1.0, dim=-1, keepdim=True)
        else:
            # stochastic: Categorical's positional argument is interpreted as
            # (unnormalized) probabilities, not log-probabilities
            sample_coord_inds = Categorical(coord_logits).sample().unsqueeze(-1)

        ##############################################################
        # Sampling attributes and pca vectors
        ##############################################################

        # gather the PCA vectors at the sampled grid locations
        pca_vectors = input_pca_vectors.squeeze(1)
        bsize, tsize, grid_dim = pca_vectors.size()
        aux_pos_inds = sample_coord_inds.expand(bsize, tsize).unsqueeze(-1)
        sample_pca_vectors = torch.gather(pca_vectors, -1, aux_pos_inds).squeeze(-1)

        # gather the attribute logits at the same sampled locations
        attri_logits = input_attri_logits.squeeze(1)
        bsize, tsize, grid_dim = attri_logits.size()
        aux_pos_inds = sample_coord_inds.expand(bsize, tsize).unsqueeze(-1)
        local_logits = torch.gather(attri_logits, -1, aux_pos_inds).squeeze(-1)

        # split the gathered logits into scale and aspect-ratio channels
        scale_logits = local_logits[:, :self.cfg.num_scales]
        ratio_logits = local_logits[:, self.cfg.num_scales:]

        if sample_mode == 0:
            # greedy: argmax per head (the +1.0 offset is a no-op)
            _, sample_scale_inds = torch.max(scale_logits + 1.0, dim=-1, keepdim=True)
            _, sample_ratio_inds = torch.max(ratio_logits + 1.0, dim=-1, keepdim=True)
        else:
            # stochastic: sample the scale and ratio heads independently
            sample_scale_inds = Categorical(scale_logits).sample().unsqueeze(-1)
            sample_ratio_inds = Categorical(ratio_logits).sample().unsqueeze(-1)

        sample_inds = torch.cat(
            [sample_coord_inds, sample_scale_inds, sample_ratio_inds],
            -1
        )

        return sample_inds, sample_pca_vectors
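
A minimal, self-contained sketch of the gather step used above: after sampling one grid cell per batch element, the per-cell attribute logits are pulled out with torch.gather. The shapes here are made up for illustration.

import torch
from torch.distributions import Categorical

bsize, sr_dim, grid_dim = 4, 10, 28 * 28
attri_logits = torch.randn(bsize, sr_dim, grid_dim)
coord_probs = torch.rand(bsize, grid_dim).softmax(-1)

# sample one grid cell per batch element, then pull out the sr_dim
# attribute logits that belong to that cell
sample_coord_inds = Categorical(coord_probs).sample().unsqueeze(-1)   # (bsize, 1)
aux_pos_inds = sample_coord_inds.expand(bsize, sr_dim).unsqueeze(-1)  # (bsize, sr_dim, 1)
local_logits = torch.gather(attri_logits, -1, aux_pos_inds).squeeze(-1)  # (bsize, sr_dim)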
Example #2
class ExpRelaxedCategorical(Distribution):
    r"""
    Creates an ExpRelaxedCategorical distribution parameterized by
    :attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both).
    Returns the log of a point in the simplex. Based on the interface to
    :class:`OneHotCategorical`.

    Implementation based on [1].

    See also: :func:`torch.distributions.OneHotCategorical`

    Args:
        temperature (Tensor): relaxation temperature
        probs (Tensor): event probabilities
        logits (Tensor): unnormalized log probability for each event

    [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables
    (Maddison et al, 2017)

    [2] Categorical Reparametrization with Gumbel-Softmax
    (Jang et al, 2017)
    """
    arg_constraints = {
        'probs': constraints.simplex,
        'logits': constraints.real_vector
    }
    support = constraints.real_vector  # The true support is actually a submanifold of this.
    has_rsample = True

    def __init__(self,
                 temperature,
                 probs=None,
                 logits=None,
                 validate_args=None):
        self._categorical = Categorical(probs, logits)
        self.temperature = temperature
        batch_shape = self._categorical.batch_shape
        event_shape = self._categorical.param_shape[-1:]
        super(ExpRelaxedCategorical,
              self).__init__(batch_shape,
                             event_shape,
                             validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(ExpRelaxedCategorical, _instance)
        batch_shape = torch.Size(batch_shape)
        new.temperature = self.temperature
        new._categorical = self._categorical.expand(batch_shape)
        super(ExpRelaxedCategorical, new).__init__(batch_shape,
                                                   self.event_shape,
                                                   validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @property
    def param_shape(self):
        return self._categorical.param_shape

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def probs(self):
        return self._categorical.probs

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        uniforms = clamp_probs(
            torch.rand(shape,
                       dtype=self.logits.dtype,
                       device=self.logits.device))
        gumbels = -((-(uniforms.log())).log())
        scores = (self.logits + gumbels) / self.temperature
        return scores - scores.logsumexp(dim=-1, keepdim=True)

    def log_prob(self, value):
        K = self._categorical._num_events
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        log_scale = (torch.full_like(self.temperature, float(K)).lgamma() -
                     self.temperature.log().mul(-(K - 1)))
        score = logits - value.mul(self.temperature)
        score = (score - score.logsumexp(dim=-1, keepdim=True)).sum(-1)
        return score + log_scale
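
A minimal usage sketch: in current PyTorch, ExpRelaxedCategorical is used internally by the exported RelaxedOneHotCategorical, which composes it with an ExpTransform so samples land on the simplex directly.

import torch
from torch.distributions import RelaxedOneHotCategorical

temperature = torch.tensor(0.5)
probs = torch.tensor([0.1, 0.2, 0.3, 0.4])
m = RelaxedOneHotCategorical(temperature, probs)
sample = m.rsample()         # reparameterized (Gumbel-Softmax) sample
print(sample, sample.sum())  # a point on the simplex; coordinates sum to 1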
Example #3
class OneHotCategorical(Distribution):
    r"""
    Creates a one-hot categorical distribution parameterized by :attr:`probs` or
    :attr:`logits`.

    Samples are one-hot coded vectors of size ``probs.size(-1)``.

    .. note:: The `probs` argument must be non-negative, finite and have a non-zero sum,
              and it will be normalized to sum to 1 along the last dimension. :attr:`probs`
              will return this normalized value.
              The `logits` argument will be interpreted as unnormalized log probabilities
              and can therefore be any real number. It will likewise be normalized so that
              the resulting probabilities sum to 1 along the last dimension. :attr:`logits`
              will return this normalized value.

    See also: :func:`torch.distributions.Categorical` for specifications of
    :attr:`probs` and :attr:`logits`.

    Example::

        >>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
        tensor([ 0.,  0.,  0.,  1.])

    Args:
        probs (Tensor): event probabilities
        logits (Tensor): event log probabilities (unnormalized)
    """
    arg_constraints = {
        'probs': constraints.simplex,
        'logits': constraints.real_vector
    }
    support = constraints.one_hot
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None, validate_args=None):
        self._categorical = Categorical(probs, logits)
        batch_shape = self._categorical.batch_shape
        event_shape = self._categorical.param_shape[-1:]
        super(OneHotCategorical, self).__init__(batch_shape,
                                                event_shape,
                                                validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(OneHotCategorical, _instance)
        batch_shape = torch.Size(batch_shape)
        new._categorical = self._categorical.expand(batch_shape)
        super(OneHotCategorical, new).__init__(batch_shape,
                                               self.event_shape,
                                               validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @property
    def _param(self):
        return self._categorical._param

    @property
    def probs(self):
        return self._categorical.probs

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def mean(self):
        return self._categorical.probs

    @property
    def variance(self):
        return self._categorical.probs * (1 - self._categorical.probs)

    @property
    def param_shape(self):
        return self._categorical.param_shape

    def sample(self, sample_shape=torch.Size()):
        sample_shape = torch.Size(sample_shape)
        probs = self._categorical.probs
        num_events = self._categorical._num_events
        indices = self._categorical.sample(sample_shape)
        return torch.nn.functional.one_hot(indices, num_events).to(probs)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        indices = value.max(-1)[1]
        return self._categorical.log_prob(indices)

    def entropy(self):
        return self._categorical.entropy()

    def enumerate_support(self, expand=True):
        n = self.event_shape[0]
        values = torch.eye(n,
                           dtype=self._param.dtype,
                           device=self._param.device)
        values = values.view((n, ) + (1, ) * len(self.batch_shape) + (n, ))
        if expand:
            values = values.expand((n, ) + self.batch_shape + (n, ))
        return values
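
A quick usage sketch exercising the class above: sampling, scoring, and enumerating the support.

import torch
from torch.distributions import OneHotCategorical

m = OneHotCategorical(probs=torch.tensor([0.1, 0.2, 0.3, 0.4]))
x = m.sample((5,))            # (5, 4) one-hot samples
print(m.log_prob(x))          # (5,) log-probabilities of the sampled categories
print(m.enumerate_support())  # (4, 4): one one-hot row per category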
Example #4
    def decode_where(self, input_coord_logits, input_attri_logits,
                     sample_mode):
        """
        Inputs: 
            where_states containing
            - **coord_logits** (bsize, 1, grid_dim)
            - **attri_logits** (bsize, 1, sr_dim, grid_dim)
            sample_mode
              0: top 1, 1: multinomial
            
        Outputs
            - **sample_inds**   (bsize, 3)
        """
        coord_logits = input_coord_logits.squeeze(1)
        ##############################################################
        # Sampling locations
        ##############################################################

        if input_attri_logits is None:
            # coord_logits holds continuous values: quantize the (x, y)
            # coordinates and the (w, h) pair back onto their grids
            xy_inds = self.xymap.coords2indices(
                coord_logits[:, :2].detach().cpu().numpy())
            sample_coord_inds = torch.from_numpy(xy_inds).unsqueeze(-1).cuda()

            wh_inds = self.whmap.whs2indices(
                coord_logits[:, 2:].detach().cpu().numpy())
            wh_inds = torch.from_numpy(wh_inds).cuda()
            sample_scale_inds = wh_inds[:, 0].unsqueeze(-1)
            sample_ratio_inds = wh_inds[:, 1].unsqueeze(-1)

        else:
            if sample_mode == 0:
                # greedy: argmax (the +1.0 offset does not change it)
                _, sample_coord_inds = torch.max(coord_logits + 1.0,
                                                 dim=-1,
                                                 keepdim=True)
            else:
                # stochastic: Categorical's positional argument is interpreted
                # as (unnormalized) probabilities, not log-probabilities
                sample_coord_inds = Categorical(
                    coord_logits).sample().unsqueeze(-1)

            ##############################################################
            # Sampling attributes
            ##############################################################

            attri_logits = input_attri_logits.squeeze(1)

            # gather the attribute logits at the sampled grid locations
            bsize, tsize, grid_dim = attri_logits.size()
            aux_pos_inds = sample_coord_inds.expand(bsize, tsize).unsqueeze(-1)
            local_logits = torch.gather(attri_logits, -1,
                                        aux_pos_inds).squeeze(-1)

            # split into scale and aspect-ratio channels
            scale_logits = local_logits[:, :self.cfg.num_scales]
            ratio_logits = local_logits[:, self.cfg.num_scales:]

            if sample_mode == 0:
                # greedy: argmax per head (the +1.0 offset is a no-op)
                _, sample_scale_inds = torch.max(scale_logits + 1.0,
                                                 dim=-1,
                                                 keepdim=True)
                _, sample_ratio_inds = torch.max(ratio_logits + 1.0,
                                                 dim=-1,
                                                 keepdim=True)
            else:
                # stochastic: sample the scale and ratio heads independently
                sample_scale_inds = Categorical(
                    scale_logits).sample().unsqueeze(-1)
                sample_ratio_inds = Categorical(
                    ratio_logits).sample().unsqueeze(-1)

        sample_inds = torch.cat(
            [sample_coord_inds, sample_scale_inds, sample_ratio_inds], -1)

        return sample_inds
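
The xymap.coords2indices and whmap.whs2indices helpers are not shown in this snippet. Purely as an assumption about their role, a plausible quantizer mapping normalized (x, y) pairs onto flat grid indices might look like the sketch below; the real helpers and grid sizes may differ.

import numpy as np

def coords2indices(xys, cols=28, rows=28):
    # hypothetical: snap normalized coords onto a cols x rows grid and
    # flatten row-major, mirroring the grid_dim indexing used above
    xs = np.clip((xys[:, 0] * cols).astype(np.int64), 0, cols - 1)
    ys = np.clip((xys[:, 1] * rows).astype(np.int64), 0, rows - 1)
    return ys * cols + xs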
Example #5
class OneHotCategorical(Distribution):
    r"""
    Creates a one-hot categorical distribution parameterized by :attr:`probs` or
    :attr:`logits`.

    Samples are one-hot coded vectors of size ``probs.size(-1)``.

    .. note:: :attr:`probs` must be non-negative, finite and have a non-zero sum,
              and it will be normalized to sum to 1.

    See also: :func:`torch.distributions.Categorical` for specifications of
    :attr:`probs` and :attr:`logits`.

    Example::

        >>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
        tensor([ 0.,  0.,  0.,  1.])

    Args:
        probs (Tensor): event probabilities
        logits (Tensor): event log probabilities
    """
    arg_constraints = {'probs': constraints.simplex,
                       'logits': constraints.real}
    support = constraints.simplex
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None, validate_args=None):
        self._categorical = Categorical(probs, logits)
        batch_shape = self._categorical.batch_shape
        event_shape = self._categorical.param_shape[-1:]
        super(OneHotCategorical, self).__init__(batch_shape, event_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(OneHotCategorical, _instance)
        batch_shape = torch.Size(batch_shape)
        new._categorical = self._categorical.expand(batch_shape)
        super(OneHotCategorical, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @property
    def _param(self):
        return self._categorical._param

    @property
    def probs(self):
        return self._categorical.probs

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def mean(self):
        return self._categorical.probs

    @property
    def variance(self):
        return self._categorical.probs * (1 - self._categorical.probs)

    @property
    def param_shape(self):
        return self._categorical.param_shape

    def sample(self, sample_shape=torch.Size()):
        sample_shape = torch.Size(sample_shape)
        probs = self._categorical.probs
        one_hot = probs.new(self._extended_shape(sample_shape)).zero_()
        indices = self._categorical.sample(sample_shape)
        if indices.dim() < one_hot.dim():
            indices = indices.unsqueeze(-1)
        return one_hot.scatter_(-1, indices, 1)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        indices = value.max(-1)[1]
        return self._categorical.log_prob(indices)

    def entropy(self):
        return self._categorical.entropy()

    def enumerate_support(self, expand=True):
        n = self.event_shape[0]
        values = torch.eye(n, dtype=self._param.dtype, device=self._param.device)
        values = values.view((n,) + (1,) * len(self.batch_shape) + (n,))
        if expand:
            values = values.expand((n,) + self.batch_shape + (n,))
        return values
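
Aside from the looser constraints, the main implementation difference from Example #3 is sample(): this older version scatters 1s into a zero tensor, while the newer one calls torch.nn.functional.one_hot. A small check that the two constructions agree:

import torch
import torch.nn.functional as F

probs = torch.tensor([0.25, 0.25, 0.25, 0.25])
indices = torch.tensor([2, 0, 3])
old = torch.zeros(3, 4).scatter_(-1, indices.unsqueeze(-1), 1)
new = F.one_hot(indices, 4).to(probs)
assert torch.equal(old, new)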
Example #6
    def decode_where(self, where_states, sample_mode):
        """
        Inputs: 
            where_states containing
            - **coord_logits** (bsize, 1, grid_dim)
            - **attri_logits** (bsize, 1, 17, grid_dim)
            sample_mode
              0: top 1, 1: multinomial
            
        Outputs
            - **sample_inds**   (bsize, 5)
        """

        ##############################################################
        # Sampling locations
        ##############################################################

        coord_logits = where_states['coord_logits'].squeeze(1)
        if sample_mode == 0:
            # greedy: argmax (the +1.0 offset does not change it)
            _, sample_coord_inds = torch.max(coord_logits + 1.0,
                                             dim=-1,
                                             keepdim=True)
        else:
            # stochastic: Categorical's positional argument is interpreted
            # as (unnormalized) probabilities, not log-probabilities
            sample_coord_inds = Categorical(coord_logits).sample().unsqueeze(
                -1)

        ##############################################################
        # Sampling attributes
        ##############################################################

        attri_logits = where_states['attri_logits'].squeeze(1)

        # gather the 17 attribute logits at the sampled grid locations
        bsize, tsize, grid_dim = attri_logits.size()
        aux_pos_inds = sample_coord_inds.expand(bsize, tsize).unsqueeze(-1)
        local_logits = torch.gather(attri_logits, -1, aux_pos_inds).squeeze(-1)

        # split the gathered channels: 7 pose, 5 expression, 3 scale, 2 flip
        pose_logits = local_logits[:, :7]
        expr_logits = local_logits[:, 7:12]
        scal_logits = local_logits[:, 12:15]
        flip_logits = local_logits[:, 15:]

        if sample_mode == 0:
            # greedy: argmax per attribute head (the +1.0 offset is a no-op)
            _, sample_pose_inds = torch.max(pose_logits + 1.0,
                                            dim=-1,
                                            keepdim=True)
            _, sample_expr_inds = torch.max(expr_logits + 1.0,
                                            dim=-1,
                                            keepdim=True)
            _, sample_scal_inds = torch.max(scal_logits + 1.0,
                                            dim=-1,
                                            keepdim=True)
            _, sample_flip_inds = torch.max(flip_logits + 1.0,
                                            dim=-1,
                                            keepdim=True)
        else:
            # stochastic: sample each attribute head independently
            sample_pose_inds = Categorical(pose_logits).sample().unsqueeze(-1)
            sample_expr_inds = Categorical(expr_logits).sample().unsqueeze(-1)
            sample_scal_inds = Categorical(scal_logits).sample().unsqueeze(-1)
            sample_flip_inds = Categorical(flip_logits).sample().unsqueeze(-1)

        sample_inds = torch.cat([
            sample_pose_inds, sample_expr_inds, sample_coord_inds,
            sample_scal_inds, sample_flip_inds
        ], -1)

        return sample_inds
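
A minimal smoke test for the routine above, with made-up shapes: 17 attribute channels = 7 pose + 5 expression + 3 scale + 2 flip, over a 28x28 grid. The model object is hypothetical; any module exposing this decode_where would do.

import torch

bsize, grid_dim = 2, 28 * 28
where_states = {
    'coord_logits': torch.rand(bsize, 1, grid_dim).softmax(-1),
    'attri_logits': torch.rand(bsize, 1, 17, grid_dim),
}
# sample_inds = model.decode_where(where_states, sample_mode=1)  # (bsize, 5)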