def store(self,
              state: torch.FloatTensor,
              action: torch.LongTensor,
              reward: float,
              terminal: bool = False):
        """ Store an experience of the form (s,a,r,s',t).

        Only needs the current state s (will construct transition to s'
        automatically).

        Args:
            state (torch.tensor): this turn's state tensor, or None if terminal = True
            action (torch.tensor): this turn's action index (int), or None if terminal = True
            reward (torch.tensor): this turn's reward (float)
            terminal (bool): indicates whether episode finished (boolean)
        """

        reward /= 20.0

        if self.last_state is None:
            # first turn of the trajectory: cannot record a transition yet,
            # since s' is only known on the next call
            self.last_state = state
            self.last_action = action
            self.last_reward = reward
            return False
        else:
            if terminal:
                if self.episode_length > 0:
                    # update last state's reward and set it to terminal
                    self.mem_terminal[self.last_write_pos] = float(True)
                    self.mem_reward[self.last_write_pos] += reward
                self._reset()
                return False
            else:
                # in-between turn of trajectory: record
                self.mem_state[self.write_pos] = self.last_state.clone().detach()
                self.mem_action[self.write_pos][0] = self.last_action
                self.mem_reward[self.write_pos][0] = self.last_reward
                self.mem_next_state[self.write_pos] = state.clone().detach()
                self.mem_terminal[self.write_pos] = float(False)

                # update last encountered state
                self.last_state = state.clone().detach()
                self.last_action = action
                self.last_reward = reward

                # update write index
                self.last_write_pos = self.write_pos
                self.write_pos = (self.write_pos + 1) % self.buffer_size
                if self.buffer_count < self.buffer_size:
                    self.buffer_count += 1

                self.episode_length += 1
                return True
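For context, here is a minimal sketch of how the attributes written by store() might be initialized. The surrounding class is not shown in the snippet, so names such as buffer_size and state_dim are assumptions for illustration only.

import torch

class ReplayBufferSketch:
    def __init__(self, buffer_size: int = 10000, state_dim: int = 128):
        self.buffer_size = buffer_size
        self.buffer_count = 0          # number of valid transitions stored so far
        self.write_pos = 0             # next slot to write (circular buffer)
        self.last_write_pos = 0        # slot of the most recently written transition
        # preallocated storage, one row per transition
        self.mem_state = torch.zeros(buffer_size, state_dim)
        self.mem_next_state = torch.zeros(buffer_size, state_dim)
        self.mem_action = torch.zeros(buffer_size, 1, dtype=torch.long)
        self.mem_reward = torch.zeros(buffer_size, 1)
        self.mem_terminal = torch.zeros(buffer_size, 1)
        self._reset()

    def _reset(self):
        # per-episode bookkeeping, cleared between trajectories
        self.last_state = None
        self.last_action = None
        self.last_reward = 0.0
        self.episode_length = 0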
    def initialize_parameters(self, x: FloatTensor):
        """Data-dependent initialization of the parameters.
        The parameters are treated as trainable parameters (independent of the data) after the initialization.

        Parameters:
            x: input tensor.
        """
        if not self.training:
            return
        assert x.device == self.bias.device
        with torch.no_grad():
            bias = _mean(x.clone(), dim=0, keepdim=True)
            variance = _mean((x.clone() - bias)**2, dim=0, keepdim=True)
            logs = torch.log(self.scale / (torch.sqrt(variance) + 1e-6))
            self.bias.data.copy_(bias.data)
            self.logs.data.copy_(logs.data)
            self.initialized = True
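The statistics computed here resemble the ActNorm-style data-dependent initialization used in normalizing flows: bias is the per-feature mean of the first batch and logs is the log of scale over its standard deviation. A standalone sketch of the effect, assuming scale = 1 and a forward pass of the form (x - bias) * exp(logs) (the actual forward of this class is not shown):

import torch

x = torch.randn(64, 16) * 3.0 + 5.0                      # first batch
bias = x.mean(dim=0, keepdim=True)
std = ((x - bias) ** 2).mean(dim=0, keepdim=True).sqrt()
logs = torch.log(1.0 / (std + 1e-6))                     # scale = 1
y = (x - bias) * torch.exp(logs)
print(y.mean().item(), y.std().item())                   # approximately 0 and 1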
    def forward(
        self,
        indexed_seqs: torch.FloatTensor,
    ) -> Tuple[torch.FloatTensor, Optional[torch.BoolTensor]]:

        _masked_indexed_seqs = indexed_seqs.clone()
        _masked_indexed_seqs[:, self.src_mask] = self._mask_index
        _attn_mask = self.attn_mask.data
        return _masked_indexed_seqs, _attn_mask
Example #4
def masked_cross_entropy(logits: torch.FloatTensor,
                         targets: torch.LongTensor,
                         mask: torch.FloatTensor,
                         batch_average: bool = True) -> torch.FloatTensor:
    """
    Computes the cross-entropy loss of a padded batch, weighted by a
    user-provided tensor ``mask`` so that padding elements do not contribute
    to the loss. Comparable to combining log-softmax and negative
    log-likelihood loss from torch.nn.functional.

    Parameters
    ----------
    logits : ``torch.FloatTensor``, required.
        A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes)
        which contains the unnormalized score for each class at each step of
        each instance.
    targets : ``torch.LongTensor``, required.
        A ``torch.LongTensor`` of size (batch_size, sequence_length) which contains
        the index of the true class for each corresponding step.
    mask : ``torch.FloatTensor``, required.
        A ``torch.FloatTensor`` of size (batch_size, sequence_length) with ones at
        valid (non-padding) steps and zeros at padded steps.
    batch_average : bool, optional, (default = True).
        A bool indicating whether the loss should be averaged across the batch,
        or returned as a vector of losses per batch element.

    Returns
    -------
    A torch.FloatTensor representing the cross entropy loss.
    If ``batch_average == True``, the returned loss is a scalar.
    If ``batch_average == False``, the returned loss is a vector of shape (batch_size,).

    """
    # shape : (batch * sequence_length, num_classes)
    logits_flat = logits.view(-1, logits.size(-1))
    # shape : (batch * sequence_length, num_classes)
    log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)
    # shape : (batch * sequence_length, 1)
    targets_flat = targets.view(-1, 1).long()

    # Contribution to the negative log likelihood only comes from the exact indices
    # of the targets, as the target distributions are one-hot. Here we use torch.gather
    # to extract the indices of the num_classes dimension which contribute to the loss.
    # shape : (batch * sequence_length, 1)
    negative_log_likelihood_flat = -torch.gather(
        log_probs_flat, dim=1, index=targets_flat)
    # shape : (batch, sequence_length)
    negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())
    # shape : (batch, sequence_length)
    negative_log_likelihood = negative_log_likelihood * mask.float()
    # shape : (batch_size,)
    per_batch_loss = negative_log_likelihood.sum(1) / (mask.sum(1).float() + 1e-13)

    if batch_average:
        num_non_empty_sequences = ((mask.sum(1) > 0).float().sum() + 1e-13)
        return per_batch_loss.sum() / num_non_empty_sequences
    return per_batch_loss
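A quick usage sketch (the values below are made up, not from the source): two sequences of length 4, the second padded after three steps.

import torch

logits = torch.randn(2, 4, 5)                        # (batch, seq_len, num_classes)
targets = torch.randint(0, 5, (2, 4))                # (batch, seq_len)
mask = torch.tensor([[1., 1., 1., 1.],
                     [1., 1., 1., 0.]])              # zeros mark padding
loss = masked_cross_entropy(logits, targets, mask)   # scalar
per_example = masked_cross_entropy(logits, targets, mask,
                                   batch_average=False)  # shape (2,)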
    def update(self, new_tensor: torch.FloatTensor):
        self.count += 1
        if self.mean is None:
            # first sample: initialize the running statistics
            self.mean = new_tensor.clone()
            self.var = torch.zeros_like(self.mean)
        else:
            # online update of the unbiased sample variance, then of the mean
            self.var = (self.count - 2) / (self.count - 1) * self.var \
                + torch.pow(new_tensor - self.mean, 2) / self.count
            self.mean += (new_tensor - self.mean) / self.count
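The surrounding class is not shown, so here is a minimal self-contained sketch (class name and attribute initialization are assumptions) that wraps this update rule and compares it with batch statistics.

import torch

class RunningMoments:
    """Tracks a running mean and unbiased variance with the update rule above."""
    def __init__(self):
        self.count = 0
        self.mean = None
        self.var = None

    def update(self, new_tensor):
        self.count += 1
        if self.mean is None:
            self.mean = new_tensor.clone()
            self.var = torch.zeros_like(self.mean)
        else:
            self.var = (self.count - 2) / (self.count - 1) * self.var \
                + torch.pow(new_tensor - self.mean, 2) / self.count
            self.mean += (new_tensor - self.mean) / self.count

rm = RunningMoments()
data = torch.randn(100, 8)
for row in data:
    rm.update(row)
# both comparisons should print True up to floating-point tolerance
print(torch.allclose(rm.mean, data.mean(0), atol=1e-4))
print(torch.allclose(rm.var, data.var(0, unbiased=True), atol=1e-4))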
Example #6
    def cost(
            self, w: torch.FloatTensor
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        n_sample = self.X.shape[0]
        start = time.time()
        w = w.clone().requires_grad_()
        X = self.X.clone().requires_grad_()
        Y = self.Y.clone().requires_grad_()
        loss = torch.norm(X.mv(w) - Y, p=2) ** 2 / n_sample \
            + self.regularization.coefficient(w)
        end = time.time()
        self.grad_i_times += (end - start)
        return loss, w
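Returning the loss together with the differentiable copy of w lets the caller take gradients of the least-squares objective with respect to w. A standalone sketch of that pattern (random data, no regularizer, not from the source):

import torch

X, Y = torch.randn(32, 5), torch.randn(32)
w = torch.randn(5, requires_grad=True)
loss = torch.norm(X.mv(w) - Y, p=2) ** 2 / X.shape[0]
grad, = torch.autograd.grad(loss, w)   # gradient w.r.t. the returned w
print(grad.shape)                      # torch.Size([5])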
Example #7
def tensor_to_np(t_img: torch.FloatTensor, encoding: str, out_format: str):
    '''
    Convert a tensor image to a numpy image.
    This is roughly the inverse operation of format_tensor_img().
    NOTE: this function is not optimized for speed

    Args:
        t_img: tensor image of shape (3, H, W)
        encoding: how the tensor image was transformed.
                  Available: 'RGB_1', 'RGB_1_norm', 'BGR_255_norm'
        out_format: 'RGB_1', 'BGR_1', 'RGB_uint8', 'BGR_uint8'
    '''
    assert torch.is_tensor(t_img) and t_img.dim() == 3 and t_img.shape[0] == 3
    assert encoding in {'RGB_1', 'RGB_1_norm', 'BGR_255_norm'}
    assert out_format in {'RGB_1', 'BGR_1', 'BGR_uint8', 'RGB_uint8'}

    t_img = t_img.clone()
    # 0. convert everything to RGB_1
    if encoding == 'RGB_1':
        pass
    elif encoding == 'RGB_1_norm':
        means = [0.485, 0.456, 0.406]
        stds = [0.229, 0.224, 0.225]
        for channel, m, sd in zip(t_img, means, stds):
            channel.mul_(sd).add_(m)
    elif encoding == 'BGR_255_norm':
        raise NotImplementedError()
    else:
        raise NotImplementedError()
    im = t_img.permute(1, 2, 0).numpy()
    # 1. convert RGB_1 to output format
    if out_format == 'RGB_1':
        pass
    elif out_format == 'BGR_1':
        im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    elif out_format == 'RGB_uint8':
        im = (im * 255).astype('uint8')
    elif out_format == 'BGR_uint8':
        im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
        im = (im * 255).astype('uint8')
    else:
        raise NotImplementedError()
    return im
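A minimal usage sketch (assumes OpenCV is importable, as the function above already relies on cv2): recover a uint8 BGR image suitable for cv2.imshow or cv2.imwrite from an un-normalized RGB tensor.

import torch

t = torch.rand(3, 64, 64)                                 # a fake 'RGB_1' tensor image
im = tensor_to_np(t, encoding='RGB_1', out_format='BGR_uint8')
print(im.shape, im.dtype)                                 # (64, 64, 3) uint8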
Example #8
    def forward(self,
                queries: torch.FloatTensor,
                keys: torch.FloatTensor,
                queue: torch.FloatTensor):

        # Calculate logits
        pos_logits = torch.einsum('nc,nc->n', [queries, keys]).view(-1, 1)
        neg_logits = torch.einsum('nc,ck->nk', [queries, queue.clone().detach()])
        logits = torch.cat([pos_logits, neg_logits], dim=1)  # (B, 1+K)
        logits.div_(self.temperature)

        # Create labels
        labels = torch.zeros(logits.size(0), dtype=torch.long, device=logits.device)

        # Compute loss
        loss = nn.functional.cross_entropy(logits, labels)

        return loss, logits, labels
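A shape sketch of the logit construction above (illustrative values, not from the source): queries and keys are (B, C), the memory queue is (C, K), so each query gets one positive and K negative logits.

import torch

B, C, K = 4, 128, 1024
queries, keys = torch.randn(B, C), torch.randn(B, C)
queue = torch.randn(C, K)
pos = torch.einsum('nc,nc->n', [queries, keys]).view(-1, 1)   # (B, 1)
neg = torch.einsum('nc,ck->nk', [queries, queue])             # (B, K)
print(torch.cat([pos, neg], dim=1).shape)                     # torch.Size([4, 1025])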
Example #9
    def __init__(self,
                 X: torch.FloatTensor,
                 y: torch.LongTensor,
                 transform=None,
                 all_tasks: list = [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)],
                 truncate_size: int = None):
        super().__init__()
        self.all_tasks = all_tasks
        self.current_task = None
        self.truncate_size = truncate_size

        assert isinstance(X, torch.FloatTensor)
        assert isinstance(y, torch.LongTensor)
        self.all_y = y.clone()
        self.all_x = X.clone()
        self.y = self.all_y
        self.X = self.all_x
        self.transform = transform
Example #10
    def forward(self,
                query: torch.FloatTensor,
                pseudo: torch.FloatTensor,
                key: torch.FloatTensor,
                negatives: torch.FloatTensor,
                threshold: float = 0.5):
        """
        1. Compute logits between:
            a) query vs. {key, queue}.
            b) pseudo vs. {queue}.
        2. Compute loss as:
            c) cross entropy of deterministic positives.
            d) cross entropy of pseudo positives. Pseudo labels are learned by b).
        """

        # Clone memory queue, to avoid unintended inplace operations
        negatives = negatives.clone().detach()

        # a & c
        logits_pos = torch.einsum('bf,bf->b', [query, key]).view(-1,
                                                                 1)  # (B, 1)
        logits_neg = torch.einsum('bf,fk->bk', [query, negatives])  # (B, K)
        logits = torch.cat([logits_pos, logits_neg], dim=1)  # (B, 1+K)
        logits.div_(self.temperature)
        labels = torch.zeros(logits.size(0),
                             dtype=torch.long,
                             device=logits.device)
        loss = F.cross_entropy(logits, labels)

        # b & d
        logits_pseudo_neg = torch.einsum('bf,fk->bk',
                                         [pseudo, negatives])  # (B, K)
        if self.contrast_mode == 'queue':
            logits_pseudo_neg.div_(self.pseudo_temperature)
            loss_pseudo, mask_pseudo_neg = self._pseudo_loss_against_queue(
                logits, logits_pseudo_neg, threshold)
            return loss, logits, labels, loss_pseudo, mask_pseudo_neg
        elif self.contrast_mode == 'batch':
            logits_pseudo_neg.div_(self.pseudo_temperature)
            loss_pseudo, probs_pseudo_neg = self._pseudo_loss_against_batch(
                logits, logits_pseudo_neg, threshold)
            return loss, logits, labels, loss_pseudo, probs_pseudo_neg
        else:
            raise NotImplementedError
Example #11
def mask_padded_values(xs: _torch.FloatTensor, n: _torch.LongTensor,
                       mask_value: float = -float('inf'),
                       mutate: bool = False):
    """Turns padded values into given mask value.

    Args:
        xs: A tensor of size (batch_size, list_size, 1) containing padded
            values.
        n: A tensor of size (batch_size) containing list size of each query.
        mask_value: The value to mask with (default: -inf).
        mutate: Whether to mutate the values of xs or return a copy.
    """
    mask = _torch.repeat_interleave(
        _torch.arange(xs.shape[1], device=xs.device).reshape((1, xs.shape[1])),
        xs.shape[0], dim=0)
    n_mask = _torch.repeat_interleave(
        n.reshape((n.shape[0], 1)), xs.shape[1], dim=1)
    if not mutate:
        xs = xs.clone()
    xs[mask >= n_mask] = mask_value
    return xs
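Example usage (illustrative): mask out list entries beyond each query's true length.

import torch

xs = torch.arange(8.0).reshape(2, 4, 1)   # two lists of length 4, padded
n = torch.tensor([2, 3])                  # true list sizes
print(mask_padded_values(xs, n).squeeze(-1))
# tensor([[0., 1., -inf, -inf],
#         [4., 5., 6., -inf]])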
Example #12
def bbox_to_mask(bboxes: torch.FloatTensor,
                 bb_format='cxcywhd',
                 mask_size=2048) -> torch.BoolTensor:
    '''
    Convert bounding boxes to binary masks.

    Args:
        bboxes: bounding boxes, shape [N, bb_param]
        bb_format: bounding-box parametrization; only 'cxcywhd'
                   (center x, center y, width, height, angle in degrees) is supported
        mask_size: side length of the square output masks

    Return:
        masks: shape [N, mask_size, mask_size]
    '''
    assert isinstance(bboxes, torch.FloatTensor) and bboxes.dim() == 2
    if bb_format == 'cxcywhd':
        assert bboxes.shape[1] == 5
        bboxes = bboxes.clone()
        bboxes[:, 4] = bboxes[:, 4] / 180 * pi
        vertices = xywha2vertex(bboxes, is_degree=False)
        masks = vertex2masks(vertices, mask_size=mask_size)
    else:
        raise NotImplementedError()

    return masks
Example #13
    def xtrans(self, Xc: FloatTensor, Xe: LongTensor) -> FloatTensor:
        Xall = Xc.clone() if self.num_cont > 0 else torch.zeros(Xe.shape[0], 0)
        if self.num_enum > 0:
            Xall = torch.cat([Xall, self.emb_trans(Xe)], dim=1)
        return Xall
Example #14
def _show_torch(tensor: torch.FloatTensor, zoom: float = 1.) -> None:
    numpy_tensor = tensor.clone().mul(255).int().numpy().astype(
        'u1').transpose(1, 2, 0)
    _show_numpy(numpy_tensor, zoom)