def _prepare_batch(batch: Batch, device: torch.device,
                   non_blocking: bool) -> Batch:
    """Move an (x, y) batch pair onto *device* via ``convert_tensor``."""
    inputs, targets = batch
    return tuple(
        convert_tensor(item, device=device, non_blocking=non_blocking)
        for item in (inputs, targets)
    )
def _prepare_batch(batch: Union[Dict, List, Tuple],
                   device=None,
                   non_blocking: bool = False):
    """Prepare batch for training: pass to a device with options.

    Args:
        batch: A dict, tuple or list whose values ``convert_tensor`` accepts.
        device: Target device; ``None`` leaves tensors where they are.
        non_blocking: Forwarded to ``convert_tensor`` (asynchronous copy when
            the source lives in pinned memory).

    Returns:
        The same container type with every value moved to *device*.

    Raises:
        ValueError: If *batch* is not a dict, tuple or list.
    """
    if isinstance(batch, dict):
        return {
            key: convert_tensor(value,
                                device=device,
                                non_blocking=non_blocking)
            for key, value in batch.items()
        }
    elif isinstance(batch, tuple):
        # BUG FIX: the previous version returned a bare generator expression
        # here, so the "tuple" branch did not actually yield a tuple and
        # could only be consumed once.
        return tuple(
            convert_tensor(value, device=device, non_blocking=non_blocking)
            for value in batch
        )
    elif isinstance(batch, list):
        return [
            convert_tensor(value, device=device, non_blocking=non_blocking)
            for value in batch
        ]
    else:
        raise ValueError("Only dict, tuples and lists are valid for batch")
def text_prepare_batch(batch, device=None, non_blocking=False):
    """Prepare batch for training: pass to a device with options.

    *batch* is expected to expose ``.text`` and ``.label`` attributes
    (torchtext-style batch — presumably; confirm against the data loader).
    """
    text, label = batch.text, batch.label
    moved_text = convert_tensor(text, device=device, non_blocking=non_blocking)
    moved_label = convert_tensor(label, device=device,
                                 non_blocking=non_blocking)
    return (moved_text, moved_label)
# Example #4
    def prepare_validate_batch(self,
                               profile: Profile,
                               shared: Storage,
                               logger: Logger,
                               batch: Tuple[torch.Tensor],
                               device: Text,
                               non_blocking: bool = False):
        """
        Preparing batch of samples when validating. Implement this function to
        customize.

        Args:
            profile: Runtime profile defined in TOML file.
            shared: Shared storage in the whole lifecycle.
            logger: The logger named with this Task.
            batch: Raw batch provided by the data loader.
            device: Which device of the batch.
            non_blocking: Whether the action of moving the batch is blocking.

        Returns:
            Prepared batch.
        """
        # Build the device object once instead of once per tensor.
        target_device = torch.device(device)
        inputs, targets = batch
        return tuple(
            convert_tensor(item,
                           device=target_device,
                           non_blocking=non_blocking)
            for item in (inputs, targets)
        )
 def prepare_batch(self, batch, mode = 'valid'):
     """Move a batch onto ``self.device`` for the given phase.

     Args:
         batch: Dict with an "images" entry; train/valid batches also carry
             "coded_labels" and "primary_codes".
         mode: One of 'train', 'valid' or 'test'.

     Returns:
         For 'train'/'valid': ``(images, (coded_labels, primary_codes))``
         with images and coded labels on the device; primary codes are left
         untouched.  For 'test': ``(images, batch)``.

     Raises:
         ValueError: For an unrecognised *mode* (the previous version
             silently returned ``None``).
     """
     images = convert_tensor(batch["images"], device=self.device,
                             non_blocking=True)
     if mode in ('train', 'valid'):
         # The two phases had byte-identical bodies; merged into one branch.
         coded = convert_tensor(batch["coded_labels"], device=self.device,
                                non_blocking=True)
         return (images, (coded, batch["primary_codes"]))
     elif mode == 'test':
         # Hand the whole raw batch back alongside the converted images.
         return (images, batch)
     raise ValueError("unknown mode: {!r}".format(mode))
# Example #6
        def inference_step(engine, batch):
            """Evaluate one batch without gradients; return tensors + loss."""
            self.model.eval()
            with torch.no_grad():
                inputs, targets = batch
                inputs = convert_tensor(inputs, device=device,
                                        non_blocking=False)
                targets = convert_tensor(targets, device=device,
                                         non_blocking=False)
                predictions = self.model(inputs)
                assert len(predictions.shape) == 4, \
                    "assuming 4dim model output: NCHW"
                assert predictions.shape[1] == 1, \
                    "assuming singleton channel axis in model output"
                predictions = predictions.squeeze(dim=1)
                # Either restrict the loss to positions where target == 1,
                # or select everything (Ellipsis indexes the full tensor).
                if self.only_eval_where_true:
                    mask = targets.eq(1)
                else:
                    mask = ...

                loss = self.loss_fn(predictions[mask], targets[mask])
                return {
                    X_NAME: inputs,
                    Y_NAME: targets,
                    Y_PRED_NAME: predictions,
                    LOSS_NAME: loss,
                }
# Example #7
def _prepare_batch(batch, device=None, non_blocking=False):
    """Prepare batch for training: pass to a device with options.

    Only the first two elements (x, y) are converted; any trailing
    elements are passed through unchanged.
    """
    x, y, *extras = batch
    moved = (convert_tensor(item, device=device, non_blocking=non_blocking)
             for item in (x, y))
    return (*moved, *extras)
# Example #8
    def _forward(self, model, batch, mode, **kwargs):
        """Run *model* either to sample captions or for a training pass.

        In "sample" mode the batch layout is (..., feats, ..., feat_lens);
        in "train" mode it is (feats, caps, keys, ..., feat_lens, cap_lens)
        and ``kwargs`` must carry "key2refs" and "scorer".
        """
        assert mode in ("train", "sample")

        if mode == "sample":
            # SJTUDataSetEval
            feats = convert_tensor(batch[1].float(),
                                   device=self.device,
                                   non_blocking=True)
            return model(feats, batch[-1], mode="sample", **kwargs)

        # mode is "train"
        feats, caps, keys = batch[0], batch[1], batch[2]
        feat_lens, cap_lens = batch[-2], batch[-1]

        feats = convert_tensor(feats.float(),
                               device=self.device,
                               non_blocking=True)
        caps = convert_tensor(caps.long(),
                              device=self.device,
                              non_blocking=True)

        assert "key2refs" in kwargs, "missing references"
        return model(feats, feat_lens, keys, kwargs["key2refs"],
                     max_length=max(cap_lens), scorer=kwargs["scorer"])
def _prepare_batch(batch: Batch, device: torch.device,
                   non_blocking: bool) -> Tuple[Any, Any]:
    """Move *batch* to *device*; a tuple batch is unpacked as (x, y)."""
    if not isinstance(batch, tuple):
        # Non-tuple batches are handed to convert_tensor wholesale.
        return convert_tensor(batch, device=device, non_blocking=non_blocking)
    inputs, targets = batch
    return (convert_tensor(inputs, device=device, non_blocking=non_blocking),
            convert_tensor(targets, device=device, non_blocking=non_blocking))
# Example #10
def prepare_batch_for_tabolar_mode(batch, device, non_blocking):
    """Move (x, y, image names, tabular data) onto *device*.

    Image names are returned as a plain list; the tabular tensor is cast
    to float after conversion.
    """
    x, y, image_name_list, tabolar_data = batch
    moved_x = convert_tensor(x, device=device, non_blocking=non_blocking)
    moved_y = convert_tensor(y, device=device, non_blocking=non_blocking)
    moved_tab = convert_tensor(tabolar_data,
                               device=device,
                               non_blocking=non_blocking).float()
    return (moved_x, moved_y, list(image_name_list), moved_tab)
# Example #11
    def prepare_batch(batch, device, non_blocking):
        """Prepare batch for training: pass to a device with options.

        *batch* is expected to expose ``.x`` and ``.y`` attributes.
        """
        moved_x = convert_tensor(batch.x, device=device,
                                 non_blocking=non_blocking)
        moved_y = convert_tensor(batch.y, device=device,
                                 non_blocking=non_blocking)
        return (moved_x, moved_y)
# Example #12
def prepare_batch(batch, device=None, non_blocking=False):
    """Move the "features"/"ehull" entries of a dict batch onto *device*."""
    def _to_device(value):
        # Local helper so the conversion kwargs are written only once.
        return convert_tensor(value, device=device, non_blocking=non_blocking)

    return _to_device(batch["features"]), _to_device(batch["ehull"])
# Example #13
def _prepare_batch(batch, device=None, non_blocking=False):
    """Prepare batch for training: pass to a device with options."""
    # Imported lazily so the module does not need ignite at import time.
    from ignite.utils import convert_tensor

    inputs, targets = batch
    return tuple(
        convert_tensor(item, device=device, non_blocking=non_blocking)
        for item in (inputs, targets)
    )
# Example #14
 def _prepare_batch(batch, device=None, non_blocking=False):
     """Prepare batch for evaluation: pass to a device with options.

     The input side unpacks to (seq, lens, scalars, true_indices); the
     indices are not converted and are appended as a third element.
     """
     x, y = batch
     seq, lens, scalars, true_indices = x
     moved_inputs = convert_tensor((seq, lens, scalars), device=device,
                                   non_blocking=non_blocking)
     moved_targets = convert_tensor(y, device=device,
                                    non_blocking=non_blocking)
     return (moved_inputs, moved_targets, true_indices)
# Example #15
def _prepare_batch(batch, device=None, non_blocking=False):
    """Move (x, y) to *device*; ids and patch locations pass through."""
    x, y, ids, patch_locations = batch
    moved = [convert_tensor(item, device=device, non_blocking=non_blocking)
             for item in (x, y)]
    return (moved[0], moved[1], ids, patch_locations)
def inference_prepare_batch_f32(batch, device, non_blocking):
    """Move image (and optional mask) to *device*; the mask becomes long.

    Missing "mask"/"meta" keys yield ``None`` in the returned triple.
    """
    image = convert_tensor(batch['image'], device, non_blocking=non_blocking)
    mask = batch.get('mask')
    if mask is not None:
        mask = convert_tensor(mask, device, non_blocking=non_blocking).long()
    return image, mask, batch.get('meta')
# Example #17
def prepare_test_batch(batch, device=None, non_blocking=False):
    """Prepare batch for training: pass to a device with options.

    The leading ``index`` element takes no part in the model computation,
    so it is returned without conversion.
    """
    index, x, y = batch
    moved_x = convert_tensor(x, device=device, non_blocking=non_blocking)
    moved_y = convert_tensor(y, device=device, non_blocking=non_blocking)
    return (index, moved_x, moved_y)
def _prepare_batch(batch, device=None, non_blocking=False):
    """
    Prepare batch for training: pass to a device with options.

    Expects a flat (x1, x2, y1, y2) 4-tuple; every element is converted.
    """
    x1, x2, y1, y2 = batch
    return tuple(
        convert_tensor(item, device=device, non_blocking=non_blocking)
        for item in (x1, x2, y1, y2)
    )
def prepare_dali_batch(batch, device=None, non_blocking=False):
    """Unpack a DALI pipeline batch and move (data, label) to *device*."""
    # The pipeline delivers a list whose first element is a dict with
    # "data" and "label" entries.
    payload = batch[0]
    images = payload["data"]
    # Drop singleton axes from the labels and cast to int64.
    labels = payload["label"].squeeze().long()

    return (convert_tensor(images, device=device, non_blocking=non_blocking),
            convert_tensor(labels, device=device, non_blocking=non_blocking))
# Example #20
def _prepare_batch(
    batch: Sequence[torch.Tensor], device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False
) -> Tuple[Union[torch.Tensor, Sequence, Mapping, str, bytes], ...]:
    """Prepare batch for training: pass to a device with options."""
    inputs, targets = batch
    moved = [convert_tensor(item, device=device, non_blocking=non_blocking)
             for item in (inputs, targets)]
    return tuple(moved)
# Example #21
 def _evaluate(evaluator, batch):
     """Score one batch under no_grad and return the loss as a float."""
     model.eval()
     with torch.no_grad():
         feats, ref_feats, indices = batch
         feats = convert_tensor(feats, device)
         ref_feats = convert_tensor(ref_feats, device)
         indices = convert_tensor(indices, device)
         score, mask = model(feats, ref_feats, indices)
         loss = criterion(score, mask)
     return loss.cpu().item()
# Example #22
def prepare_batch(batch, device=None, non_blocking=False):
    """Prepare batch for training: pass to a device with options."""
    # The input side is itself a tuple of tensors: convert each element
    # and keep the tuple shape; the target is a single tensor.
    xb, yb = batch
    moved_inputs = []
    for item in xb:
        moved_inputs.append(
            convert_tensor(item, device=device, non_blocking=non_blocking))
    moved_target = convert_tensor(yb, device=device,
                                  non_blocking=non_blocking)
    return (tuple(moved_inputs), moved_target)
# Example #23
def _prepare_batch(batch, device=None, non_blocking=False):
    """Prepare batch for training: pass to a device with options.

    Moves (x, attention_mask, y) onto *device*; the target is cast to
    float after conversion.
    """
    x, attention_mask, y = batch
    moved = [convert_tensor(item, device=device, non_blocking=non_blocking)
             for item in (x, attention_mask, y)]
    moved[2] = moved[2].float()
    return tuple(moved)
# Example #24
 def _convert_dict_tensor(self, dict_tensor, device, non_blocking):
     """Move every value of *dict_tensor* onto *device*.

     A non-empty mapping becomes a ``DataDict`` of converted values; an
     empty one is handed to ``convert_tensor`` directly.
     """
     if dict_tensor:
         return DataDict({
             key: convert_tensor(value, device=device,
                                 non_blocking=non_blocking)
             for key, value in dict_tensor.items()
         })
     return convert_tensor(dict_tensor,
                           device=device,
                           non_blocking=non_blocking)
# Example #25
def prepare_batch_conv(batch, device=None, non_blocking=False):
    """Move (features, ehull) to *device*, adding a channel axis to x."""
    features = convert_tensor(batch["features"],
                              device=device,
                              non_blocking=non_blocking)
    # Insert a singleton middle axis for 1-D convolutions: (N, F) -> (N, 1, F).
    shape = features.shape
    features = features.reshape((shape[0], 1, shape[1]))
    target = convert_tensor(batch["ehull"],
                            device=device,
                            non_blocking=non_blocking)
    return features, target
# Example #26
 def _forward(model, batch):
     """Run *model* on one batch; filenames are unpacked but unused."""
     inputs, targets, filenames = batch
     inputs = convert_tensor(inputs, device=DEVICE, non_blocking=True)
     targets = convert_tensor(targets.float(), device=DEVICE,
                              non_blocking=True)
     clip_level_output, frame_level_output = model(inputs)
     return clip_level_output, frame_level_output, targets
# Example #27
def test_convert_tensor():
    """Exercise convert_tensor over tensors, containers and plain objects."""
    assert torch.is_tensor(convert_tensor(torch.tensor([0.0])))
    assert torch.is_tensor(
        convert_tensor(torch.tensor([0.0]), device='cpu', non_blocking=True))
    assert torch.is_tensor(
        convert_tensor(torch.tensor([0.0]), device='cpu', non_blocking=False))

    converted_list = convert_tensor([torch.tensor([0.0]),
                                     torch.tensor([0.0])])
    assert isinstance(converted_list, list)
    assert torch.is_tensor(converted_list[0])
    assert torch.is_tensor(converted_list[1])

    converted_tuple = convert_tensor((torch.tensor([0.0]),
                                      torch.tensor([0.0])))
    assert isinstance(converted_tuple, tuple)
    assert torch.is_tensor(converted_tuple[0])
    assert torch.is_tensor(converted_tuple[1])

    converted_dict = convert_tensor({'a': torch.tensor([0.0]),
                                     'b': torch.tensor([0.0])})
    assert isinstance(converted_dict, dict)
    assert torch.is_tensor(converted_dict['a'])
    assert torch.is_tensor(converted_dict['b'])

    # Strings pass through unchanged; unsupported types raise TypeError.
    assert convert_tensor('a') == 'a'
    with pytest.raises(TypeError):
        convert_tensor(12345)
# Example #28
    def uda_process_function(engine, labelled_batch):
        """One UDA training step: supervised loss (optionally annealed via
        TSA) plus an unsupervised consistency loss between the original and
        augmented unlabelled views.  Returns scalar losses for logging.
        """

        x, y = _prepare_batch(labelled_batch, device=device, non_blocking=True)

        if with_UDA:
            # Pull the next (original, augmented) unlabelled pair from the
            # shared iterator and move both views onto the training device.
            unsup_x, unsup_aug_x = next(train_unlabelled_loader_iter)
            unsup_x = convert_tensor(unsup_x, device=device, non_blocking=True)
            unsup_aug_x = convert_tensor(unsup_aug_x,
                                         device=device,
                                         non_blocking=True)

        model.train()
        # Supervised part
        y_pred = model(x)
        loss = criterion(y_pred, y)

        supervised_loss = loss
        # 0-based training step for the TSA schedule.
        step = engine.state.iteration - 1
        if with_tsa and with_UDA:
            # Training Signal Annealing: `tsa` filters predictions/targets
            # by schedule and the supervised loss is recomputed on the rest.
            new_y_pred, new_y = tsa(y_pred, y, step=step)
            new_loss = criterion(new_y_pred, new_y)

            # Stash both raw and annealed losses for logging handlers.
            engine.state.tsa_log = {
                "new_y_pred": new_y_pred,
                "loss": loss.item(),
                "tsa_loss": new_loss.item()
            }
            supervised_loss = new_loss

        # Unsupervised part
        if with_UDA:
            # The clean view's predictions act as a fixed target (detach),
            # compared against log-probs of the augmented view.
            unsup_orig_y_pred = model(unsup_x).detach()
            unsup_orig_y_probas = torch.softmax(unsup_orig_y_pred, dim=-1)

            unsup_aug_y_pred = model(unsup_aug_x)
            unsup_aug_y_probas = torch.log_softmax(unsup_aug_y_pred, dim=-1)

            # presumably a KL-divergence criterion (log-probs vs probs)
            # — TODO confirm against where consistency_criterion is built.
            consistency_loss = consistency_criterion(unsup_aug_y_probas,
                                                     unsup_orig_y_probas)

        final_loss = supervised_loss

        if with_UDA:
            # lam weights the consistency term against the supervised term.
            final_loss += lam * consistency_loss

        optimizer.zero_grad()
        final_loss.backward()
        optimizer.step()

        return {
            'supervised batch loss': supervised_loss.item(),
            'consistency batch loss':
            consistency_loss.item() if with_UDA else 0.0,
            'final batch loss': final_loss.item(),
        }
# Example #29
    def _forward(model, batch, mode, **kwargs):
        """Run *model* to sample captions ("sample") or to train ("train").

        Training supports two regimes selected by kwargs["train_mode"]:
        "XE" (cross entropy, optionally teacher-forced) and "scst"
        (self-critical sequence training with an external scorer).
        """
        assert mode in ("train", "sample")

        if mode == "sample":
            # SJTUDataSetEval
            # Evaluation batch layout: features at [1], lengths at [-1].
            feats = batch[1]
            feat_lens = batch[-1]

            feats = convert_tensor(feats.float(),
                                   device=device,
                                   non_blocking=True)
            sampled = model(feats, feat_lens, mode="sample", **kwargs)
            return sampled

        # mode is "train"
        assert "train_mode" in kwargs, "need to provide training mode (XE or scst)"
        assert kwargs["train_mode"] in ("XE", "scst"), "unknown training mode"

        # Training batch layout: (feats, caps, keys, ..., feat_lens, cap_lens).
        feats = batch[0]
        caps = batch[1]
        keys = batch[2]
        feat_lens = batch[-2]
        cap_lens = batch[-1]
        feats = convert_tensor(feats.float(),
                               device=device,
                               non_blocking=True)
        caps = convert_tensor(caps.long(),
                              device=device,
                              non_blocking=True)

        
        if kwargs["train_mode"] == "XE":
            # trained by cross entropy loss
            assert "tf" in kwargs, "need to know whether to use teacher forcing"
            ce = torch.nn.CrossEntropyLoss()
            # pack labels to remove padding from caption labels
            targets = torch.nn.utils.rnn.pack_padded_sequence(
                caps, cap_lens, batch_first=True).data
            if kwargs["tf"]:
                # Teacher forcing: feed ground-truth captions at each step.
                probs = model(feats, feat_lens, caps, cap_lens, mode="forward")
            else:
                # Free running: sample then pack to align with the packed
                # targets before computing the loss.
                sampled = model(feats, feat_lens, mode="sample", max_length=max(cap_lens))
                probs = torch.nn.utils.rnn.pack_padded_sequence(
                    sampled["probs"], cap_lens, batch_first=True).data
                probs = convert_tensor(probs, device=device, non_blocking=True)
            loss = ce(probs, targets)
            output = {"loss": loss}
        else:
            # trained by self critical reward (reinforcement learning)
            assert "key2refs" in kwargs, "missing references"
            scorer = kwargs.get("scorer", None)
            output = model(feats, feat_lens, keys, kwargs["key2refs"], 
                           mode="scst", max_length=max(cap_lens), scorer=scorer)
        
        return output
# Example #30
def _prepare_batch(batch, device=None, non_blocking=False, t_type=torch.FloatTensor):
    """Reshape, cast and place an (x, y) batch.

    x drops its dim-1 axis; y gains axes at dims 2 and 3 and is cast to
    LongTensor.  NOTE(review): the CUDA branch only triggers when *device*
    is the literal string "cuda" (a torch.device would take the CPU path)
    — behavior preserved as-is.
    """
    raw_x, raw_y = batch
    shaped_x = convert_tensor(torch.squeeze(raw_x, 1), device=device,
                              non_blocking=non_blocking)
    shaped_y = convert_tensor(torch.unsqueeze(raw_y, 2), device=device,
                              non_blocking=non_blocking)
    shaped_x = shaped_x.type(t_type)
    shaped_y = torch.unsqueeze(shaped_y, 3).type(torch.LongTensor)
    if device == "cuda":
        return (shaped_x.cuda(), shaped_y.cuda())
    return (shaped_x, shaped_y)