Code example #1
def create_baseline_trainer(model, optimizer=None, name='train', device=None):

    if device is not None:
        model.to(device)

    is_train = optimizer is not None

    def _update(engine, batch):
        model.train(is_train)

        with torch.set_grad_enabled(is_train):
            images, labels = convert_tensor(batch, device=device)
            preds = model(images)
            loss = F.cross_entropy(preds, labels)

        if is_train:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        return {'loss': loss.item(), 'y_pred': preds, 'y': labels}

    engine = Engine(_update)
    engine.name = name
    metrics.Average(lambda o: o['loss']).attach(engine, 'single_loss')
    metrics.Accuracy(lambda o: (o['y_pred'], o['y'])).attach(
        engine, 'single_acc')
    return engine
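The factory above doubles as a trainer and an evaluator: passing an optimizer enables gradient updates, while omitting it yields a pure evaluation engine. A minimal wiring sketch, assuming model, optimizer, train_loader and val_loader are defined elsewhere (those names and the CUDA device are illustrative, not part of the original project):

from ignite.engine import Events

# Hypothetical wiring of the factory above; model, optimizer,
# train_loader and val_loader are assumed to exist.
trainer = create_baseline_trainer(model, optimizer=optimizer,
                                  name='train', device='cuda')
evaluator = create_baseline_trainer(model, optimizer=None,
                                    name='val', device='cuda')

@trainer.on(Events.EPOCH_COMPLETED)
def run_validation(engine):
    evaluator.run(val_loader)
    print(evaluator.state.metrics)  # {'single_loss': ..., 'single_acc': ...}

trainer.run(train_loader, max_epochs=10)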
Code example #2
def test_log_metrics(capsys):
    engine = Engine(lambda e, b: None)
    engine.logger = setup_logger(format="%(message)s")
    engine.run(list(range(100)), max_epochs=2)
    log_metrics(engine, "train")
    captured = capsys.readouterr()
    assert captured.err.split("\n")[-2] == "train [2/200]: {}"
Code example #3
    def dcase_predict(self,
                      experiment_path: str,
                      feature_file: str,
                      predict_scp: str,
                      output: str = "predition.csv",
                      **kwargs):
        """kwargs: {'max_length': int, 'method': str, 'beam_size': int}"""

        dump = torch.load(os.path.join(experiment_path, "saved.pth"),
                          map_location="cpu")
        # Load previous training config
        config = dump["config"]

        vocabulary = torch.load(config["vocab_file"])
        model = self._get_model(config, len(vocabulary))
        model.load_state_dict(dump["model"])
        # Some scaler (sklearn standardscaler)
        scaler = dump["scaler"]
        zh = config["zh"]
        model = model.to(self.device)

        dataset = SJTUDatasetEval(feature=feature_file,
                                  eval_scp=predict_scp,
                                  transform=scaler.transform)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 shuffle=False,
                                                 collate_fn=collate_fn((1, )),
                                                 batch_size=32,
                                                 num_workers=0)

        pbar = ProgressBar(persist=False, ascii=True)
        predictions = []

        def _sample(engine, batch):
            # batch: [keys, feats, feat_lens]
            with torch.no_grad():
                model.eval()
                keys = batch[0]
                output = self._forward(model, batch, mode="sample", **kwargs)
                seqs = output["seqs"].cpu().numpy()
                for idx, seq in enumerate(seqs):
                    caption = self._convert_idx2sentence(seq,
                                                         vocabulary,
                                                         zh=zh)
                    predictions.append({
                        "file_name": keys[idx] + ".wav",
                        "caption_predicted": caption
                    })

        sample_engine = Engine(_sample)
        pbar.attach(sample_engine)
        sample_engine.run(dataloader)

        pred_df = pd.DataFrame(predictions)
        pred_df.to_csv(os.path.join(experiment_path, output), index=False)
Code example #4
    def _on_iteration_complete_val(self, trainer_engine: Engine,
                                   evaluator: Engine,
                                   val_data_loader: utils.DataLoader):
        iter_num = trainer_engine.state.iteration
        epoch = trainer_engine.state.epoch
        if iter_num % 50 == 0:
            # Write evaluation loss
            evaluator.run(val_data_loader)
            metrics = evaluator.state.metrics
            avg_nll = metrics['nll']
            self.writer.add_scalar("validation/loss", avg_nll, iter_num)
            print(f"Validation Results - Epoch: {epoch} - Iter: {iter_num} "
                  f"Avg loss: {avg_nll:.5f}")
Code example #5
    def _init_evaluator_engine_ensemble(self) -> Engine:
        # noinspection PyUnusedLocal
        def _inference(engine_, batch):
            for model in self.ens_models:
                model.eval()

            with torch.no_grad():
                x, y = batch
                x = x.to(device=self.device, non_blocking=True)
                y = y.to(device=self.device, non_blocking=True)

                avg_pred = torch.zeros_like(y)
                for model in self.ens_models:
                    y_pred = torch.sigmoid(model(x))
                    avg_pred += y_pred

                avg_pred /= self.len_models

                return avg_pred, y

        engine = Engine(_inference)

        for name, metric in self.val_metrics.items():
            metric.attach(engine, name)

        return engine
Code example #6
def create_supervised_evaluator(algorithm,
                                metrics=None,
                                device=None,
                                non_blocking=False,
                                prepare_batch=_prepare_batch,
                                output_transform=lambda x: x.item()):
    metrics = metrics or {}

    if device:
        algorithm.model.to(device)

    def _inference(engine, batch):
        algorithm.model.eval()
        algorithm.training = False
        with torch.no_grad():
            batch = prepare_batch(batch,
                                  device=device,
                                  non_blocking=non_blocking)
            # TODO FIX THIS ASAP
            loss = algorithm.loss(*batch)
            return output_transform(loss)

    engine = Engine(_inference)

    for name, metric in metrics.items():
        metric.attach(engine, name)

    return engine
Code example #7
def create_supervised_trainer(
    model,
    optimizer,
    loss_fn,
    cuda=True,
    device=None,
    non_blocking=False,
    output_transform=lambda x, y, y_pred, loss: loss.item()):
    if device:
        model.to(device)

    def _update(engine, batch):
        model.train()
        optimizer.zero_grad()
        data = batch
        batch_inputs = data[:-1]
        batch_target = data[-1]

        batch_inputs = list(
            map(wrap, batch_inputs, [cuda for _ in range(len(batch_inputs))]))
        batch_target = Variable(batch_target)

        if cuda:
            batch_target = batch_target.cuda()

        batch_output = model(*batch_inputs)
        loss = loss_fn(batch_output, batch_target)
        loss.backward()  # same as the one in closure() defined in trainer

        optimizer.step()
        return output_transform(batch_inputs, batch_target, batch_output, loss)

    return Engine(_update)
Code example #8
def create_supervised_evaluator(model, metrics={}, device=None):
    """
    Factory function for creating an evaluator for supervised models

    Args:
        model (`torch.nn.Module`): the model to train
        metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics
        device (str, optional): device type specification (default: None).
            Applies to both model and batches.

    Returns:
        Engine: an evaluator engine with supervised inference function
    """
    if device:
        model.to(device)

    def _inference(engine, batch):
        model.eval()
        with torch.no_grad():
            x, y = _prepare_batch(batch, device=device)
            y_pred = model(x)
            return y_pred, y

    engine = Engine(_inference)

    for name, metric in metrics.items():
        metric.attach(engine, name)

    return engine
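In typical use, the metrics argument of this factory is a dict of ignite metric objects computed from the (y_pred, y) pairs the inference function returns. A small usage sketch, assuming model and val_loader are defined elsewhere:

import torch.nn.functional as F
from ignite.metrics import Accuracy, Loss

# Hypothetical invocation; model and val_loader are assumed to exist.
evaluator = create_supervised_evaluator(
    model,
    metrics={'acc': Accuracy(), 'nll': Loss(F.cross_entropy)},
    device='cuda')
state = evaluator.run(val_loader)
print(state.metrics)  # {'acc': ..., 'nll': ...}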
Code example #9
def create_supervised_evaluator(model, metrics=None,
                                device=None, non_blocking=False,
                                pred_collector_function=None, extraction_target="window",
                                output_transform=lambda x, y, y_pred: (y_pred, y,)):
    """Adapted code from Ignite-Library in order to allow for handling of graphs."""

    metrics = metrics or {}

    def _inference(engine, batch):
        model.eval()
        with torch.no_grad():
            batch = transform_batch(extraction_target, batch, device)
            response = model(batch)

            if pred_collector_function is not None:
                pred_collector_function(response)
            
            return output_transform(batch.x, batch.y, response)

    engine = Engine(_inference)

    for name, metric in metrics.items():
        metric.attach(engine, name)

    return engine
Code example #10
File: utils.py Project: Likhit/AntsChallenge
    def __init__(self, ds_path, save_dir, view_radius, device):
        self.ds_path = ds_path
        self.device = device
        self.view_radius = view_radius
        self.net, self.net_name = self.define_net()
        self.net.to(device)
        self.optim = torch.optim.Adam(self.net.parameters())
        self.trainer = Engine(self.train_update_func)
        self.evaluator = Engine(self.val_update_func)
        self.saver = ModelCheckpoint(save_dir,
                                     self.net_name,
                                     save_interval=1,
                                     n_saved=20,
                                     require_empty=False)
        self._add_metrics()
        self._add_event_handlers()
Code example #11
File: ignite_helper.py Project: Cirets0h/atml19
def create_supervised_rnn_evaluator(model,
                                    metrics={},
                                    device=None,
                                    non_blocking=False,
                                    prepare_batch=_prepare_batch,
                                    output_transform=lambda x, y, y_pred: (
                                        y_pred,
                                        y,
                                    )):
    if device:
        model.to(device)

    def _inference(engine, batch):
        model.eval()
        with torch.no_grad():
            x, y = prepare_batch(batch,
                                 device=device,
                                 non_blocking=non_blocking)
            y_pred, hidden = model(x, None)
            return output_transform(x, y, y_pred)

    engine = Engine(_inference)

    for name, metric in metrics.items():
        metric.attach(engine, name)

    return engine
Code example #12
def create_supervised_trainer(model,
                              optimizer,
                              loss_fn,
                              device=None,
                              non_blocking=False):
    """
    Factory function for creating a trainer for supervised models

    Args:
        model (`torch.nn.Module`): the model to train
        optimizer (`torch.optim.Optimizer`): the optimizer to use
        loss_fn (torch.nn loss function): the loss function to use
        device (str, optional): device type specification (default: None).
            Applies to both model and batches.
        non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
            with respect to the host. For other cases, this argument has no effect.

    Returns:
        Engine: a trainer engine with supervised update function
    """
    if device:
        model.to(device)

    def _update(engine, batch):
        model.train()
        optimizer.zero_grad()
        x, y = _prepare_batch(batch, device=device, non_blocking=non_blocking)
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        loss.backward()
        optimizer.step()
        return loss.item()

    return Engine(_update)
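Because the update function returns loss.item(), the per-iteration loss can be exposed as a running metric and displayed with ignite's progress bar. A sketch assuming model, optimizer, loss_fn and train_loader exist; the ProgressBar import path is the ignite.contrib location used elsewhere in these examples and may differ in newer ignite releases:

from ignite.contrib.handlers import ProgressBar
from ignite.metrics import RunningAverage

# Hypothetical usage of the factory above.
trainer = create_supervised_trainer(model, optimizer, loss_fn, device='cuda')
RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')
ProgressBar(persist=True).attach(trainer, metric_names=['loss'])
trainer.run(train_loader, max_epochs=5)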
Code example #13
def create_supervised_evaluator(model,
                                metrics=None,
                                y_to_score=None,
                                pred_to_score=None,
                                cuda=False):
    """
    Factory function for creating an evaluator for supervised models
    Args:
        model (torch.nn.Module): the model to train
        metrics (dict of str: Metric): a map of metric names to Metrics
        cuda (bool, optional): whether or not to transfer batch to GPU (default: False)
    Returns:
        Engine: an evaluator engine with supervised inference function
    """
    def _inference(engine, batch):
        model.eval()
        x, y = _prepare_batch(batch)
        y_pred = model(x)
        if y_to_score is not None:
            y = y_to_score(y, batch)

        if pred_to_score is not None:
            y_pred = pred_to_score(y_pred, batch)

        return batch.qid, y_pred, y

    engine = Engine(_inference)

    if metrics is None:
        metrics = {}

    for name, metric in metrics.items():
        metric.attach(engine, name)

    return engine
Code example #14
File: evaluate.py Project: sally20921/dramaqa2020
def get_evaluator(args, model, loss_fn, metrics={}):
    # for coloring terminal output
    from termcolor import colored 

    sample_count = 0

    def _inference(evaluator, batch):
        nonlocal sample_count

        model.eval()
        with torch.no_grad():
            net_inputs, target = prepare_batch(args, batch, model.vocab)
            if net_inputs['subtitle'].nelement() == 0:
                import ipdb; ipdb.set_trace()  # XXX DEBUG
            y_pred = model(**net_inputs)
            batch_size = y_pred.shape[0]
            loss, stats = loss_fn(y_pred, target)

            vocab = model.vocab
            
            return loss.item(), stats, batch_size, y_pred, target  # TODO: add false_answer metric

    engine = Engine(_inference)

    metrics = {**metrics, **{
        'loss': StatMetric(output_transform=lambda x: (x[0], x[2])),
        'top1_acc': StatMetric(output_transform=lambda x: ((x[3].argmax(dim=-1) == x[4]).float().mean().item(), x[2]))
    }}
    if hasattr(loss_fn, 'get_metric'):
        metrics = {**metrics, **loss_fn.get_metric()}

    for name, metric in metrics.items():
        metric.attach(engine, name)

    return engine
Code example #15
    def _init_bayesian_evaluator_engine(self, model, eval_metrics,
                                        device) -> Engine:
        m = math.ceil(
            len(self.data_loaders.val_dataset) /
            self.config.data.batch_size_val)

        def _inference(engine_, batch):
            model.eval()

            with torch.no_grad():
                x, y = batch
                x = x.to(device, non_blocking=True)
                y = y.to(device, non_blocking=True)

                if self.train_cfg.beta_type == "Blundell":
                    beta = 2**(m - (engine_.state.iteration + 1)) / (2**m - 1)
                elif self.train_cfg.beta_type == "Soenderby":
                    beta = min(
                        self.trainer.state.epoch /
                        (self.train_cfg.num_epochs // 4), 1)
                elif self.train_cfg.beta_type == "Standard":
                    beta = 1 / m
                else:
                    beta = 0

                outputs, kl = model.probforward(x)

                return outputs, y, {'kl': kl, 'beta': beta}

        engine = Engine(_inference)

        for name, metric in eval_metrics.items():
            metric.attach(engine, name)

        return engine
Code example #16
    def _init_bayesian_trainer_engine(self, model, optimizer, vi,
                                      device) -> Engine:
        m = math.ceil(
            len(self.data_loaders.train_dataset) / self.config.data.batch_size)

        def _update(engine_, batch):
            model.train()
            x, y = batch
            x = x.to(device=device, non_blocking=True)
            y = y.to(device=device, non_blocking=True)

            if self.train_cfg.beta_type == "Blundell":
                beta = 2**(m - (engine_.state.iteration + 1)) / (2**m - 1)
            elif self.train_cfg.beta_type == "Soenderby":
                beta = min(
                    engine_.state.epoch / (self.train_cfg.num_epochs // 4), 1)
            elif self.train_cfg.beta_type == "Standard":
                beta = 1 / m
            else:
                beta = 0

            optimizer.zero_grad()
            outputs, kl = model.probforward(x)
            loss = vi(outputs, y, kl, beta)
            loss.backward()
            optimizer.step()
            return loss.item()

        return Engine(_update)
Code example #17
File: manager.py Project: antonsteenvoorden/ADSR
    def _set_trainer(self,
                     model,
                     optimizer,
                     loss_fn,
                     scheduler=None,
                     device='cpu',
                     non_blocking=False,
                     prepare_batch=_prepare_batch):
        if device:
            model.to(device)

        def _update(engine, batch):
            model.train()
            optimizer.zero_grad()
            x, y = prepare_batch(batch,
                                 device=device,
                                 non_blocking=non_blocking)
            y_pred = model(x)
            (combined_loss, relevance_loss) = loss_fn(y_pred, y)
            combined_loss.backward()
            clip_grad_norm_(model.parameters(), 1)
            optimizer.step()

            # Update LR
            if scheduler is not None:
                scheduler.step()

            combined_loss = combined_loss.item()
            return x, y, y_pred, combined_loss

        self.trainer = Engine(_update)
Code example #18
File: engine.py Project: keshava/dlsi
def create_supervised_evaluator(
    model,
    prepare_batch,
    metrics=None,
    device=None,
    non_blocking=False,
    output_transform=val_transform,
):
    metrics = metrics or {}

    if device:
        model.to(device)

    def _inference(engine, batch):
        model.eval()
        with torch.no_grad():
            x, y = prepare_batch(batch,
                                 device=device,
                                 non_blocking=non_blocking)
            y_pred = model(x)
            y_pred = _upscale_model_output(y_pred, x)
            return output_transform(x, y, y_pred)

    engine = Engine(_inference)

    for name, metric in metrics.items():
        metric.attach(engine, name)

    return engine
Code example #19
    def _init_trainer_engine(self) -> Engine:
        self.model.to(self.device)

        def _update(_engine, batch):
            self.model.train()

            x, y = batch
            x = x.to(device=self.device, non_blocking=True)
            y = y.to(device=self.device, non_blocking=True)

            pred, _, mu, var = self.model(x)

            loss, ce, mse, kl_div = self.vae_criterion(pred, y, x, x, mu, var)

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            return {
                'loss': loss.item(),
                'segment_loss': ce.item(),
                'kl_div': kl_div.item()
            }

        _engine = Engine(_update)

        if self.train_metrics is not None:
            for name, metric in self.train_metrics.items():
                metric.attach(_engine, name)

        return _engine
Code example #20
def create_unsupervised_trainer(model,
                                optimizer,
                                loss_fn,
                                device=None,
                                non_blocking=False,
                                prepare_batch=_prepare_batch):
    if device:
        model.to(device)

    def _update(engine, x):
        model.train()
        optimizer.zero_grad()
        x = prepare_batch(x, device=device, non_blocking=non_blocking)
        y = model(x, return_reconstruction=True)
        loss, loss_items = loss_fn(y, x, retlosses=True)
        loss.backward()
        optimizer.step()

        n = getattr(engine.state, 'avg_counter', 0)
        for key, val in loss_items.items():
            new_val = n * engine.state.metrics.get(key, 0) + val.item()
            engine.state.metrics[key] = new_val / (n + 1)

        engine.state.avg_counter = n + 1
        return loss

    return Engine(_update)
Code example #21
def get_trainer(args, model, loss_fn, optimizer):
    def update_model(trainer, batch):
        model.train()
        optimizer.zero_grad()
        net_inputs, target = prepare_batch(args, batch, model.vocab)
        y_pred = model(**net_inputs)
        batch_size = y_pred.shape[0]
        loss, stats = loss_fn(y_pred, target)
        loss.backward()
        optimizer.step()
        return loss.item(), stats, batch_size, y_pred.detach(), target.detach()

    trainer = Engine(update_model)

    metrics = {
        'loss': StatMetric(output_transform=lambda x: (x[0], x[2])),
        'top1_acc': StatMetric(output_transform=lambda x: ((x[3].argmax(dim=-1) == x[4]).float().mean().item(), x[2]))
    }
    if hasattr(loss_fn, 'get_metric'):
        metrics = {**metrics, **loss_fn.get_metric()}

    for name, metric in metrics.items():
        metric.attach(trainer, name)

    return trainer
Code example #22
def create_supervised_evaluator(model,
                                metrics={},
                                cuda=True,
                                device=None,
                                non_blocking=False,
                                output_transform=lambda x, y, y_pred: (
                                    y_pred,
                                    y,
                                )):

    if device:
        model.to(device)

    def _inference(engine, batch):
        model.eval()
        with torch.no_grad():
            batch_inputs = batch[:-1]
            batch_target = batch[-1]

            batch_inputs = list(
                map(wrap, batch_inputs,
                    [cuda for _ in range(len(batch_inputs))]))

            if cuda:
                batch_target = batch_target.cuda()
            batch_output = model(*batch_inputs)

            return output_transform(batch_inputs, batch_target, batch_output)

    engine = Engine(_inference)

    for name, metric in metrics.items():
        metric.attach(engine, name)

    return engine
Code example #23
File: engine.py Project: keshava/dlsi
def create_supervised_trainer_apex(
    model,
    optimizer,
    loss_fn,
    prepare_batch,
    device=None,
    non_blocking=False,
    output_transform=lambda x, y, y_pred, loss: {"loss": loss.item()},
):
    from apex import amp

    if device:
        model.to(device)

    def _update(engine, batch):
        model.train()
        optimizer.zero_grad()
        x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
        y_pred = model(x)
        loss = loss_fn(y_pred.squeeze(1), y.squeeze(1))
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        optimizer.step()
        return output_transform(x, y, y_pred, loss)

    return Engine(_update)
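amp.scale_loss only works after the model and optimizer have been registered with Apex, which this factory does not do itself. A sketch of the assumed setup step (the opt_level value is illustrative, and model, optimizer, loss_fn, prepare_batch and train_loader are assumed to be defined elsewhere):

from apex import amp

# Assumed to happen once, before the trainer is created.
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
trainer = create_supervised_trainer_apex(model, optimizer, loss_fn,
                                         prepare_batch, device='cuda')
trainer.run(train_loader, max_epochs=5)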
Code example #24
File: siamcos.py Project: chris4540/DD2430-ds-proj
    def evaluator(self):
        if not self._evaluator:
            evaluator = Engine(self.eval_inference)
            metrics = {
                "sim_acc":
                SiamSimAccuracy(margin=self.margin,
                                output_transform=lambda x:
                                (x['emb_vecs'], x['targets'])),
                "clsf_acc":
                Accuracy(
                    output_transform=lambda x: (x['cls_pred'], x['cls_true'])),
                "con_loss":
                Loss(self.loss_fns['contrastive'],
                     output_transform=lambda x: (x['emb_vecs'], x['targets'])),
                "clsf_loss":
                Loss(self.loss_fns['cross_entropy'],
                     output_transform=lambda x: (x['cls_pred'], x['cls_true']))
            }
            for name, metric in metrics.items():
                metric.attach(evaluator, name)

            # save down
            self._evaluator = evaluator

        return self._evaluator
Code example #25
File: evaluate.py Project: snuspl/vip_pipeline
def get_evaluator(args, model, loss_fn, metrics={}):
    def _inference(evaluator, batch):
        model.eval()
        with torch.no_grad():
            net_inputs, target = prepare_batch(args, batch, model.vocab)
            y_pred = model(**net_inputs)
            batch_size = y_pred.shape[0]
            loss, stats = loss_fn(y_pred, target)
            return loss.item(), stats, batch_size, y_pred, target  # TODO: add false_answer metric

    engine = Engine(_inference)

    metrics = {**metrics, **{
        'loss': StatMetric(output_transform=lambda x: (x[0], x[2])),
        'top1_acc': StatMetric(output_transform=lambda x: ((x[3].argmax(dim=-1) == x[4]).float().mean().item(), x[2]))
    }}
    if hasattr(loss_fn, 'get_metric'):
        metrics = {**metrics, **loss_fn.get_metric()}

    for name, metric in metrics.items():
        metric.attach(engine, name)

    return engine
Code example #26
File: test_stores.py Project: vfdev-5/ignite
def dummy_evaluator():
    def dummy_process_function(engine, batch):
        return 1, 0

    dummy_evaluator = Engine(dummy_process_function)

    return dummy_evaluator
Code example #27
def create_supervised_trainer(model, optimizer, loss_fn, device=None):
    """
    Factory function for creating a trainer for supervised models

    Args:
        model (`torch.nn.Module`): the model to train
        optimizer (`torch.optim.Optimizer`): the optimizer to use
        loss_fn (torch.nn loss function): the loss function to use
        device (str, optional): device type specification (default: None).
            Applies to both model and batches.

    Returns:
        Engine: a trainer engine with supervised update function
    """
    if device:
        model.to(device)

    def _update(engine, batch):
        model.train()
        optimizer.zero_grad()
        x, y = _prepare_batch(batch, device=device)
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        loss.backward()
        optimizer.step()
        return loss.item()

    return Engine(_update)
Code example #28
File: utils.py Project: mcd01/arvalus-experiments
def create_supervised_trainer(
    model,
    optimizer,
    loss_fn=None,
    device=None,
    non_blocking=False,
    output_transform=lambda x, y, y_pred, loss: loss.item()):
    """Adapted code from Ignite-Library in order to allow for handling of graphs."""
    def _update(engine, batch):
        model.train()
        optimizer.zero_grad()

        if device is not None:
            batch = batch.to(device, non_blocking=non_blocking)

        batch.x = torch.abs(batch.x + torch.normal(
            0, 0.01, size=batch.x.shape).to(device).double())  # add some noise
        y_pred_fine, y_pred_coarse = model(batch)

        y_true_fine = batch.y_full
        y_true_coarse = batch.y

        y_true = (y_true_fine, y_true_coarse)
        y_pred = (y_pred_fine, y_pred_coarse)

        loss = loss_fn(y_pred, y_true)

        loss.backward()

        optimizer.step()

        return output_transform(batch.x, y_true, y_pred, loss)

    return Engine(_update)
Code example #29
    def create_supervised_dp_trainer(model, optimizer, device=None, non_blocking=False,
                                     prepare_batch=_prepare_batch,
                                     output_transform=lambda x, y, y_pred, loss: loss.item()):

        """
        Factory function for creating a trainer for supervised models.

        Args:
            model (`torch.nn.Module`): the model to train.
            optimizer (`torch.optim.Optimizer`): the optimizer to use.
            loss_fn (torch.nn loss function): the loss function to use.
            device (str, optional): device type specification (default: None).
                Applies to batches after starting the engine. Model *will not* be moved.
                Device can be CPU, GPU or TPU.
            non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
                with respect to the host. For other cases, this argument has no effect.
            prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
                tuple of tensors `(batch_x, batch_y)`.
            output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value
                to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
            deterministic (bool, optional): if True, returns deterministic engine of type
                :class:`~ignite.engine.deterministic.DeterministicEngine`, otherwise :class:`~ignite.engine.engine.Engine`
                (default: False).

        Note:
            `engine.state.output` for this engine is defined by `output_transform` parameter and is the loss
            of the processed batch by default.

        Returns:
            Engine: a trainer engine with supervised update function.
        """
        if device:
            model.to(device)
            ema_model.to(device)

        def _update(engine, batch):
            model.train()
            optimizer.zero_grad()
            x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)

            # amp
            with autocast():
                if "snapmix_pipeline" in cfg.keys():
                    cfg_snapmix = cfg["snapmix_pipeline"]
                    x, ya, yb, lam_a, lam_b = snapmix(x, y, cfg_snapmix, model)
                    total_loss = model(x, ya, yb, lam_a, lam_b)
                else:
                    total_loss = model(x, y)
                    total_loss = total_loss.mean()

            scaler.scale(total_loss).backward()
            scaler.step(optimizer)
            writer.add_scalar("total loss", total_loss.cpu().data.numpy())
            scaler.update()
            ema.update(model)

            # return loss.item()
            return output_transform(x, y, None, total_loss)

        return Engine(_update)
Code example #30
File: engines.py Project: ignatovmg/mhc-adventures
def _my_create_evaluator(model,
                         metrics={},
                         add_index=False,
                         device=None,
                         non_blocking=False,
                         prepare_batch=ignite.engine._prepare_batch):
    if device:
        model.to(device)

    def _inference(engine, batch):
        model.eval()
        with torch.no_grad():
            x, y = prepare_batch(batch[:2],
                                 device=device,
                                 non_blocking=non_blocking)
            y_pred = model(x)

            #if add_index:
            index = batch[2]
            return {'prediction': y_pred, 'target': y, 'idx': index}
            #else:
            #    return {'prediction': y_pred, 'target': y}

    engine = Engine(_inference)

    for name, metric in metrics.items():
        metric.attach(engine, name)

    return engine