Example #1
    def eval_step(engine, batch):
        """
        eval_step(engine, batch)

        Parameters
        ----------
        engine: ignite.engine.Engine
            The evaluator engine.
        batch: tuple
            The batch to be evaluated.
        
        Returns
        -------
        output : torch.Tensor
            Shape (batch_size, num_channels, width, height)
        input : torch.Tensor
            Shape (batch_size, num_channels, width, height)
        target : torch.Tensor
            Shape (batch_size,)
        mu : torch.Tensor
            Shape (batch_size, latent_features)
        log_var : torch.Tensor
            Shape (batch_size, latent_features)
        """
        network.eval()
        input, target = _prepare_batch(batch,
                                       device=device,
                                       non_blocking=non_blocking)
        with torch.no_grad():
            output, mu, log_var = network(input, target)
        return output, input, target, mu, log_var
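A minimal sketch of how a step function like this is usually wired into an evaluator; it assumes network, device and non_blocking are defined in the enclosing scope, as in the snippet above.

from ignite.engine import Engine

# Engine calls eval_step once per batch; metrics attached to the evaluator
# consume the returned tuple via their output_transform.
evaluator = Engine(eval_step)
# evaluator.run(val_loader)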
Example #2
def _update(engine, batch):
    siamese_net.train()
    clsf_net.train()
    optimizer.zero_grad()
    x, targets = _prepare_batch(batch, device=device, non_blocking=pin_memory)
    c1, c2, _ = targets

    emb_vec1, emb_vec2 = siamese_net(x)
    contras_loss = con_loss_fn((emb_vec1, emb_vec2), targets)
    y1 = clsf_net(emb_vec1)
    y2 = clsf_net(emb_vec2)
    clsf_loss1 = cs_loss_fn(y1, c1)
    clsf_loss2 = cs_loss_fn(y2, c2)

    loss = contras_loss + (clsf_loss1 + clsf_loss2) * scale_factor
    loss.backward()
    optimizer.step()

    with torch.no_grad():
        cls_pred = torch.cat([y1, y2], dim=0)
        cls_true = torch.cat([c1, c2], dim=0)
        clsf_loss = clsf_loss1 + clsf_loss2

    ret = {
        "loss": loss.item(),
        "con_loss": contras_loss.item(),
        "clsf_loss": clsf_loss.item(),
        "emb_vecs": [emb_vec1, emb_vec2],
        "cls_pred": cls_pred,
        "cls_true": cls_true,
        "targets": targets
    }
    return ret
Example #3
    def eval_inference(self, engine, batch):
        siam_net = self.models['siam_net']
        clsf_net = self.models['clsf_net']
        siam_net.eval()
        clsf_net.eval()
        with torch.no_grad():
            x, targets = _prepare_batch(batch,
                                        device=self.device,
                                        non_blocking=self.pin_memory)
            emb_vec1, emb_vec2 = siam_net(x)

            if self.l2_normalize:
                l2_emb_vec1 = F.normalize(emb_vec1, p=2, dim=1)
                l2_emb_vec2 = F.normalize(emb_vec2, p=2, dim=1)

            # make inference with emb_vecs
            # predictions
            y1 = clsf_net(emb_vec1)
            y2 = clsf_net(emb_vec2)
            # true labels
            c1, c2, _ = targets
            cls_pred = torch.cat([y1, y2], dim=0)
            cls_true = torch.cat([c1, c2], dim=0)

        ret = {"cls_pred": cls_pred, "cls_true": cls_true, "targets": targets}

        if self.l2_normalize:
            ret["emb_vecs"] = [l2_emb_vec1, l2_emb_vec2]
        else:
            ret["emb_vecs"] = [emb_vec1, emb_vec2]
        return ret
Example #4
    def train_update(self, engine, batch):
        """
        We define the training update function for the engine here
        because we don't want a second pass over the training set.

        See also:
            https://pytorch.org/ignite/quickstart.html#f1
        """

        # alias
        cnn_net = self.models['cnn_net']
        optimizer = self.optimizer
        loss_fn = self.loss_fns['cross_entropy']

        cnn_net.train()
        optimizer.zero_grad()
        x, y = _prepare_batch(batch,
                              device=self.device,
                              non_blocking=self.pin_memory)
        y_pred = cnn_net(x)
        loss = loss_fn(y_pred, y)
        loss.backward()
        optimizer.step()

        # construct the return value of the engine's process function
        ret = {
            "clsf_loss": loss.item(),
            "cls_pred": y_pred,
            "cls_true": y,
        }

        return ret
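Because train_update returns a dict rather than a bare loss, any attached metric needs an output_transform that picks out the relevant key. A hedged sketch of the usual wiring; the trainer_obj instance name is a placeholder for whatever object defines train_update.

from ignite.engine import Engine
from ignite.metrics import RunningAverage

trainer = Engine(trainer_obj.train_update)

# Average the scalar classification loss taken from the returned dict.
RunningAverage(output_transform=lambda out: out["clsf_loss"]).attach(trainer, "avg_clsf_loss")
# trainer.run(train_loader, max_epochs=10)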
Example #5
 def _update(engine, batch):
     incept.eval()
     att.train()
     x, y = _prepare_batch(batch, device=device)
     loss = train_batch(x, y, optimizer, loss_fn)
     engine.state.loss_total += loss
     return loss
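engine.state has no loss_total attribute by default, so this update function only works if a handler creates it first. A minimal sketch, assuming the trainer is built directly from _update:

from ignite.engine import Engine, Events

trainer = Engine(_update)

@trainer.on(Events.EPOCH_STARTED)
def reset_loss_total(engine):
    # Reset the running total so the accumulation in _update starts from zero.
    engine.state.loss_total = 0.0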
Example #6
    def train_update(self, engine, batch):
        # alias
        siam_net = self.models['siam_net']
        optimizer = self.optimizer
        con_loss_fn = self.loss_fns['contrastive']

        siam_net.train()
        optimizer.zero_grad()
        x, targets = _prepare_batch(batch,
                                    device=self.device,
                                    non_blocking=self.pin_memory)
        emb_vec1, emb_vec2 = siam_net(x)

        contras_loss = con_loss_fn((emb_vec1, emb_vec2), targets)

        loss = contras_loss
        loss.backward()
        optimizer.step()

        # construct the return value of the engine's process function
        ret = {
            "con_loss": contras_loss.item(),
            "targets": targets,
            "emb_vecs": [emb_vec1, emb_vec2]
        }

        return ret
Example #7
    def update_fn(_trainer, batch):
        student.train()
        optimizer.zero_grad()
        x, y = _prepare_batch(batch, device=device, non_blocking=non_blocking)

        student_pred = student(x)
        with torch.no_grad():
            teacher_pred = teacher(x)

        supervised_loss = supervised_loss_fn(student_pred, y)
        distillation_loss = distillation_loss_fn(teacher_pred, student_pred)

        loss = supervised_loss + distillation_loss

        if use_f16:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        optimizer.step()

        return {
            'loss': loss.item(),
            'supervised_loss': supervised_loss.item(),
            'distillation_loss': distillation_loss.item(),
        }
Example #8
    def _update_model(engine, batch):
        x, y = _prepare_batch(batch, device=device, non_blocking=True)

        optimizer.zero_grad()
        with torch.no_grad():
            fake = model(x)
        real = y
        x_gan = torch.cat([fake, real], dim=0)
        y_gan = torch.cat([
            torch.zeros(fake.size(0), 1),
            torch.ones(real.size(0), 1)
        ]).to(device)

        y_pred = descriminator(x_gan)

        loss = loss_fn(y_pred, y_gan)

        if args.mixed_precision:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        optimizer.step()
        return loss
Example #9
def train_DTL(distill_net, epoch):
    logger('\nClassification training Epoch: %d  LR: %.4f' % (epoch, optimizer.optimizer.param_groups[0]['lr']))
    distill_net.train()
    distill_net.module.s_net.train()
    distill_net.module.t_net.eval()
    bt_sum = len(trainloader)
    for batch_idx, bt in enumerate(trainloader):
        inputs, targets = _prepare_batch(bt, device=device) if device == 'cuda' else bt
        distill_net.module.batch_size = inputs.shape[0]
        outputs = distill_net(inputs, targets)

        # CE loss
        loss = outputs[:, 1].sum()
        loss_CE = loss.item()

        loss_DTL = 0.0
        if args.DTL:
            # DTL loss
            loss1 = outputs[:, 2].sum()
            loss += loss1
            loss_DTL = loss1.item()
        optimizer.optimizer.zero_grad()
        loss.backward()
        optimizer.optimizer.step()

        if batch_idx % 20 == 0:
            logger('Loss: %.3f Loss_CE: %.3f Loss_DTL %.3f[%d/%d] ' % (loss.item(), loss_CE, loss_DTL, batch_idx, bt_sum))
Example #10
 def _evaluate_model(engine, batch):
     model.eval()
     x, y = _prepare_batch(batch, device=device, non_blocking=non_blocking)
     with torch.no_grad():
         y_pred = model(x)
     if denormalize:
         y_pred, y = map(denorm_fn, [y_pred, y])
     return y_pred, y
Example #11
 def eval_step(engine, batch):
     network.eval()
     Xb, yb = _prepare_batch(batch,
                             device=device,
                             non_blocking=non_blocking)
     with torch.no_grad():
         Xr, mu, log_var = network(Xb)
     return Xr, Xb, yb, mu, log_var
Example #12
 def _update(engine, batch):
     model.train()
     optimizer.zero_grad()
     x, y = _prepare_batch(batch, device=device)
     y_pred, _ = model(x)
     loss = loss_fn(y_pred, y)
     loss.backward()
     optimizer.step()
     return loss.item(), y_pred, y
Example #13
def process_function(engine, batch):
    model.train()
    optimizer.zero_grad()
    x, y = _prepare_batch(batch, device=device)
    y_pred = model(x)
    loss = criterion(y_pred, y)
    loss.backward()
    optimizer.step()
    return loss.item()
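This is the smallest useful process function: it returns only the scalar loss, which Ignite exposes as engine.state.output. A hedged sketch of a periodic logging handler around it; the every=100 interval is arbitrary and the filtered-event syntax assumes a reasonably recent Ignite release.

from ignite.engine import Engine, Events

trainer = Engine(process_function)

@trainer.on(Events.ITERATION_COMPLETED(every=100))
def log_loss(engine):
    # engine.state.output holds the value returned by process_function.
    print(f"iter {engine.state.iteration}: loss={engine.state.output:.4f}")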
Example #14
    def train_update(self, engine, batch):
        # alias
        siam_net = self.models['siam_net']
        clsf_net = self.models['clsf_net']
        optimizer = self.optimizer
        con_loss_fn = self.loss_fns['contrastive']
        cs_loss_fn = self.loss_fns['cross_entropy']

        siam_net.train()
        clsf_net.train()
        optimizer.zero_grad()
        x, targets = _prepare_batch(batch,
                                    device=self.device,
                                    non_blocking=self.pin_memory)
        c1, c2, _ = targets

        emb_vec1, emb_vec2 = siam_net(x)

        if self.l2_normalize:
            l2_emb_vec1 = F.normalize(emb_vec1, p=2, dim=1)
            l2_emb_vec2 = F.normalize(emb_vec2, p=2, dim=1)
            contras_loss = con_loss_fn((l2_emb_vec1, l2_emb_vec2), targets)
        else:
            contras_loss = con_loss_fn((emb_vec1, emb_vec2), targets)

        y1 = clsf_net(emb_vec1)
        y2 = clsf_net(emb_vec2)
        clsf_loss1 = cs_loss_fn(y1, c1)
        clsf_loss2 = cs_loss_fn(y2, c2)

        loss = self.scale_factor * contras_loss + clsf_loss1 + clsf_loss2
        loss.backward()
        optimizer.step()

        with torch.no_grad():
            cls_pred = torch.cat([y1, y2], dim=0)
            cls_true = torch.cat([c1, c2], dim=0)
            clsf_loss = clsf_loss1 + clsf_loss2

        ret = {
            "loss": loss.item(),
            "con_loss": contras_loss.item(),
            "clsf_loss": clsf_loss.item(),
            "cls_pred": cls_pred,
            "cls_true": cls_true,
            "targets": targets
        }

        # add the emb_vecs
        if self.l2_normalize:
            ret["emb_vecs"] = [l2_emb_vec1, l2_emb_vec2]
        else:
            ret["emb_vecs"] = [emb_vec1, emb_vec2]

        return ret
Example #15
 def train_step(engine: Engine, batch):
     network.train()
     optimizer.zero_grad()
     Xb, yb = _prepare_batch(batch,
                             device=device,
                             non_blocking=non_blocking)
     Xr, mu, log_var = network(Xb)
     loss = criterion(Xr, Xb, mu, log_var)
     loss.backward()
     optimizer.step()
     return loss.item()
Example #16
 def train_step(engine, batch):
     network.train()
     optimizer.zero_grad()
     input, target = _prepare_batch(batch,
                                    device=device,
                                    non_blocking=non_blocking)
     output, mu, log_var = network(input, target)
     loss = criterion(output, input, mu, log_var)
     loss.backward()
     optimizer.step()
     return loss.item()
Example #17
 def update_fn(engine, batch):
     network.train()
     optimizer.zero_grad()
     Xb, yb = _prepare_batch(batch,
                             device=device,
                             non_blocking=non_blocking)
     x_recon, mu, log_var = network(Xb)
     loss = criterion(x_recon, Xb, mu, log_var)
     loss.backward()
     optimizer.step()
     return loss.item()
Example #18
    def _update(engine, batch):
        model.train()
        x, y = _prepare_batch(batch, device)
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        if clip:
            # clip before the optimizer step so it actually affects the update
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()

        return {'loss': loss.item(), 'y_pred': y_pred, 'y': y}
Example #19
def train(net, epoch, criterion, w_list, all_two=False):
    # epoch_start_time = time.time()
    logger('\nClassification training Epoch: %d' % epoch)
    net.train()
    # train_loss = 0
    bt_sum = len(trainloader)
    logger('lr: %.4f' % optimizer.optimizer.param_groups[0]['lr'])
    for batch_idx, bt in enumerate(trainloader):
        data_length = len(trainloader)
        inputs, targets = _prepare_batch(
            bt, device=device) if device == 'cuda' else bt
        batch_size = inputs[0].size(0)
        inputs_l = torch.cat(inputs[:2])
        inputs_s = torch.cat(inputs[2:])

        outputs_l = net(inputs_l)
        outputs_s = net(inputs_s)

        outputs = []
        for output_l, output_s in zip(outputs_l, outputs_s):
            outputs.append(torch.cat((output_l, output_s)))
        loss = criterion(outputs, targets, w_list, all_two)
        output_list = []
        for op in outputs:
            output_list.append(op.split(batch_size))

        losses = 0
        losses += loss
        logger_str_ot = ''
        losses_ot_items = []
        for i in range(4):
            for j in range(i + 1, 4):
                loss_ot = 0
                for k in range(len(output_list)):
                    loss_ot += F.mse_loss(output_list[k][i], output_list[k][j])
                losses += loss_ot
                losses_ot_items.append(loss_ot.item())
                logger_str_ot += 'loss_ot{:>02d}{:>02d}'.format(
                    i + 1, j + 1) + ':{:>6.3f}\t'
        optimizer.optimizer.zero_grad()
        losses.backward()
        optimizer.optimizer.step()
        # train_loss += loss.item()
        # logger('Train \t Time Taken: %.2f sec' % (time.time() - epoch_start_time))
        if batch_idx % 20 == 0:
            logger((
                'epoch:{},\ttrain step:{:>4}/{}\tLoss:{:>6.3f}\tcls loss:{:>6.3f}\t\t'
                + logger_str_ot).format(epoch, batch_idx, data_length,
                                        losses.item(), loss.item(),
                                        *losses_ot_items))
Example #20
    def _update_model(engine, batch):
        model.train()
        optimizer.zero_grad()
        x, y = _prepare_batch(batch, device=device, non_blocking=non_blocking)

        y_pred = model(x)
        loss = loss_fn(y_pred, y)

        if mixed_precision:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
        return loss
Example #21
    def update_fn(_trainer, batch):
        model.train()
        optimizer.zero_grad()
        x, y = _prepare_batch(batch, device=device, non_blocking=non_blocking)

        y_pred = model(x)
        loss = loss_fn(y_pred, y)

        if use_f16:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        optimizer.step()
        return loss.item()
Example #22
def train(net, epoch):
    # epoch_start_time = time.time()
    logger('\nClassification training Epoch: %d  LR: %.4f' % (epoch, optimizer.optimizer.param_groups[0]['lr']))
    net.train()
    bt_sum = len(trainloader)
    for batch_idx, bt in enumerate(trainloader):
        inputs, targets = _prepare_batch(bt, device=device) if device == 'cuda' else bt
        net.module.batch_size = inputs.shape[0]
        outputs = net(inputs)
        loss = multitask_loss(outputs, targets, criterion_CE)

        optimizer.optimizer.zero_grad()
        loss.backward()
        optimizer.optimizer.step()

        if batch_idx % 20 == 0:
            logger('Loss: %.3f[%d/%d] ' % (loss.item(), batch_idx, bt_sum))
Example #23
def Distillation(distill_net, epoch, withCE=False):
    logger('\nDistillation Epoch: %d  LR: %.4f' % (epoch, optimizer.optimizer.param_groups[0]['lr']))

    distill_net.train()
    distill_net.module.s_net.train()
    distill_net.module.t_net.eval()

    train_loss, train_loss1, train_loss2, train_loss3, train_loss4 = 0, 0, 0, 0, 0

    for batch_idx, bt in enumerate(trainloader):
        inputs, targets = _prepare_batch(bt, device=device) if device == 'cuda' else bt
        distill_net.module.batch_size = inputs.shape[0]
        outputs = distill_net(inputs, targets)
        bt_sum = len(trainloader)
        loss = outputs[:, 0].sum()
        loss_AT = loss.item()

        loss_DTL = 0.0
        if args.DTL is True:
            loss1 = outputs[:, 2].sum()
            loss += loss1
            loss_DTL = loss1.item()
        if withCE is True:
            loss += outputs[:, 1].sum()

        loss_AT1, loss_AT2, loss_AT3, loss_AT4 = outputs[:, 3].mean(), outputs[:, 4].mean(), outputs[:, 5].mean(), outputs[:, 6].mean()

        optimizer.optimizer.zero_grad()
        loss.backward()
        optimizer.optimizer.step()

        train_loss += loss.item()
        train_loss1 += loss_AT1.item()
        train_loss2 += loss_AT2.item()
        train_loss3 += loss_AT3.item()
        train_loss4 += loss_AT4.item()

        similarity1 = 100 * (1 - train_loss1 / (batch_idx+1))
        similarity2 = 100 * (1 - train_loss2 / (batch_idx+1))
        similarity3 = 100 * (1 - train_loss3 / (batch_idx+1))
        similarity4 = 100 * (1 - train_loss4 / (batch_idx+1))
        if batch_idx % 20 == 0:
            logger('similarity1: %.1f  similarity2: %.1f  similarity3: %.1f  similarity4: %.1f  loss_AT: %.3f  loss_DTL: %.3f[%d/%d]'
                  % (similarity1, similarity2, similarity3, similarity4, loss_AT, loss_DTL, batch_idx, bt_sum))

    optimizer.step()
Example #24
    def _update(engine, batch):

        from ignite.engine import _prepare_batch

        model.train()
        optimizer.zero_grad()

        inputs, targets = _prepare_batch(batch, device=device)

        inputs, targets_a, targets_b, lam = mixup_data(
            inputs, targets, alpha, use_cuda=(device == "cuda"))
        outputs = model(inputs)

        loss = mixup_criterion(loss_fn, outputs, targets_a, targets_b, lam)

        loss.backward()
        optimizer.step()
        return loss.item()
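The update above relies on mixup_data and mixup_criterion, which live elsewhere in that project. A sketch of what these helpers conventionally look like, following the standard mixup formulation rather than the project's exact code:

import numpy as np
import torch

def mixup_data(x, y, alpha=1.0, use_cuda=True):
    # use_cuda is accepted for signature compatibility; the device of x is used directly.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0), device=x.device)
    mixed_x = lam * x + (1 - lam) * x[index]
    return mixed_x, y, y[index], lam

def mixup_criterion(criterion, pred, y_a, y_b, lam):
    # The loss is the same convex combination applied to the two label sets.
    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)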
Example #25
    def _update(engine, batch):
        model.train()
        optimizer.zero_grad()
        if not prepare_batch:
            x, y = _prepare_batch(batch, device=device)
        else:
            x, y = prepare_batch(batch, device=device)
        y_pred = model(x)
        loss = loss_fn(y_pred, y)

        if scale_loss:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        optimizer.step()
        return loss.item(), y_pred, y
Example #26
def train(net, epoch):
    # epoch_start_time = time.time()
    logger('\nClassification training Epoch: %d' % epoch)
    net.train()
    # train_loss = 0
    bt_sum = len(trainloader)
    logger('lr: %.4f' % optimizer.optimizer.param_groups[0]['lr'])
    for batch_idx, bt in enumerate(trainloader):
        inputs, targets = _prepare_batch(
            bt, device=device) if device == 'cuda' else bt
        outputs = net(inputs)
        loss = multitask_loss(outputs, targets, criterion_CE)

        optimizer.optimizer.zero_grad()
        loss.backward()
        optimizer.optimizer.step()
        # train_loss += loss.item()
        # logger('Train \t Time Taken: %.2f sec' % (time.time() - epoch_start_time))
        if batch_idx % 20 == 0:
            logger('Loss: %.3f[%d/%d] ' % (loss.item(), batch_idx, bt_sum))
Example #27
def run_test_model(model, evaluate_loader, epoch, device, step=10, log_to_mlflow=False):
    model.eval()
    count_step = 0

    for idx, batch in enumerate(evaluate_loader):
        if count_step > step:
            break

        x, y = _prepare_batch(batch, device)

        predict = model(x)
        predict = torch.sigmoid(predict) > 0.2

        for i in range(len(x)):
            gt = evaluate_loader.dataset.mask_to_grayscale(y[i])
            img = evaluate_loader.dataset.mask_to_grayscale(predict[i])
            # gt and img are computed but not used here; presumably they are
            # logged or visualized (e.g. via MLflow, given the otherwise
            # unused log_to_mlflow flag).

        count_step += len(x)

    model.train()
Example #28
def create_mask_rcnn_trainer(model: nn.Module, optimizer: optim.Optimizer, device=None, non_blocking: bool = False):
    if device:
        model.to(device)

    fn_prepare_batch = lambda batch: engine._prepare_batch(batch, device=device, non_blocking=non_blocking)

    def _update(engine, batch):
        model.train()
        optimizer.zero_grad()

        image, targets = fn_prepare_batch(batch)
        losses = model(image, targets)

        loss = sum(loss for loss in losses.values())

        loss.backward()
        optimizer.step()

        losses = {k: v.item() for k, v in losses.items()}
        losses['loss'] = loss.item()
        return losses

    return engine.Engine(_update)
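A hedged usage sketch for this factory; the Mask R-CNN model, optimizer and loader names below are assumptions, not part of the original code.

import torch
import torchvision
from ignite.metrics import RunningAverage

model = torchvision.models.detection.maskrcnn_resnet50_fpn(num_classes=2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.005, momentum=0.9)

trainer = create_mask_rcnn_trainer(model, optimizer, device="cuda")

# Each key in the dict returned by _update could be tracked separately;
# here only the summed loss is averaged over iterations.
RunningAverage(output_transform=lambda losses: losses["loss"]).attach(trainer, "loss")
# trainer.run(train_loader, max_epochs=12)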
Example #29
def create_mask_rcnn_evaluator(model: nn.Module, metrics, device=None, non_blocking: bool = False):
    if device:
        model.to(device)

    fn_prepare_batch = lambda batch: engine._prepare_batch(batch, device=device, non_blocking=non_blocking)

    def _update(engine, batch):
        # warning(will.brennan) - not putting model in eval mode because we want the losses!
        with torch.no_grad():
            image, targets = fn_prepare_batch(batch)
            losses = model(image, targets)

            losses = {k: v.item() for k, v in losses.items()}
            losses['loss'] = sum(losses.values())

        # note(will.brennan) - an ugly hack for metrics...
        return (losses, len(image))

    evaluator = engine.Engine(_update)

    for name, metric in metrics.items():
        metric.attach(evaluator, name)

    return evaluator
Example #30
    def prepare_batch(batch, device=None, non_blocking=False):

        return _prepare_batch((batch["img"], batch["label"]), device,
                              non_blocking)
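A wrapper like this exists so that dict-style batches can be fed to Ignite's stock factories, which accept a prepare_batch callable. A minimal sketch; model, optimizer, loss_fn and train_loader are placeholders.

from ignite.engine import create_supervised_trainer

# Every batch is routed through prepare_batch before reaching the model,
# so dict batches of the form {"img": ..., "label": ...} work unchanged.
trainer = create_supervised_trainer(model, optimizer, loss_fn,
                                    device="cuda",
                                    prepare_batch=prepare_batch)
# trainer.run(train_loader, max_epochs=10)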