Example #1
0
 def handle_batch(self, batch):
     """Run one train/valid step with a two-part (encoder + head) model."""
     features, targets = batch
     # <--- multi-model usage --->
     # forward pass: encode the input, then classify the encoding
     encoded = self.model["encoder"](features)
     logits = self.model["head"](encoded)
     # <--- multi-model usage --->
     # batch loss and accuracy metrics
     loss = self.criterion(logits, targets)
     accuracy01, accuracy03 = metrics.accuracy(logits, targets, topk=(1, 3))
     batch_values = {
         "loss": loss,
         "accuracy01": accuracy01,
         "accuracy03": accuracy03,
     }
     self.batch_metrics.update(batch_values)
     # fold every logged value into its running meter
     for key, value in batch_values.items():
         self.meters[key].update(value.item(), self.batch_size)
     # optimize only while iterating the training loader
     if self.is_train_loader:
         loss.backward()
         self.optimizer.step()
         self.optimizer.zero_grad()
 def handle_batch(self, batch):
     """Run one train/valid step that sums two criteria over one model."""
     features, targets = batch
     logits = self.model(features)
     # <--- multi-criterion usage --->
     # total loss = hard-label multiclass term + one-hot multilabel term
     loss = (
         self.criterion["multiclass"](logits, targets)
         + self.criterion["multilabel"](
             logits, F.one_hot(targets, 10).to(torch.float32)
         )
     )
     # <--- multi-criterion usage --->
     accuracy01, accuracy03 = metrics.accuracy(logits, targets, topk=(1, 3))
     batch_values = {
         "loss": loss,
         "accuracy01": accuracy01,
         "accuracy03": accuracy03,
     }
     self.batch_metrics.update(batch_values)
     # fold every logged value into its running meter
     for key, value in batch_values.items():
         self.meters[key].update(value.item(), self.batch_size)
     # optimize only while iterating the training loader
     if self.is_train_loader:
         loss.backward()
         self.optimizer.step()
         self.optimizer.zero_grad()
    def _handle_batch(self, batch):
        """Joint step: classification + reconstruction + VAE regularizers."""
        features, targets = batch
        flat = features.view(features.size(0), -1)
        y_hat, reconstruction, z_logprob, loc, log_scale = self.model(flat)

        # individual loss terms, summed into the total below
        loss_clf = F.cross_entropy(y_hat, targets)
        loss_ae = F.mse_loss(reconstruction, flat)
        # KL divergence of the latent gaussian, down-weighted by 0.1
        loss_kld = (
            -0.5 * torch.mean(1 + log_scale - loc.pow(2) - log_scale.exp()) * 0.1
        )
        # latent log-probability term, down-weighted by 0.01
        loss_logprob = torch.mean(z_logprob) * 0.01
        loss = loss_clf + loss_ae + loss_kld + loss_logprob
        accuracy01, accuracy03, accuracy05 = metrics.accuracy(
            y_hat, targets, topk=(1, 3, 5)
        )

        self.batch_metrics = {
            "loss_clf": loss_clf,
            "loss_ae": loss_ae,
            "loss_kld": loss_kld,
            "loss_logprob": loss_logprob,
            "loss": loss,
            "accuracy01": accuracy01,
            "accuracy03": accuracy03,
            "accuracy05": accuracy05,
        }

        if self.is_train_loader:
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()
Example #4
0
    def _handle_batch(self, batch):
        """Classification + autoencoder step over flattened inputs."""
        features, targets = batch
        flat = features.view(features.size(0), -1)
        y_hat, reconstruction = self.model(flat)
        # total loss = classification term + reconstruction term
        loss_clf = F.cross_entropy(y_hat, targets)
        loss_ae = F.mse_loss(reconstruction, flat)
        loss = loss_clf + loss_ae
        accuracy01, accuracy03, accuracy05 = metrics.accuracy(
            y_hat, targets, topk=(1, 3, 5)
        )

        self.batch_metrics = {
            "loss_clf": loss_clf,
            "loss_ae": loss_ae,
            "loss": loss,
            "accuracy01": accuracy01,
            "accuracy03": accuracy03,
            "accuracy05": accuracy05,
        }

        if self.is_train_loader:
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()
Example #5
0
 def _handle_batch(self, batch):
     """Train/valid step over four categorical booking-input tensors."""
     (
         city_id_tensor,
         booker_country_tensor,
         device_class_tensor,
         affiliate_id_tensor,
         targets,
     ) = batch
     # forward pass over all categorical inputs, in batch order
     logits = self.model(
         city_id_tensor,
         booker_country_tensor,
         device_class_tensor,
         affiliate_id_tensor,
     )
     loss = self.criterion(logits, targets)
     accuracy01, accuracy04 = metrics.accuracy(logits, targets, topk=(1, 4))
     self.batch_metrics.update(
         {"loss": loss, "accuracy01": accuracy01, "accuracy04": accuracy04}
     )
     # optimize only while iterating the training loader
     if self.is_train_loader:
         loss.backward()
         self.optimizer.step()
         self.optimizer.zero_grad()
Example #6
0
    def _handle_batch(self, batch):
        """Denoising step: classify and reconstruct a noise-corrupted input."""
        clean, targets = batch
        # corrupt with uniform noise, clamped back into the valid [0, 1] range
        noisy = (clean + torch.rand_like(clean)).clamp_(0, 1)
        y_hat, reconstruction = self.model(noisy)

        loss_clf = F.cross_entropy(y_hat, targets)
        # reconstruction quality is scored by IoU against the clean input
        iou = metrics.iou(reconstruction, clean)
        loss_iou = 1 - iou
        loss = loss_clf + loss_iou
        accuracy01, accuracy03, accuracy05 = metrics.accuracy(
            y_hat, targets, topk=(1, 3, 5)
        )

        self.batch_metrics = {
            "loss_clf": loss_clf,
            "loss_iou": loss_iou,
            "loss": loss,
            "iou": iou,
            "accuracy01": accuracy01,
            "accuracy03": accuracy03,
            "accuracy05": accuracy05,
        }

        if self.is_train_loader:
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()
Example #7
0
def test_accuracy_top1():
    """
    Tests for catalyst.utils.metrics.accuracy metric.
    """
    for class_idx in range(NUM_CLASSES):
        # outputs always rank `class_idx` first; targets are that same class
        outputs = torch.zeros((BATCH_SIZE, NUM_CLASSES))
        outputs[:, class_idx] = 1
        targets = torch.ones((BATCH_SIZE, 1)) * class_idx

        top1, top3, top5 = metrics.accuracy(outputs, targets, topk=(1, 3, 5))
        # a correct top-1 prediction makes every larger top-k perfect too
        for value in (top1, top3, top5):
            assert np.isclose(value, 1)
 def _handle_batch(self, batch):
     """Generic step: every batch element but the last feeds the model."""
     *input_tensors, targets = batch
     logits = self.model(*input_tensors)
     loss = self.criterion(logits, targets)
     accuracy01, accuracy04 = metrics.accuracy(logits, targets, topk=(1, 4))
     self.batch_metrics.update(
         {"loss": loss, "accuracy01": accuracy01, "accuracy04": accuracy04}
     )
     # optimize only while iterating the training loader
     if self.is_train_loader:
         loss.backward()
         self.optimizer.step()
         self.optimizer.zero_grad()
Example #9
0
def test_accuracy_top3():
    """
    Tests for catalyst.utils.metrics.accuracy metric.
    """
    # each row scores class k with value k, so the top-k set is always
    # the k highest-numbered classes
    scores = torch.linspace(0, NUM_CLASSES - 1, steps=NUM_CLASSES)
    outputs = scores.repeat(1, BATCH_SIZE).view(-1, NUM_CLASSES)

    for class_idx in range(NUM_CLASSES):
        targets = torch.ones((BATCH_SIZE, 1)) * class_idx

        top1, top3, top5 = metrics.accuracy(outputs, targets, topk=(1, 3, 5))
        # the target is found iff it lies among the k largest class indices
        assert np.isclose(top1, 1 if class_idx >= NUM_CLASSES - 1 else 0)
        assert np.isclose(top3, 1 if class_idx >= NUM_CLASSES - 3 else 0)
        assert np.isclose(top5, 1 if class_idx >= NUM_CLASSES - 5 else 0)
Example #10
0
    def _handle_batch(self, batch):
        """Plain classification train/valid step on flattened inputs."""
        features, targets = batch
        logits = self.model(features.view(features.size(0), -1))

        loss = F.cross_entropy(logits, targets)
        accuracy01, accuracy03 = metrics.accuracy(logits, targets, topk=(1, 3))
        self.batch_metrics.update(
            {"loss": loss, "accuracy01": accuracy01, "accuracy03": accuracy03}
        )

        # optimize only while iterating the training loader
        if self.is_train_loader:
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()
Example #11
0
 def _handle_batch(self, batch):
     """Two-headed step: next-city (s) and hotel-country (h) targets."""
     # batch layout: twelve input tensors followed by the two targets;
     # the inputs are forwarded to the model positionally, in batch order
     *input_tensors, y_s, y_h = batch
     out_s, out_h = self.model(*input_tensors)
     loss_s = self.criterion(out_s, y_s)
     loss_h = self.criterion(out_h, y_h)
     # weighted total: the city head dominates the optimization signal
     loss = loss_s * 0.9 + loss_h * 0.1
     # accuracy is reported for the city head only
     accuracy01, accuracy04 = metrics.accuracy(out_s, y_s, topk=(1, 4))
     self.batch_metrics.update(
         {"loss": loss, "accuracy01": accuracy01, "accuracy04": accuracy04}
     )
     # optimize only while iterating the training loader
     if self.is_train_loader:
         loss.backward()
         self.optimizer.step()
         self.optimizer.zero_grad()
    def handle_batch(self, batch):
        """Model train/valid step over a dict batch with 'features'/'targets'."""
        features = batch["features"]
        targets = batch["targets"]
        # flatten everything after the batch dimension before the forward pass
        logits = self.model(features.view(features.size(0), -1))

        loss = F.cross_entropy(logits, targets)
        accuracy01, accuracy03 = metrics.accuracy(logits, targets, topk=(1, 3))
        self.batch_metrics.update(
            {"loss": loss, "accuracy01": accuracy01, "accuracy03": accuracy03}
        )

        # optimize only while iterating the training loader
        if self.is_train_loader:
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()
Example #13
0
 def handle_batch(self, batch):
     """Single-model train/valid step with running metric meters."""
     features, targets = batch
     logits = self.model(features)
     # batch loss and accuracy metrics
     loss = F.cross_entropy(logits, targets)
     accuracy01, accuracy03 = metrics.accuracy(logits, targets, topk=(1, 3))
     batch_values = {
         "loss": loss,
         "accuracy01": accuracy01,
         "accuracy03": accuracy03,
     }
     self.batch_metrics.update(batch_values)
     # fold every logged value into its running meter
     for key, value in batch_values.items():
         self.meters[key].update(value.item(), self.batch_size)
     # optimize only while iterating the training loader
     if self.is_train_loader:
         loss.backward()
         self.optimizer.step()
         self.optimizer.zero_grad()