Example no. 1
    def test_batch(self, batch) -> BatchResult:
        x, y = batch
        # Channels-last to channels-first (equivalent to permute(0, 3, 1, 2))
        x = x.transpose(1, 2).transpose(1, 3).to(self.device,
                                                 dtype=torch.float)
        y = y.to(self.device, dtype=torch.float)

        with torch.no_grad():
            out = self.model(x).flatten()
            loss = self.loss_fn(out, y)
            num_correct = torch.sum((out > 0) == (y == 1))
            # Sigmoid gives each flattened logit an independent probability;
            # softmax over the flattened batch would couple samples together.
            out_norm = torch.sigmoid(out)
            if self.classification_threshold is None:
                TP = torch.sum((out > 0) * (y == 1))
                TN = torch.sum((out <= 0) * (y == 0))
                FP = torch.sum((out > 0) * (y == 0))
                FN = torch.sum((out <= 0) * (y == 1))
            else:
                TP = torch.sum(
                    (out_norm > self.classification_threshold) * (y == 1))
                TN = torch.sum(
                    (out_norm <= self.classification_threshold) * (y == 0))
                FP = torch.sum(
                    (out_norm > self.classification_threshold) * (y == 0))
                FN = torch.sum(
                    (out_norm <= self.classification_threshold) * (y == 1))
        return BatchResult(loss.item(), num_correct.item(), TP, TN, FP, FN,
                           out, y)
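These examples construct BatchResult with anywhere from one to eight positional arguments, but the type itself is never shown (and may well differ between the codebases the snippets come from). A minimal sketch that would accept every call site above, with field names inferred from context rather than taken from any source:

    from typing import NamedTuple, Optional
    import torch

    class BatchResult(NamedTuple):
        # Assumed definition; the field names are guesses from the call sites.
        loss: float
        num_correct: int = 0
        TP: Optional[torch.Tensor] = None
        TN: Optional[torch.Tensor] = None
        FP: Optional[torch.Tensor] = None
        FN: Optional[torch.Tensor] = None
        out: Optional[torch.Tensor] = None
        y: Optional[torch.Tensor] = None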
Example no. 2
    def test_batch(self, batch) -> BatchResult:
        """
        Evaluate model once over a test set (single epoch).
        :param dl_test: DataLoader for the test set.
        :param kw: Keyword args supported by _foreach_batch.
        :return: An EpochResult for the epoch.
        """
        rgb   = batch['rgb']
        depth = batch['depth']

        # Ground-Truth Depth Gradients
        x_gt  = batch['x']
        y_gt  = batch['y']

        rgb   = rgb.to(self.device)
        depth = depth.to(self.device)
        x_gt  = x_gt.to(self.device)
        y_gt  = y_gt.to(self.device)
        xy_gt = torch.cat((x_gt, y_gt), dim=1)

        with torch.no_grad():
            xy   = self.model(rgb_batch=rgb, depth_batch=depth)
            loss = self.model.loss(ground_truth_grads=xy_gt, approximated_grads=xy)

        return BatchResult(loss.item())
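If x_gt and y_gt are single-channel gradient maps of shape (B, 1, H, W) (an assumption; the examples never show the dataset), the torch.cat call above stacks them into one two-channel target tensor:

    import torch

    x_gt = torch.randn(4, 1, 64, 64)  # horizontal depth gradients (assumed shape)
    y_gt = torch.randn(4, 1, 64, 64)  # vertical depth gradients (assumed shape)
    xy_gt = torch.cat((x_gt, y_gt), dim=1)  # concatenate along the channel dim
    assert xy_gt.shape == (4, 2, 64, 64)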
Example no. 3
    def test_batch(self, batch) -> BatchResult:
        x, y = batch
        x = (x[0].to(self.device,
                     dtype=torch.float), x[1].to(self.device,
                                                 dtype=torch.float))
        y = y.to(self.device, dtype=torch.float)

        with torch.no_grad():
            out = self.model(x).flatten()
            loss = self.loss_fn(out, y)
            num_correct = torch.sum((out > 0) == (y == 1))
            out_norm = torch.sigmoid(out)
            if self.classification_threshold is None:
                TP = torch.sum((out > 0) * (y == 1))
                TN = torch.sum((out <= 0) * (y == 0))
                FP = torch.sum((out > 0) * (y == 0))
                FN = torch.sum((out <= 0) * (y == 1))
            else:
                TP = torch.sum(
                    (out_norm >= self.classification_threshold) * (y == 1))
                TN = torch.sum(
                    (out_norm < self.classification_threshold) * (y == 0))
                FP = torch.sum(
                    (out_norm >= self.classification_threshold) * (y == 0))
                FN = torch.sum(
                    (out_norm < self.classification_threshold) * (y == 1))
                num_correct = torch.sum(
                    (out_norm >= self.classification_threshold) == (y == 1))

        return BatchResult(loss.item(), num_correct.item(), TP, TN, FP, FN,
                           out, y)
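Note that the two branches above agree when the threshold is 0.5: sigmoid is strictly increasing and sigmoid(0) = 0.5, so comparing raw logits against 0 is the same as comparing probabilities against 0.5. A standalone check of that equivalence:

    import torch

    logits = torch.tensor([-2.0, -0.1, 0.0, 0.3, 4.0])
    # sigmoid is monotonic with sigmoid(0) = 0.5, so thresholding the raw
    # logits at 0 matches thresholding the probabilities at 0.5.
    assert torch.equal(logits > 0, torch.sigmoid(logits) > 0.5)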
Example no. 4
    def train_batch(self, batch) -> BatchResult:
        """
        Runs a single batch forward through the model, calculates loss,
        preforms back-propagation and uses the optimizer to update weights.
        :param batch: A single batch of data from a DataLoader.
        :return: A BatchResult containing the value of the loss function and
        the number of correctly classified samples in the batch.
        """
        rgb   = batch['rgb']
        depth = batch['depth']

        # Ground-Truth Depth Gradients
        x_gt  = batch['x']
        y_gt  = batch['y']

        rgb   = rgb.to(self.device)
        depth = depth.to(self.device)
        x_gt  = x_gt.to(self.device)
        y_gt  = y_gt.to(self.device)
        xy_gt = torch.cat((x_gt, y_gt), dim=1)

        xy    = self.model(rgb_batch=rgb, depth_batch=depth)

        loss  = self.model.loss(ground_truth_grads=xy_gt, approximated_grads=xy)
        self.model.optimizer.zero_grad()
        loss.backward()
        self.model.optimizer.step()

        return BatchResult(loss.item())
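A train_batch like this would normally be invoked once per batch by an epoch-level driver. A minimal sketch of such a loop (the method name and the averaging are assumptions, not shown in the examples):

    def train_epoch(self, dl_train):
        # Hypothetical driver: one forward/backward/step per batch,
        # returning the mean loss over the epoch.
        self.model.train()
        losses = [self.train_batch(batch).loss for batch in dl_train]
        return sum(losses) / max(len(losses), 1)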
Example no. 5
    def test_batch(self, batch) -> BatchResult:
        x, y = batch
        x = x.transpose(1, 2).transpose(1, 3).to(self.device,
                                                 dtype=torch.float)
        y = y.to(self.device, dtype=torch.float)

        with torch.no_grad():
            out = self.model(x).flatten()
            loss = self.loss_fn(out, y)
            num_correct = torch.sum((out > 0) == (y == 1))

        return BatchResult(loss.item(), num_correct.item())
Example no. 6
    def test_batch(self, batch) -> BatchResult:
        x, y = batch
        x = (x[0].to(self.device,
                     dtype=torch.float), x[1].to(self.device,
                                                 dtype=torch.float))
        y = y.to(self.device, dtype=torch.float)

        with torch.no_grad():
            out = self.model(x)
            loss = self.loss_fn(out.flatten(), y.flatten())
            # Threshold raw logits at zero to obtain binary prediction masks.
            indices = out > 0
            indices1 = y > 0

            num_correct = torch.sum(indices == indices1)
        return BatchResult(loss.item(), num_correct.item())
Example no. 7
    def train_batch(self, batch) -> BatchResult:
        x, y = batch
        x = x.transpose(1, 2).transpose(1, 3).to(self.device,
                                                 dtype=torch.float)
        y = y.to(self.device, dtype=torch.float)

        self.optimizer.zero_grad()

        out = self.model(x).flatten()
        loss = self.loss_fn(out, y)
        loss.backward()
        self.optimizer.step()

        num_correct = torch.sum((out > 0) == (y == 1))

        return BatchResult(loss.item(), num_correct.item())
Example no. 8
    def test_batch(self, batch) -> BatchResult:
        x, y = batch
        x = x.to(self.device, dtype=torch.float)  # (B,S,V)
        y = y.to(self.device, dtype=torch.long)  # (B,S)

        with torch.no_grad():
            # Evaluate the SRM model on one batch of data.
            # - Forward pass
            # - Loss calculation
            # - Calculate number of correct predictions
            scores = self.model(x)
            loss = self.loss_fn(scores, y)
            y_hat = torch.argmax(scores, dim=1)
            num_correct = torch.sum(y_hat == y)

        return BatchResult(loss.item(), num_correct.item())
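For the loss call and the argmax over dim=1 to be consistent with targets of shape (B,S), the model must return scores with the class dimension in dim 1, i.e. shape (B,V,S); that is the layout nn.CrossEntropyLoss expects for sequence targets. A standalone shape check under that assumption:

    import torch
    import torch.nn as nn

    B, S, V = 2, 10, 27
    scores = torch.randn(B, V, S)            # assumed model output: classes in dim 1
    y = torch.randint(0, V, (B, S))          # per-position class labels
    loss = nn.CrossEntropyLoss()(scores, y)  # accepts (B,V,S) scores with (B,S) targets
    y_hat = torch.argmax(scores, dim=1)      # (B,S), directly comparable to y
    assert y_hat.shape == y.shape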
Example no. 9
    def test_batch(self, batch) -> BatchResult:
        x, y = batch
        x = x.transpose(1, 2).transpose(1, 3).to(self.device,
                                                 dtype=torch.float)
        y = (y[0].to(self.device,
                     dtype=torch.float), y[1].to(self.device,
                                                 dtype=torch.float))
        batch_size = y[0].shape[0]

        with torch.no_grad():
            out = self.model(x)
            loss = self.loss_fn(out[0], y[0]) + self.loss_fn(out[1], y[1])
            num_correct = batch_size * (
                torch.sum(torch.abs(out[0] - y[0]) < 0.01) +
                torch.sum(torch.abs(out[1] - y[1]) < 0.01)
            ) / (torch.numel(y[0]) + torch.numel(y[1]))

        return BatchResult(loss.item(), num_correct.item())
Example no. 10
    def train_batch(self, batch) -> BatchResult:
        x, y = batch
        x = (x[0].to(self.device,
                     dtype=torch.float), x[1].to(self.device,
                                                 dtype=torch.float))
        y = y.to(self.device, dtype=torch.float)

        self.optimizer.zero_grad()

        out = self.model(x)
        loss = self.loss_fn(out, y)
        loss.backward()
        self.optimizer.step()

        # Threshold raw logits at zero to obtain binary prediction masks.
        indices = out > 0
        indices1 = y > 0

        num_correct = torch.sum(indices == indices1)

        return BatchResult(loss.item(), num_correct.item())
Example no. 11
    def train_batch(self, batch) -> BatchResult:
        x, y = batch
        x = x.transpose(1, 2).transpose(1, 3).to(self.device,
                                                 dtype=torch.float)
        y = (y[0].to(self.device,
                     dtype=torch.float), y[1].to(self.device,
                                                 dtype=torch.float))
        batch_size = y[0].shape[0]

        self.optimizer.zero_grad()

        out = self.model(x)
        loss = self.loss_fn(out[0], y[0]) + self.loss_fn(out[1], y[1])
        loss.backward()
        self.optimizer.step()

        num_correct = batch_size * (
            torch.sum(torch.abs(out[0] - y[0]) < 0.01) +
            torch.sum(torch.abs(out[1] - y[1]) < 0.01)
        ) / (torch.numel(y[0]) + torch.numel(y[1]))

        return BatchResult(loss.item(), num_correct.item())
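Here num_correct is not a count of classified samples but a tolerance-based proxy: the fraction of predicted gradient values within 0.01 of the ground truth, rescaled to units of samples. A toy illustration with made-up values:

    import torch

    # 3 of 4 elements fall within the 0.01 tolerance; for a batch of
    # size 2 the proxy reports 2 * 3 / 4 = 1.5 "correct" samples.
    out0 = torch.tensor([0.100, 0.200, 0.300, 0.900])
    y0 = torch.tensor([0.105, 0.195, 0.305, 0.100])
    batch_size = 2
    num_correct = batch_size * torch.sum(torch.abs(out0 - y0) < 0.01) / torch.numel(y0)
    print(num_correct.item())  # 1.5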
Example no. 12
    def train_batch(self, batch) -> BatchResult:
        X, y = batch
        X = X.to(self.device, dtype=torch.float)  # (B,S,V)
        y = y.to(self.device, dtype=torch.long)  # (B,S)

        #  Train the SRM model on one batch of data.
        # - Forward pass
        # - Calculate total loss over sequence
        # - Backward pass (BPTT)
        # - Update params
        # - Calculate number of correct char predictions

        self.optimizer.zero_grad()
        scores = self.model(X)
        loss = self.loss_fn(scores, y)
        loss.backward()
        self.optimizer.step()
        self.scheduler.step()
        y_hat = torch.argmax(scores, dim=1)
        num_correct = torch.sum(y_hat == y)
        return BatchResult(loss.item(), num_correct.item())
Example no. 13
    def train_batch(self, batch) -> BatchResult:
        x, y = batch
        x = (x[0].to(self.device,
                     dtype=torch.float), x[1].to(self.device,
                                                 dtype=torch.float))
        y = y.to(self.device, dtype=torch.float)

        self.optimizer.zero_grad()

        out = self.model(x).flatten()
        loss = self.loss_fn(out, y)
        loss.backward()
        self.optimizer.step()

        num_correct = torch.sum((out > 0) == (y == 1))
        TP = torch.sum((out > 0) * (y == 1))
        TN = torch.sum((out <= 0) * (y == 0))
        FP = torch.sum((out > 0) * (y == 0))
        FN = torch.sum((out <= 0) * (y == 1))

        return BatchResult(loss.item(), num_correct.item(), TP, TN, FP, FN,
                           out, y)
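Returning the raw TP/TN/FP/FN counts lets the caller sum them over batches before deriving metrics, which is more accurate than averaging per-batch rates. A hedged sketch of that post-processing (this helper is an assumption, not part of the examples):

    def confusion_metrics(TP, TN, FP, FN, eps=1e-8):
        # Hypothetical helper: derive standard metrics from confusion
        # counts that have been summed across batches.
        TP, TN, FP, FN = (float(v) for v in (TP, TN, FP, FN))
        accuracy = (TP + TN) / (TP + TN + FP + FN + eps)
        precision = TP / (TP + FP + eps)
        recall = TP / (TP + FN + eps)
        f1 = 2 * precision * recall / (precision + recall + eps)
        return accuracy, precision, recall, f1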