    def _val(self):
        self._model.eval()
        val_loss = 0.0
        val_acc = 0.0
        # collect features and labels to plot the center-loss embedding
        all_features, all_labels = [], []

        with torch.no_grad():
            for i, (images, targets) in tqdm(enumerate(self._val_loader),
                                             total=len(self._val_loader),
                                             leave=False):
                images = images.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)

                # compute output, measure accuracy and record loss
                outputs, features = self._model(images)

                loss = self._criterion(outputs, targets)
                acc = accuracy(outputs, targets)[0]

                val_loss += loss.item()
                val_acc += acc.item()

                all_features.append(features.data.cpu().numpy())
                all_labels.append(targets.data.cpu().numpy())

            i += 1
            self._val_loss_list.append(val_loss / i)
            self._val_acc_list.append(val_acc / i)

        # plot center
        all_features = np.concatenate(all_features, 0)
        all_labels = np.concatenate(all_labels, 0)
        self._plot_features(all_features, all_labels, prefix="test")
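
    # _plot_features is not shown in this excerpt. A minimal sketch, assuming the
    # penultimate features are 2-D (the usual setup when visualizing center loss)
    # and that np/os are imported at module level, as elsewhere in this file; the
    # output path is illustrative, not the repository's actual layout.
    def _plot_features(self, features, labels, prefix="train"):
        import matplotlib
        matplotlib.use("Agg")  # render off-screen; training runs headless
        import matplotlib.pyplot as plt

        plt.figure(figsize=(6, 6))
        for class_idx in np.unique(labels):
            mask = labels == class_idx
            # one color per class, plotted in the 2-D feature plane
            plt.scatter(features[mask, 0], features[mask, 1], s=2,
                        label=str(class_idx))
        plt.legend(loc="upper right", fontsize="small")
        os.makedirs("plots", exist_ok=True)
        plt.savefig(os.path.join(
            "plots", "{}_epoch_{}.png".format(prefix, self._current_epoch_num)))
        plt.close()
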
    def _train(self):
        self._model.train()
        train_loss = 0.0
        train_acc = 0.0

        for i, (images, targets) in tqdm(enumerate(self._train_loader),
                                         total=len(self._train_loader),
                                         leave=False):
            images = images.cuda(non_blocking=True)
            targets = targets.cuda(non_blocking=True)

            # compute output, measure accuracy and record loss
            outputs = self._model(images)

            loss = self._criterion(outputs, targets)
            acc = accuracy(outputs, targets)[0]
            # acc = eval_metrics(targets, outputs, 2)[0]

            train_loss += loss.item()
            train_acc += acc.item()

            # compute gradient and do SGD step
            self._optimizer.zero_grad()
            loss.backward()
            self._optimizer.step()

        i += 1
        self._train_loss_list.append(train_loss / i)
        self._train_acc_list.append(train_acc / i)
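
# The accuracy() helper used throughout these snippets is not defined in this
# excerpt. A minimal sketch of the standard top-k accuracy utility (same API
# shape as the PyTorch ImageNet reference script): it returns a list of
# percentage tensors, which matches the accuracy(outputs, targets)[0].item()
# usage above. Treat the exact signature as an assumption; torch is assumed
# imported at module level, as it is used everywhere else in this file.
def accuracy(outputs, targets, topk=(1,)):
    """Compute precision@k for the specified values of k, as percentages."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = targets.size(0)

        # top-k predicted class indices, shape (maxk, batch)
        _, pred = outputs.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(targets.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
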
Example 3
    def _calc_acc_on_private_test_with_tta(self):
        self._model.eval()
        test_acc = 0.0
        print("Calc acc on private test with tta..")

        with torch.no_grad():
            for idx in tqdm(
                range(len(self._test_set)), total=len(self._test_set), leave=False
            ):
                images, targets = self._test_set[idx]
                targets = torch.LongTensor([targets])

                if not isinstance(images, list):
                    images = [images]
                images = torch.stack(images, 0)
                images = images.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)

                outputs = self._model(images)
                outputs = F.softmax(outputs, 1)
                outputs = torch.sum(outputs, 0)
                outputs = torch.unsqueeze(outputs, 0)
                acc = accuracy(outputs, targets)[0]
                test_acc += acc.item()

            test_acc = test_acc / (idx + 1)
        print("Accuracy on private test with tta: {:.3f}".format(test_acc))
        return test_acc
    def _val(self):
        self._model.eval()
        val_loss = 0.0
        val_acc = 0.0

        # clear previous debug dumps (POSIX shell only)
        os.system("rm -rf debug/*")
        for i, (images, targets) in tqdm(enumerate(self._val_loader),
                                         total=len(self._val_loader),
                                         leave=False):
            images = images.cuda(non_blocking=True)
            targets = targets.cuda(non_blocking=True)

            # compute output, measure accuracy and record loss
            outputs = self._model(images)

            loss = self._criterion(outputs, targets)
            acc = accuracy(outputs, targets)[0]
            # acc = eval_metrics(targets, outputs, 2)[0]

            val_loss += loss.item()
            val_acc += acc.item()

            # debug: dump each validation sample into a folder named by its
            # predicted class (assumes a batch size of 1, as the squeezes imply)
            pred = torch.argmax(torch.squeeze(outputs, dim=0), dim=0).item()
            tmp_image = torch.squeeze(images, dim=0)
            tmp_image = tmp_image.cpu().numpy().transpose(1, 2, 0)
            os.makedirs("debug/{}".format(pred), exist_ok=True)
            cv2.imwrite("debug/{}/{}.png".format(pred, i), tmp_image)

        i += 1
        self._val_loss.append(val_loss / i)
        self._val_acc.append(val_acc / i)
Example 5
    def _calc_acc_on_private_test_with_tta(self):
        self._model.eval()
        test_acc = 0.0
        print("Calc acc on private test..")

        # placeholder transform for predict-time augmentation, which is not
        # implemented yet (see the TODO below); currently unused
        transform = transforms.Compose([
            transforms.ToPILImage(),
        ])

        with torch.no_grad():
            for i, (images, targets) in tqdm(enumerate(self._test_loader),
                                             total=len(self._test_loader),
                                             leave=False):

                # TODO: implement test-time augmentation at predict time
                images = images.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)

                outputs = self._model(images)
                acc = accuracy(outputs, targets)[0]
                test_acc += acc.item()

            test_acc = test_acc / (i + 1)
        print("Accuracy on private test: {:.3f}".format(test_acc))
        return test_acc
    def _train(self):
        self._model.train()
        train_loss = 0.0
        train_acc = 0.0

        # collect features and labels to plot the center-loss embedding
        all_features, all_labels = [], []

        for i, (images, targets) in tqdm(enumerate(self._train_loader),
                                         total=len(self._train_loader),
                                         leave=False):
            images = images.cuda(non_blocking=True)
            targets = targets.cuda(non_blocking=True)

            # compute output, measure accuracy and record loss
            outputs, features = self._model(images)

            loss = self._criterion(outputs, targets)

            if self._current_epoch_num > 20:
                loss_cent = self._criterion_cent(features, targets)
                loss_cent *= self._configs["cweight"]
                loss = loss + loss_cent

            acc = accuracy(outputs, targets)[0]

            train_loss += loss.item()
            train_acc += acc.item()

            # compute gradient and do SGD step
            self._optimizer.zero_grad()
            self._optimizer_cent.zero_grad()
            loss.backward()
            self._optimizer.step()

            if self._current_epoch_num > 20:
                # rescale the center gradients so that cweight does not also
                # scale the effective learning rate of the centers themselves
                for param in self._criterion_cent.parameters():
                    param.grad.data *= 1.0 / self._configs["cweight"]
                self._optimizer_cent.step()

            all_features.append(features.data.cpu().numpy())
            all_labels.append(targets.data.cpu().numpy())

        i += 1
        self._train_loss_list.append(train_loss / i)
        self._train_acc_list.append(train_acc / i)

        # plot center
        all_features = np.concatenate(all_features, 0)
        all_labels = np.concatenate(all_labels, 0)
        self._plot_features(all_features, all_labels, prefix="train")
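
# self._criterion_cent and self._optimizer_cent are configured outside this
# excerpt. A minimal sketch of a standard center-loss criterion (Wen et al.,
# 2016): each class keeps a learnable center, and the loss is the mean squared
# distance from a feature to its class center. The constructor arguments and
# the optimizer wiring in the trailing comments are assumptions about how the
# trainer sets it up, not the repository's exact code.
import torch.nn as nn


class CenterLoss(nn.Module):
    def __init__(self, num_classes, feat_dim):
        super().__init__()
        # one learnable center per class
        self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))

    def forward(self, features, targets):
        # squared distance between each feature and the center of its own class
        centers_batch = self.centers.index_select(0, targets)
        return ((features - centers_batch) ** 2).sum(dim=1).mean() / 2.0

# Hypothetical wiring, mirroring how the loop above uses it (7 expression
# classes, 2-D features for plotting):
# self._criterion_cent = CenterLoss(num_classes=7, feat_dim=2).cuda()
# self._optimizer_cent = torch.optim.SGD(self._criterion_cent.parameters(), lr=0.5)
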
    def _train(self):
        self._model.train()
        train_loss = 0.0
        train_acc = 0.0

        for i, (images, targets) in tqdm(enumerate(self._train_loader),
                                         total=len(self._train_loader),
                                         leave=False):
            images = images.cuda(non_blocking=True)
            targets = targets.cuda(non_blocking=True)

            # compute output, measure accuracy and record loss
            outputs = self._model(images)

            loss = self._criterion(outputs, targets)
            acc = accuracy(outputs, targets)[0]
            # acc = eval_metrics(targets, outputs, 2)[0]

            train_loss += loss.item()
            train_acc += acc.item()

            # compute gradient and do SGD step
            self._optimizer.zero_grad()
            loss.backward()
            self._optimizer.step()

            # log
            if i == 0:
                grid = torchvision.utils.make_grid(images)
                self._writer.add_image("images", grid, 0)
                # self._writer.add_graph(self._model, images)
                # self._writer.close()

            if self._configs["little"] == 1:
                mask = torch.squeeze(outputs, 0)
                mask = mask.detach().cpu().numpy() * 255
                mask = np.transpose(mask, (1, 2, 0)).astype(np.uint8)
                cv2.imwrite(
                    os.path.join("debug",
                                 "e{}.png".format(self._current_epoch_num)),
                    mask[..., 1],
                )

        i += 1
        self._train_loss.append(train_loss / i)
        self._train_acc.append(train_acc / i)
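
# self._writer is created outside this excerpt; the add_image call above assumes
# a torch.utils.tensorboard SummaryWriter wrapping the make_grid output. A
# minimal sketch of that wiring; build_writer and the log directory are
# illustrative assumptions, not the repository's actual setup.
from torch.utils.tensorboard import SummaryWriter


def build_writer(log_dir="logs"):
    """Create the TensorBoard writer used for first-batch image logging."""
    return SummaryWriter(log_dir=log_dir)

# e.g., in the trainer's __init__:
# self._writer = build_writer(os.path.join("logs", self._configs["model_name"]))
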
Example 8
    def _calc_acc_on_private_test(self):
        self._model.eval()
        test_acc = 0.0
        print("Calc acc on private test..")
        with torch.no_grad():
            for i, (images, targets) in tqdm(
                enumerate(self._test_loader), total=len(self._test_loader), leave=False
            ):

                images = images.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)

                outputs = self._model(images)
                print(outputs.shape, outputs)
                acc = accuracy(outputs, targets)[0]
                test_acc += acc.item()

            test_acc = test_acc / (i + 1)
        print("Accuracy on private test: {:.3f}".format(test_acc))
        return test_acc
    def _calc_acc_on_private_test_with_tta(self):
        self._model.eval()
        test_acc = 0.0
        print("Calc acc on private test with tta..")
        f = open(
            "private_test_log_{}_{}.txt".format(
                self._configs["arch"], self._configs["model_name"]
            ),
            "w",
        )

        with torch.no_grad():
            for idx in tqdm(
                range(len(self._test_set)), total=len(self._test_set), leave=False
            ):
                images, targets = self._test_set[idx]
                targets = torch.LongTensor([targets])

                images = make_batch(images)
                images = images.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)

                outputs = self._model(images)
                outputs = F.softmax(outputs, 1)

                # outputs.shape [tta_size, 7]
                outputs = torch.sum(outputs, 0)

                outputs = torch.unsqueeze(outputs, 0)
                # print(outputs.shape)
                # TODO: try with softmax first and see the change
                acc = accuracy(outputs, targets)[0]
                test_acc += acc.item()
                f.write("{}_{}\n".format(idx, acc.item()))

            test_acc = test_acc / (idx + 1)
        print("Accuracy on private test with tta: {:.3f}".format(test_acc))
        f.close()
        return test_acc
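
# make_batch is not shown in this excerpt. A minimal sketch, assuming the test
# set returns either a single (C, H, W) tensor or a list of equally shaped TTA
# crops per sample, which the loop above then averages by summing softmax
# scores over dim 0; torch is assumed imported at module level.
def make_batch(images):
    """Stack a single tensor or a list of TTA tensors into one (N, C, H, W) batch."""
    if not isinstance(images, list):
        images = [images]
    return torch.stack(images, 0)
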
Example 10
    def _calc_acc_on_private_test(self):
        self._model.eval()
        test_acc = 0.0
        print("Calc acc on private test..")
        f = open("private_test_log.txt", "w")
        with torch.no_grad():
            for i, (images, targets) in tqdm(enumerate(self._test_loader),
                                             total=len(self._test_loader),
                                             leave=False):
                images = images.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)

                outputs = self._model(images)
                #print(outputs.shape, outputs)
                acc = accuracy(outputs, targets)[0]
                print(f"test image {i+1} acc: {acc.item()}")
                test_acc += acc.item()
                f.write("{}_{}\n".format(i, acc.item()))

            test_acc = test_acc / (i + 1)
        print("Accuracy on private test: {:.3f}".format(test_acc))
        f.close()
        return test_acc
    def _val(self):
        self._model.eval()
        val_loss = 0.0
        val_acc = 0.0

        with torch.no_grad():
            for i, (images, targets) in tqdm(enumerate(self._val_loader),
                                             total=len(self._val_loader),
                                             leave=False):
                images = images.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)

                # compute output, measure accuracy and record loss
                outputs = self._model(images)

                loss = self._criterion(outputs, targets)
                acc = accuracy(outputs, targets)[0]

                val_loss += loss.item()
                val_acc += acc.item()

            i += 1
            self._val_loss_list.append(val_loss / i)
            self._val_acc_list.append(val_acc / i)
Example 12
    def _calc_acc_on_private_test(self):
        self._model.eval()
        test_acc = 0.0
        print("Calc acc on private test..")
        f = open("private_test_log.txt", "w")
        with torch.no_grad():
            for i, (images, targets) in tqdm(enumerate(self._test_loader),
                                             total=len(self._test_loader),
                                             leave=False):

                images = images.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)

                outputs = self._model(images)
                print(outputs.shape, outputs)
                acc = accuracy(outputs, targets)[0]
                test_acc += acc.item()
                f.write("{}_{}\n".format(i, acc.item()))

            test_acc = test_acc / (i + 1)
        print("Accuracy on private test: {:.3f}".format(test_acc))
        print("Checkpoint saved at", self._checkpoint_path)
        f.close()
        return test_acc