Example #1
    def __prepare_test__(self, root):
        self.test_images_dict = Utility.load_images_to_dict(self.test_images_folder, "JPEG")
        self.test_images_files = list(self.test_images_dict.keys())

        self.test_truth_labels = Utility.loadTsvAsDict(join(self.test_annotations_folder, "val_annotations.txt"))
        self.test_truth_labels = dict(
            (join(self.test_images_folder, f), self.test_truth_labels[f]) for f in self.test_truth_labels)
        self.data_size = len(self.test_images_files)
Example #2
    def test(self, loader, epoch_num):

        Utility.cleanup()
        log.info(f"Finished cleanup for epoch {epoch_num}")

        self.model.eval()

        pbar = tqdm(loader, ncols=1000)
        total_loss = 0
        summary_loss = 0

        metrices = []
        num_batches = len(loader)
        log.info(f"Tester starting the testing for epoch: {epoch_num}")

        with torch.no_grad():
            for idx, data in enumerate(pbar):

                x = torch.cat((data['bg'], data['fg_bg']), dim=1).to(device=self.device)
                data['fg_bg_mask'] = data['fg_bg_mask'].to(self.device)
                data['fg_bg_depth'] = data['fg_bg_depth'].to(self.device)

                log.info(f"Starting the testing for batch:{idx}")
                (loss, mask, depth) = self.__test_one_batch__(x, data['fg_bg_mask'], data['fg_bg_depth'])
                log.info(f"End of the testing for batch:{idx}")

                total_loss += loss
                summary_loss += loss

                if self.persister is not None:
                    self.persister(data, mask, epoch_num, "mask")
                    self.persister(data, depth, epoch_num, "depth")
                    log.info(f"Persisted the prediction for batch:{idx}")

                if self.metric_fn is not None:
                    metric = self.metric_fn(data, mask)
                    metrices.append(metric)
                    log.info(f"Computed the metric for batch:{idx}")

                if ((idx + 1) % 500 == 0 or idx == num_batches - 1):
                    self.writer.write_pred_summary(data, mask, depth)
                    # Average over the number of batches actually accumulated in this window;
                    # the final window may hold fewer than 500 batches.
                    window = (idx + 1) % 500 if (idx + 1) % 500 != 0 else 500
                    l = summary_loss / window
                    self.writer.write_scalar_summary('test loss', l, epoch_num * num_batches + idx)
                    summary_loss = 0

                pbar.set_description(desc=f'Loss={loss}\t id={idx}\t')
                log.info(f"For test batch {idx} loss is {loss}")
                del loss, mask, depth, data
                log.info(f"Completed the training for batch:{idx}")

        metric = None
        if self.metric_fn is not None:
            metric = self.metric_fn.aggregate(metrices)
        return PredictionResult(total_loss / len(loader.dataset), metric)
Example #3
def showRandomImages(data, targets, predictions=None, classes=None, count=20, muSigmaPair=None):
    randImages = Utility.pickRandomElements(data, count)
    images = data[randImages]

    if (muSigmaPair is not None):
        images = Utility.unnormalize(images, muSigmaPair[0], muSigmaPair[1])

    images = images.permute(0, 2, 3, 1)

    targets = __getLabels(targets, randImages, classes)
    if predictions is not None:
        predictions = __getLabels(predictions, randImages, classes)

    showImages(images.numpy(), targets, predictions, cols=5)
Example #4
def download_CIFAR10(train_transforms, test_transforms, batch_size=128, isCuda=Utility.isCuda()):
    """
        Load CIFAR10 dataset. Uses the provided train_transforms and the test_transforms and create a object of Data.

        :param train_transforms: Transfomrations for train
        :param test_transforms: Transformations for test
        :param batch_size: Default value is 128
        :param isCuda: Default value is True
        :return: Data
        """
    dataloader_args = dict(shuffle=True, batch_size=batch_size, num_workers=4, pin_memory=True) if isCuda else dict(
        shuffle=True, batch_size=batch_size)

    train_data = datasets.CIFAR10("../data", train=True, transform=train_transforms, download=True)
    train_loader = torch.utils.data.DataLoader(train_data, **dataloader_args)

    test_data = datasets.CIFAR10("../data", train=False, transform=test_transforms, download=True)
    test_loader = torch.utils.data.DataLoader(test_data, **dataloader_args)

    print(f'Shape of a train data batch: {shape(train_loader)}')
    print(f'Shape of a test data batch: {shape(test_loader)}')

    print(f'Number of train images: {len(train_data.data)}')
    print(f'Number of test images: {len(test_data.data)}')

    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    return Data(train_loader, test_loader, classes)
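A minimal smoke-test call might look like the sketch below; it assumes plain torchvision transforms are acceptable in place of the Alb wrapper used in later examples and relies only on the train/test attributes of the returned Data object (the same attributes Example #14 uses).

from torchvision import transforms

basic_transforms = transforms.Compose([transforms.ToTensor()])
data = download_CIFAR10(basic_transforms, basic_transforms, batch_size=64)

# Pull one batch to confirm the loaders work end to end.
images, labels = next(iter(data.train))
print(images.shape, labels.shape)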
Example #5
def plt_images(images, start_idx):
    for idx, img in enumerate(images):
        img = img.to(Utility.getCpu())
        plt.subplot(rows, cols, start_idx + idx)
        plt.axis("off")
        plt.imshow(np.asarray(img.squeeze()), cmap='gray')
    return start_idx + len(images)
Example #6
def __init__(self, model, loss_fn, persister=None, metric_fn=None, device=Utility.getDevice(),
             summary_writer=ModelSummaryWriter(name="-test")):
    self.device = device
    self.loss_fn = loss_fn
    self.model = model
    self.persister = persister
    self.metric_fn = metric_fn
    self.writer = summary_writer
Example #7
    def visualize(self,
                  data,
                  data_targets,
                  classes,
                  count=5,
                  muSigPair=None,
                  figSize=(15, 15)):

        heatmaps, cam_pred = self.gradCam(data)
        randIndices = Utility.pickRandomElements(data, count)

        rand_data, rand_targets, rand_cam_pred = data[
            randIndices], data_targets[randIndices], cam_pred[randIndices]
        rand_superImposedImages = {}
        for layer in heatmaps:
            rand_superImposedImages[layer] = self.superImpose(
                rand_data, heatmaps[layer], muSigPair)

        self.plot(Utility.toImages(rand_data, muSigPair), rand_targets,
                  rand_cam_pred, rand_superImposedImages, classes, figSize)
Example #8
def showLoaderImages(loader, classes=None, count=20, muSigmaPair=None):
    """

    Takes random images from the loader and shows the images.
    Optionally Mean and Sigma pair can be passed to unnormalize data before showing the image.

    :param muSigmaPair: Default is (0, 1)
    """
    d, l = next(iter(loader))

    randImages = Utility.pickRandomElements(d, count)
    images = d[randImages]

    if (muSigmaPair is not None):
        images = Utility.unnormalize(images, muSigmaPair[0], muSigmaPair[1])

    # Loader has the channel at 1 index. But the show images need channel at the end.
    images = images.permute(0, 2, 3, 1)
    labels = __getLabels(l, randImages, classes)
    showImages(images.numpy(), labels, cols=5)
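Hypothetical usage, assuming data was built with DataUtility.download_CIFAR10 as in the other examples and that the loader applies the CIFAR-10 normalization statistics used elsewhere in this file:

cifar_classes = ('plane', 'car', 'bird', 'cat', 'deer',
                 'dog', 'frog', 'horse', 'ship', 'truck')
showLoaderImages(data.train,
                 classes=cifar_classes,
                 count=10,
                 muSigmaPair=((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)))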
Example #9
    def __init__(self, model, loss_fn, optimizer, scheduler, persister=None, metric_fn=None,
                 device=Utility.getDevice(), run_name="-model-trainer",
                 summary_writer=None):

        self.optimizer = optimizer
        self.device = device
        self.loss_fn = loss_fn
        self.scheduler = scheduler
        self.model = model
        self.persister = persister
        self.metric_fn = metric_fn
        self.writer = summary_writer
Example #10
    def superImpose(self, data, heatMapImages, muSigPair):

        superImposedImages = []
        images = Utility.toImages(data, muSigPair)
        for i, image in enumerate(images):
            image = np.uint8(255 * image)
            heatmap = 1 - heatMapImages[i]
            heatmap = np.uint8(255 * heatmap.squeeze())
            heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)

            superImposed = cv2.addWeighted(image, 0.5, heatmap, 0.5, 0)
            superImposedImages.append(superImposed)

        return superImposedImages
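The overlay step itself is standard OpenCV; the standalone sketch below shows the same blending on dummy data, independent of the model and dataset used in this file:

import numpy as np
import cv2

# Dummy 224x224 RGB image and a single-channel heatmap in [0, 1] (illustration only).
image = np.uint8(255 * np.random.rand(224, 224, 3))
heatmap = np.random.rand(224, 224).astype(np.float32)

# Invert, scale to 0..255, colorize with the JET colormap, then blend 50/50.
colored = cv2.applyColorMap(np.uint8(255 * (1 - heatmap)), cv2.COLORMAP_JET)
overlay = cv2.addWeighted(image, 0.5, colored, 0.5, 0)
print(overlay.shape, overlay.dtype)  # (224, 224, 3) uint8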
Example #11
    def __init__(self, model, data, loss_fn, optimizer, checkpoint=None, model_path=None, scheduler=None,
                 metric_fn=None,
                 train_pred_persister=None, test_pred_persister=None, device=Utility.getDevice(),
                 train_summary_writer=ModelSummaryWriter(name="-train"),
                 test_summary_writer=ModelSummaryWriter(name="-test")):
        self.model = model
        self.lossFn = loss_fn
        self.optimizer = optimizer
        self.data = data
        self.checkpoint = checkpoint
        self.model_path = model_path

        self.trainer = ModelTrainer(model=model, loss_fn=loss_fn, optimizer=optimizer,
                                    scheduler=optimizer if scheduler is None else scheduler,
                                    metric_fn=metric_fn, persister=train_pred_persister,
                                    summary_writer=train_summary_writer)
        self.tester = ModelTester(model=model, loss_fn=loss_fn, persister=test_pred_persister, metric_fn=metric_fn,
                                  device=device, summary_writer=test_summary_writer)
Example #12
    def test(self, model, loader, lossFn, device=Utility.getDevice()):
        model.eval()
        pbar = tqdm(loader, ncols=1000)
        wholePred = []
        wholeData = []
        wholeTarget = []
        totalLoss = 0
        with torch.no_grad():
            for idx, (data, target) in enumerate(pbar):
                data, target = data.to(device), target.to(device)
                (loss,
                 prediction) = self.__test_one_batch(model, data, target,
                                                     lossFn)
                totalLoss += loss
                wholePred.append(prediction)
                wholeData.append(data)
                wholeTarget.append(target)

        return PredictionResult(torch.cat(wholeData), torch.cat(wholePred),
                                torch.cat(wholeTarget),
                                totalLoss / len(loader.dataset))
Example #13
def main():
    print("Gradcam Test")

    net = models.resnet34(pretrained=True)
    net.to(Utility.getDevice())
    summary(net, input_size=(3, 224, 224))

    classes = getImageNetClasses()

    transforms = Compose([
        ToTensor()
    ])
    loader = DataUtility.loadImages("resources/processed-images", Alb(transforms))

    layers = ["layer4", "layer3", "layer2", "layer1"]

    cam = GradCam(net, layers)
    analyzer = Analyzer(cam)

    d, l = next(iter(loader))
    analyzer.visualize(d, l, classes)
Example #14
    def fit(self, epoch, device=Utility.getDevice()):
        train_accs = []
        train_losses = []
        test_accs = []
        test_losses = []
        learning_rates = []
        for e in range(0, epoch):
            print(f'\n\nEpoch: {e + 1}')

            learning_rate = self.optimizer.param_groups[0]['lr']
            learning_rates.append(learning_rate)
            train_result = self.trainer.train_one_epoch(
                self.model,
                self.data.train,
                self.optimizer,
                device=device,
                lossFn=self.lossFn,
                scheduler=self.scheduler)
            trainAcc = MetricsUtility.compute_accuracy(
                train_result.predictions, train_result.targets)
            train_accs.append(trainAcc)
            train_losses.append(train_result.loss)

            print(
                f'Train Accuracy: {trainAcc}%, Train Loss: {train_result.loss}, Learning Rate: {learning_rate}'
            )

            test_result = self.tester.test(self.model,
                                           self.data.test,
                                           lossFn=self.lossFn,
                                           device=device)
            testAcc = MetricsUtility.compute_accuracy(test_result.predictions,
                                                      test_result.targets)
            test_accs.append(testAcc)
            test_losses.append(test_result.loss)
            print(f'Test Accuracy: {testAcc}%, Test Loss: {test_result.loss}')

        return ModelBuildResult(train_accs, train_losses, test_accs,
                                test_losses, learning_rates)
Example #15
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)

mean_array = np.array([*mean])

train_transforms = Compose([
    PadIfNeeded(40, 40, always_apply=True, p=1.0),
    RandomCrop(32, 32, always_apply=True, p=1.0),
    HorizontalFlip(p=0.5),
    Cutout(num_holes=1, max_h_size=8, max_w_size=8, fill_value=np.array([*mean]) * 255.0, p=0.75),
    Normalize(mean, std),
    ToTensor()
])

test_transforms = Compose([
    Normalize(mean, std),
    ToTensor()
])

data = DataUtility.download_CIFAR10(Alb(train_transforms), Alb(test_transforms), batch_size=512)

criterion = F.nll_loss
net = S11Resnet().to(Utility.getDevice())
optimizer = optim.SGD(net.parameters(), lr=1e-5, momentum=0.9)
finder = LRFinder(net, optimizer, criterion, Utility.getDevice())

finder.range_test(data.train, val_loader=data.test, start_lr=1e-5, end_lr=1e-4,
                     num_iter=2, step_mode="linear")
finder.plot()
finder.reset()
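The num_iter=2 linear test above is only a quick smoke test. Assuming the LRFinder here follows the torch-lr-finder style API the snippet already uses, a fuller sweep would typically ramp exponentially over many more iterations:

finder.range_test(data.train, start_lr=1e-5, end_lr=1, num_iter=100, step_mode="exp")
finder.plot()
finder.reset()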
Example #16
def loadTinyImagenet(data_folder, train_transforms, test_transforms, batch_size=128, isCuda=Utility.isCuda()):
    dataloader_args = dict(shuffle=True, batch_size=batch_size, num_workers=4, pin_memory=True) if isCuda else dict(
        shuffle=True, batch_size=batch_size)

    train_data = ImageNet.TinyImageNet(data_folder, train=True, transform=train_transforms)
    train_loader = torch.utils.data.DataLoader(train_data, **dataloader_args)

    test_data = ImageNet.TinyImageNet(data_folder, train=False, transform=test_transforms)
    test_loader = torch.utils.data.DataLoader(test_data, **dataloader_args)

    print(f'Shape of a train data batch: {shape(train_loader)}')
    print(f'Shape of a test data batch: {shape(test_loader)}')

    print(f'Number of train images: {len(train_data)}')
    print(f'Number of test images: {len(test_data)}')

    return Data(train_loader, test_loader, train_data.idx_class)
Example #17
def __load_classes__(self, root):
    self.idx_class = Utility.loadFileToArray(root + "/wnids.txt")
    self.class_idx = dict((c, i) for i, c in enumerate(self.idx_class))
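For illustration only, with a made-up wnids.txt holding three WordNet ids, the two lookups built above would be:

idx_class = ["n01443537", "n01629819", "n01641577"]        # list index -> WordNet id
class_idx = dict((c, i) for i, c in enumerate(idx_class))  # WordNet id -> index
print(class_idx["n01629819"])  # prints 1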
Example #18
trans = transforms.Compose([transforms.ToTensor()])

dataset = DepthDataset("data/tiny_data/", trans, trans, trans, trans)
train_dataset = torch.utils.data.Subset(dataset, list(range(8)))
test_dataset = torch.utils.data.Subset(dataset, list(range(16, 20)))
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           shuffle=True,
                                           batch_size=2)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          shuffle=True,
                                          batch_size=2)

# dataset.show_images(5)

model = ResUNet(6, 1).to(Utility.getDevice())
summary(model, (6, 224, 224))

optimizer = optim.SGD(model.parameters(), lr=1e-5, momentum=0.9)

lossFn = Loss_fn(BCEWithLogitsLoss(), BCEWithLogitsLoss(), 1, 1)

builder = ModelBuilder(model=model,
                       optimizer=optimizer,
                       device=Utility.getDevice(),
                       loss_fn=lossFn,
                       scheduler=optimizer,
                       data=Data(train_loader, test_loader))

result = builder.fit(1)
print(result)
Example #19
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)

mean_array = np.array([*mean])

train_transforms = Compose([
    PadIfNeeded(40, 40, always_apply=True, p=1.0),
    RandomCrop(32, 32, always_apply=True, p=1.0),
    HorizontalFlip(p=0.5),
    Cutout(num_holes=1,
           max_h_size=8,
           max_w_size=8,
           fill_value=np.array([*mean]) * 255.0,
           p=0.75),
    Normalize(mean, std),
    ToTensor()
])

test_transforms = Compose([Normalize(mean, std), ToTensor()])

data = DataUtility.download_CIFAR10(Alb(train_transforms),
                                    Alb(test_transforms),
                                    batch_size=512)

net = S11Resnet().to(Utility.getDevice())
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
builder = ModelBuilder(net, data, LossFn(F.nll_loss, l2Factor=0.01, model=net),
                       optimizer)
result = builder.fit(1)
Example #20
    def train_one_epoch(self, loader, epoch_num):

        Utility.cleanup()
        log.info(f"Finished cleanup for epoch {epoch_num}")

        self.model.train()
        pbar = tqdm(loader, ncols=1000)

        total_loss = 0
        summary_loss = 0
        metrices = []

        num_batches = len(loader)
        log.info(f"Trainer starting the training for epoch: {epoch_num}")
        for idx, data in enumerate(pbar):

            log.info(f"Obtained the data for batch:{idx}")

            x = torch.cat((data['bg'], data['fg_bg']), dim=1).to(self.device)
            data['fg_bg_mask'] = data['fg_bg_mask'].to(self.device)
            data['fg_bg_depth'] = data['fg_bg_depth'].to(self.device)

            log.info(f"Starting the training for batch:{idx}")
            (loss, mask, depth) = self.__train_one_batch__(x, data['fg_bg_mask'], data['fg_bg_depth'])
            log.info(f"End of the training for batch:{idx}")

            total_loss += loss
            summary_loss += loss

            self.scheduler.step()
            log.info(f"Scheduler step for the batch:{idx}")

            if self.persister is not None:
                self.persister(data, mask, epoch_num, "mask")
                self.persister(data, depth, epoch_num, "depth")
                log.info(f"Persisted the prediction for batch:{idx}")

            if self.metric_fn is not None:
                metric = self.metric_fn(data, mask)
                metrices.append(metric)
                log.info(f"Computed the metric for batch:{idx}")

            lr = self.optimizer.param_groups[0]['lr']
            pbar.set_description(desc=f'id={idx}\t Loss={loss}\t LR={lr}\t')
            log.info(f"For train batch {idx} loss is {loss} and lr is {lr}")

            if ((idx + 1) % 500 == 0 or idx == num_batches - 1):
                self.writer.write_pred_summary(data, mask.detach(), depth.detach())
                # Average over the number of batches actually accumulated in this window;
                # the final window may hold fewer than 500 batches.
                window = (idx + 1) % 500 if (idx + 1) % 500 != 0 else 500
                l = summary_loss / window
                self.writer.write_scalar_summary('train loss', l, epoch_num * num_batches + idx)
                self.writer.write_scalar_summary('lr', lr, epoch_num * num_batches + idx)
                summary_loss = 0

            del loss, mask, depth, data
            log.info(f"Completed the training for batch:{idx}")

        metric = None
        if self.metric_fn is not None:
            metric = self.metric_fn.aggregate(metrices)
        return PredictionResult(total_loss / len(loader.dataset), metric)