Example #1
0
 def __init__(self, model, loss_fn, persister=None, metric_fn=None, device=None,
              summary_writer=None):
     """Store the model and the collaborators needed to run evaluation.

     Args:
         model: network to evaluate.
         loss_fn: callable computing the loss for a batch.
         persister: optional object used to persist predictions.
         metric_fn: optional callable computing a metric over predictions.
         device: target device; defaults to ``Utility.getDevice()`` resolved
             at call time rather than at import time.
         summary_writer: optional writer; a fresh ``ModelSummaryWriter`` is
             created per instance when omitted.
     """
     # Bug fix: the original signature evaluated Utility.getDevice() and
     # ModelSummaryWriter(name="-test") at def time, freezing the device
     # choice at import and sharing ONE writer across every instance.
     # Resolve both lazily via None sentinels instead.
     self.device = Utility.getDevice() if device is None else device
     self.loss_fn = loss_fn
     self.model = model
     self.persister = persister
     self.metric_fn = metric_fn
     self.writer = (ModelSummaryWriter(name="-test")
                    if summary_writer is None else summary_writer)
Example #2
0
    def __init__(self, model, loss_fn, optimizer, scheduler, persister=None, metric_fn=None,
                 device=None, run_name="-model-trainer",
                 summary_writer=None):
        """Store the model plus the objects needed to drive training.

        Args:
            model: network to train.
            loss_fn: callable computing the loss for a batch.
            optimizer: optimizer stepping the model parameters.
            scheduler: learning-rate scheduler.
            persister: optional object used to persist predictions.
            metric_fn: optional callable computing a metric over predictions.
            device: target device; defaults to ``Utility.getDevice()``
                resolved at call time rather than at import time.
            run_name: kept for interface compatibility; not read in this
                constructor (NOTE(review): presumably consumed elsewhere —
                verify against callers).
            summary_writer: optional summary writer.
        """
        self.optimizer = optimizer
        # Bug fix: the original default evaluated Utility.getDevice() at
        # def time, pinning whichever device was visible when the module
        # was first imported. Resolve lazily instead.
        self.device = Utility.getDevice() if device is None else device
        self.loss_fn = loss_fn
        self.scheduler = scheduler
        self.model = model
        self.persister = persister
        self.metric_fn = metric_fn
        self.writer = summary_writer
Example #3
0
    def __init__(self, model, data, loss_fn, optimizer, checkpoint=None, model_path=None, scheduler=None,
                 metric_fn=None,
                 train_pred_persister=None, test_pred_persister=None, device=None,
                 train_summary_writer=None,
                 test_summary_writer=None):
        """Wire up a trainer and a tester around one model and data bundle.

        Args:
            model: network to build.
            data: object exposing the train/test loaders.
            loss_fn: callable computing the loss for a batch.
            optimizer: optimizer stepping the model parameters.
            checkpoint: optional checkpoint object, stored as-is.
            model_path: optional path for saving the model, stored as-is.
            scheduler: optional LR scheduler; when omitted the optimizer is
                passed in its place (deliberate fallback — callers such as
                the ResUNet script pass ``scheduler=optimizer`` explicitly).
            metric_fn: optional metric callable forwarded to the trainer/tester.
            train_pred_persister / test_pred_persister: optional persisters.
            device: target device; defaults to ``Utility.getDevice()``
                resolved at call time rather than at import time.
            train_summary_writer / test_summary_writer: optional writers; a
                fresh ``ModelSummaryWriter`` is created per instance when
                omitted.
        """
        self.model = model
        self.lossFn = loss_fn
        self.optimizer = optimizer
        self.data = data
        self.checkpoint = checkpoint
        self.model_path = model_path

        # Bug fix: the original signature built the device and BOTH summary
        # writers at def time, so every ModelBuilder shared the same two
        # writer instances. Resolve the defaults lazily instead.
        device = Utility.getDevice() if device is None else device
        if train_summary_writer is None:
            train_summary_writer = ModelSummaryWriter(name="-train")
        if test_summary_writer is None:
            test_summary_writer = ModelSummaryWriter(name="-test")

        self.trainer = ModelTrainer(model=model, loss_fn=loss_fn, optimizer=optimizer,
                                    scheduler=optimizer if scheduler is None else scheduler,
                                    metric_fn=metric_fn, persister=train_pred_persister,
                                    # Bug fix: `device` was forwarded to the
                                    # tester but not to the trainer, so the
                                    # trainer silently fell back to its own
                                    # default device.
                                    device=device,
                                    summary_writer=train_summary_writer)
        self.tester = ModelTester(model=model, loss_fn=loss_fn, persister=test_pred_persister, metric_fn=metric_fn,
                                  device=device, summary_writer=test_summary_writer)
Example #4
0
    def test(self, model, loader, lossFn, device=None):
        """Evaluate *model* over *loader* without gradients and collect results.

        Args:
            model: network to evaluate; switched to eval mode here.
            loader: iterable of ``(data, target)`` batches.
            lossFn: loss function forwarded to ``__test_one_batch``.
            device: target device; defaults to ``Utility.getDevice()``
                resolved at call time rather than at import time.

        Returns:
            ``PredictionResult`` holding the concatenated inputs, predictions
            and targets, plus the accumulated loss divided by the dataset
            size.  NOTE(review): that division assumes lossFn returns a sum
            over the batch, not a mean — confirm the loss reduction used.
        """
        # Bug fix: the original default evaluated Utility.getDevice() at
        # def time; resolve it lazily via a None sentinel instead.
        if device is None:
            device = Utility.getDevice()
        model.eval()
        pbar = tqdm(loader, ncols=1000)
        wholePred = []
        wholeData = []
        wholeTarget = []
        totalLoss = 0
        # Inference only: disable autograd for speed and memory.
        with torch.no_grad():
            for idx, (data, target) in enumerate(pbar):
                data, target = data.to(device), target.to(device)
                (loss,
                 prediction) = self.__test_one_batch(model, data, target,
                                                     lossFn)
                totalLoss += loss
                wholePred.append(prediction)
                wholeData.append(data)
                wholeTarget.append(target)

        return PredictionResult(torch.cat(wholeData), torch.cat(wholePred),
                                torch.cat(wholeTarget),
                                totalLoss / len(loader.dataset))
Example #5
0
def main():
    """Run a Grad-CAM visualization demo on a pretrained ResNet-34."""
    print("Gradcam Test")

    net = models.resnet34(pretrained=True)
    net.to(Utility.getDevice())
    summary(net, input_size=(3, 224, 224))

    classes = getImageNetClasses()

    transforms = Compose([
        ToTensor()
    ])
    loader = DataUtility.loadImages("resources/processed-images", Alb(transforms))

    # Inspect CAMs from the deepest to the shallowest residual stage.
    layers = ["layer4", "layer3", "layer2", "layer1"]

    cam = GradCam(net, layers)
    analyzer = Analyzer(cam)

    # Bug fix: `iter(loader).next()` is Python 2 iterator syntax and raises
    # AttributeError on Python 3; use the builtin next() instead.
    d, l = next(iter(loader))
    analyzer.visualize(d, l, classes)
Example #6
0
    def fit(self, epoch, device=None):
        """Train and evaluate for *epoch* epochs, tracking per-epoch metrics.

        Args:
            epoch: number of epochs to run.
            device: target device; defaults to ``Utility.getDevice()``
                resolved at call time rather than at import time.

        Returns:
            ``ModelBuildResult`` with per-epoch train/test accuracies and
            losses plus the learning rate observed at the start of each epoch.
        """
        # Bug fix: the original default evaluated Utility.getDevice() at
        # def time; resolve it lazily via a None sentinel instead.
        if device is None:
            device = Utility.getDevice()
        train_accs = []
        train_losses = []
        test_accs = []
        test_losses = []
        learning_rates = []
        for e in range(epoch):
            print(f'\n\nEpoch: {e + 1}')

            # Record the LR before the scheduler can change it this epoch.
            learning_rate = self.optimizer.param_groups[0]['lr']
            learning_rates.append(learning_rate)
            train_result = self.trainer.train_one_epoch(
                self.model,
                self.data.train,
                self.optimizer,
                device=device,
                lossFn=self.lossFn,
                scheduler=self.scheduler)
            trainAcc = MetricsUtility.compute_accuracy(
                train_result.predictions, train_result.targets)
            train_accs.append(trainAcc)
            train_losses.append(train_result.loss)

            print(
                f'Train Accuracy: {trainAcc}%, Train Loss: {train_result.loss}, Learning Rate: {learning_rate}'
            )

            test_result = self.tester.test(self.model,
                                           self.data.test,
                                           lossFn=self.lossFn,
                                           device=device)
            testAcc = MetricsUtility.compute_accuracy(test_result.predictions,
                                                      test_result.targets)
            test_accs.append(testAcc)
            test_losses.append(test_result.loss)
            print(f'Test Accuracy: {testAcc}%, Test Loss: {test_result.loss}')

        return ModelBuildResult(train_accs, train_losses, test_accs,
                                test_losses, learning_rates)
Example #7
0
# CIFAR-10 per-channel normalization statistics (RGB).
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)

# NOTE(review): mean_array is not read anywhere below in this snippet —
# presumably consumed elsewhere; verify before removing.
mean_array = np.array([*mean])

# Training augmentation: pad to 40x40, random-crop back to 32x32 (shifted
# crops), horizontal flip, one cutout hole filled with the dataset mean
# (scaled to 0-255 pixel space), then normalize and convert to tensor.
train_transforms = Compose([
    PadIfNeeded(40, 40, always_apply=True, p=1.0),
    RandomCrop(32, 32, always_apply=True, p=1.0),
    HorizontalFlip(p=0.5),
    Cutout(num_holes=1, max_h_size=8, max_w_size=8, fill_value=np.array([*mean]) * 255.0, p=0.75),
    Normalize(mean, std),
    ToTensor()
])

# Test pipeline: normalization only, no augmentation.
test_transforms = Compose([
    Normalize(mean, std),
    ToTensor()
])

data = DataUtility.download_CIFAR10(Alb(train_transforms), Alb(test_transforms), batch_size=512)

criterion = F.nll_loss
net = S11Resnet().to(Utility.getDevice())
optimizer = optim.SGD(net.parameters(), lr = 1e-5, momentum=0.9)
finder = LRFinder(net, optimizer, criterion, Utility.getDevice())

# LR range test: sweep linearly from 1e-5 to 1e-4 over 2 iterations,
# validating against the test loader (a quick smoke-scale run; real sweeps
# would use more iterations).
finder.range_test(data.train, val_loader=data.test, start_lr=1e-5, end_lr=1e-4,
                     num_iter=2, step_mode="linear")
finder.plot()
# Restore the model and optimizer to their pre-sweep state.
finder.reset()
Example #8
0
# Single tensor-conversion transform reused for all four dataset channels.
trans = transforms.Compose([transforms.ToTensor()])

# Tiny smoke-test split: first 8 samples for training, samples 16-19 for test.
dataset = DepthDataset("data/tiny_data/", trans, trans, trans, trans)
train_dataset = torch.utils.data.Subset(dataset, list(range(8)))
test_dataset = torch.utils.data.Subset(dataset, list(range(16, 20)))
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           shuffle=True,
                                           batch_size=2)
# NOTE(review): shuffling the test loader is unusual — order-independent
# metrics are unaffected, but confirm it is intentional.
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          shuffle=True,
                                          batch_size=2)

# dataset.show_images(5)

# ResUNet with 6 input channels and 1 output channel.
model = ResUNet(6, 1).to(Utility.getDevice())
summary(model, (6, 224, 224))

optimizer = optim.SGD(model.parameters(), lr=1e-5, momentum=0.9)

# Combined loss: two BCE-with-logits terms weighted 1:1.
lossFn = Loss_fn(BCEWithLogitsLoss(), BCEWithLogitsLoss(), 1, 1)

# NOTE(review): scheduler=optimizer matches ModelBuilder's own fallback
# (it substitutes the optimizer when scheduler is None) — confirm the
# trainer tolerates an optimizer in the scheduler slot.
builder = ModelBuilder(model=model,
                       optimizer=optimizer,
                       device=Utility.getDevice(),
                       loss_fn=lossFn,
                       scheduler=optimizer,
                       data=Data(train_loader, test_loader))

# One-epoch smoke run.
result = builder.fit(1)
print(result)
Example #9
0
# CIFAR-10 per-channel normalization statistics (RGB).
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)

# NOTE(review): mean_array is not read anywhere below in this snippet —
# presumably consumed elsewhere; verify before removing.
mean_array = np.array([*mean])

# Training augmentation: pad to 40x40, random-crop back to 32x32 (shifted
# crops), horizontal flip, one cutout hole filled with the dataset mean
# (scaled to 0-255 pixel space), then normalize and convert to tensor.
train_transforms = Compose([
    PadIfNeeded(40, 40, always_apply=True, p=1.0),
    RandomCrop(32, 32, always_apply=True, p=1.0),
    HorizontalFlip(p=0.5),
    Cutout(num_holes=1,
           max_h_size=8,
           max_w_size=8,
           fill_value=np.array([*mean]) * 255.0,
           p=0.75),
    Normalize(mean, std),
    ToTensor()
])

# Test pipeline: normalization only, no augmentation.
test_transforms = Compose([Normalize(mean, std), ToTensor()])

data = DataUtility.download_CIFAR10(Alb(train_transforms),
                                    Alb(test_transforms),
                                    batch_size=512)

net = S11Resnet().to(Utility.getDevice())
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
# LossFn wraps nll_loss with L2 regularization (l2Factor=0.01) over the
# model's parameters; one-epoch smoke run.
builder = ModelBuilder(net, data, LossFn(F.nll_loss, l2Factor=0.01, model=net),
                       optimizer)
result = builder.fit(1)