Example #1

# Common imports for the module-level functions in these examples
# (meters and utils are project-local helper modules).
import collections
import os
from itertools import chain

from tqdm import tqdm

import meters
import utils


def predict(trainer, data_loader, t_map, cuda=False):
    y_true = []
    y_pred = []
    lens = []
    treelists = []

    tot_length = len(data_loader)
    tot_loss = 0

    ivt_t_map = {v: k for k, v in t_map.items()}

    loss_meter = meters.AverageMeter()
    with tqdm(data_loader, total=tot_length) as pbar:
        for sample in pbar:
            target = sample['target']
            loss = trainer.valid_step(sample)
            output_dict, pred = trainer.pred_step(sample)
            trees = sample['tree']

            if 'att_weight' in output_dict:
                att_weights = output_dict['att_weight']
                if cuda:
                    # move tensors back to the CPU before numpy conversion
                    att_weights = [item.cpu() for item in att_weights]
                att_weights = [
                    item.numpy().flatten().tolist()
                    for item in att_weights
                ]

                # assign attention scores
                for treelist, att_weight in zip(trees, att_weights):
                    for t, s in zip(treelist, att_weight):
                        t.val = s

                    treelists.append(treelist)

            if cuda:
                pred = pred.cpu()  # cast back to cpu

            tot_loss += loss
            y_true.append(target.view(-1).numpy().tolist())
            y_pred.append(pred.view(-1).numpy().tolist())
            lens.append([len(s) for s in sample['feature']])

            loss_meter.update(loss)
            pbar.set_postfix(
                collections.OrderedDict([
                    ('loss', '{:.4f} ({:.4f})'.format(loss, loss_meter.avg))
                ]))

    y_true = list(chain.from_iterable(y_true))
    y_pred = list(chain.from_iterable(y_pred))
    lens = list(chain.from_iterable(lens))
    y_true_label = [ivt_t_map[i] for i in y_true]
    y_pred_label = [ivt_t_map[i] for i in y_pred]

    pred_tup = list(zip(y_true, y_pred, lens))

    f1_by_len = utils.analyze_f1_by_length(pred_tup, t_map)
    print(f1_by_len)
    return y_true_label, y_pred_label, treelists, f1_by_len
Example #2

    def validate(self, net, samples, e):
        transforms = generator.TransformationsGenerator([])
        dataset = datasets.ImageDataset(samples, settings.train, transforms)
        dataloader = DataLoader(
            dataset,
            num_workers=10,
            batch_size=16
        )

        average_meter_val = meters.AverageMeter()

        with tqdm(total=len(dataloader), leave=True, ascii=True) as pbar, torch.no_grad():
            net.eval()

            for images, masks_targets in dataloader:
                masks_targets = masks_targets.to(gpu)
                masks_predictions = self.predict(net, images)

                self.update_pbar(
                    masks_predictions,
                    masks_targets,
                    pbar,
                    average_meter_val,
                    'Validation epoch {}'.format(e)
                )

        val_stats = {'val_' + k: v for k, v in average_meter_val.get_all().items()}
        return val_stats
Example #3

def evaluate(trainer, data_loader, t_map, cuda=False):
    y_true = []
    y_pred = []
    tot_length = len(data_loader)
    tot_loss = 0
    loss_meter = meters.AverageMeter()
    with tqdm(data_loader, total=tot_length) as pbar:
        for sample in pbar:
            target = sample['target']
            loss = trainer.valid_step(sample)
            _, pred = trainer.pred_step(sample)
            if cuda:
                pred = pred.cpu() # cast back to cpu
            tot_loss += loss
            y_true.append(target.numpy().tolist())
            y_pred.append(pred.numpy().tolist())
            loss_meter.update(loss)
            pbar.set_postfix(
                collections.OrderedDict([
                    ('loss', '{:.4f} ({:.4f})'.format(loss, loss_meter.avg))
                ]))
    
    y_true = list(chain.from_iterable(y_true))
    y_pred = list(chain.from_iterable(y_pred))
    ivt_t_map = {v: k for k, v in t_map.items()}
    labels = [k for k, v in ivt_t_map.items() if v != 'null']
    t_names = [ivt_t_map[l] for l in labels]
    prec, rec, f1 = utils.evaluate(y_true, y_pred, labels=labels, target_names=t_names)
    avg_loss = tot_loss / tot_length
    return prec, rec, f1, avg_loss
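
The utils.evaluate call above is project-local. A minimal sketch of what it plausibly wraps, using scikit-learn; the evaluate_metrics name and the macro-averaged return values are assumptions, not the repo's actual implementation:

from sklearn.metrics import classification_report, precision_recall_fscore_support

def evaluate_metrics(y_true, y_pred, labels, target_names):
    # Per-class breakdown for inspection.
    print(classification_report(y_true, y_pred, labels=labels,
                                target_names=target_names))
    # Macro-averaged precision/recall/F1 over the given labels.
    prec, rec, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, labels=labels, average='macro')
    return prec, rec, f1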
Example #4

    def train(self, net, samples, optimizer, e):
        # Loss-mixing weight decays linearly from 2 to 0 over the first 50 epochs.
        alpha = 2 * max(0, (50 - e) / 50)
        criterion = losses.ELULovaszFocalWithLogitsLoss(alpha, 2 - alpha)

        transforms = generator.TransformationsGenerator([
            random.RandomFlipLr(),
            random.RandomAffine(
                image_size=101,
                translation=lambda rs: (rs.randint(-10, 10), rs.randint(-10, 10)),
                scale=lambda rs: (rs.uniform(0.85, 1.15), 1),
                rotation=lambda rs: rs.randint(-5, 5),
                **utils.transformations_options
            ),
            transformations.Padding(((13, 14), (13, 14), (0, 0)))
        ])

        transforms_image = generator.TransformationsGenerator([
            random.RandomColorPerturbation(std=1)
        ])

        dataset = datasets.ImageDataset(samples, settings.train, transforms, transforms_image=transforms_image)
        dataloader = DataLoader(
            dataset,
            num_workers=10,
            batch_size=16,
            shuffle=True
        )

        average_meter_train = meters.AverageMeter()

        with tqdm(total=len(dataloader), leave=False) as pbar, torch.enable_grad():
            net.train()

            for images, masks_targets in dataloader:
                masks_targets = masks_targets.to(gpu)
                # Main mask prediction plus two auxiliary attention heads,
                # each upsampled to the target size before computing its loss.
                masks_predictions, aux_pam, aux_cam = net(images)

                loss_pam = criterion(F.interpolate(aux_pam, size=128, mode='bilinear'), masks_targets)
                loss_cam = criterion(F.interpolate(aux_cam, size=128, mode='bilinear'), masks_targets)

                loss_segmentation = criterion(masks_predictions, masks_targets)
                loss = loss_segmentation + loss_pam + loss_cam

                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                average_meter_train.add('loss', loss.item())
                self.update_pbar(
                    torch.sigmoid(masks_predictions),
                    masks_targets,
                    pbar,
                    average_meter_train,
                    'Training epoch {}'.format(e)
                )

        train_stats = {'train_' + k: v for k, v in average_meter_train.get_all().items()}
        return train_stats
Example #5
    def train(self, net, samples, optimizer, e):
        alpha = 2 * max(0, ((100 - e) / 100))
        criterion = losses.ELULovaszFocalWithLogitsLoss(alpha, 2 - alpha)

        transforms = generator.TransformationsGenerator([
            random.RandomFlipLr(),
            random.RandomAffine(image_size=101,
                                translation=lambda rs:
                                (rs.randint(-20, 20), rs.randint(-20, 20)),
                                scale=lambda rs: (rs.uniform(0.85, 1.15), 1),
                                **utils.transformations_options),
            transformations.Padding(((13, 14), (13, 14), (0, 0)))
        ])

        pseudo_dataset = datasets.SemiSupervisedImageDataset(
            samples_test,
            settings.test,
            transforms,
            size=len(samples_test),
            test_predictions=self.test_predictions,
            momentum=0.0)

        dataset = datasets.ImageDataset(samples, settings.train, transforms)
        # Weight labeled samples so that, in aggregate, they are drawn twice
        # as often as the pseudo-labeled pool.
        weights = ([len(pseudo_dataset) / len(dataset) * 2] * len(dataset)
                   + [1] * len(pseudo_dataset))
        dataloader = DataLoader(ConcatDataset([dataset, pseudo_dataset]),
                                num_workers=10,
                                batch_size=16,
                                sampler=WeightedRandomSampler(
                                    weights=weights, num_samples=3200))

        average_meter_train = meters.AverageMeter()

        with tqdm(total=len(dataloader),
                  leave=False) as pbar, torch.enable_grad():
            net.train()

            for images, masks_targets in dataloader:
                masks_targets = masks_targets.to(gpu)
                masks_predictions = net(images)

                loss = criterion(masks_predictions, masks_targets)
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                average_meter_train.add('loss', loss.item())
                self.update_pbar(torch.sigmoid(masks_predictions),
                                 masks_targets, pbar, average_meter_train,
                                 'Training epoch {}'.format(e))

        train_stats = {
            'train_' + k: v
            for k, v in average_meter_train.get_all().items()
        }
        return train_stats
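
WeightedRandomSampler draws indices with replacement, with probability proportional to each sample's weight; the weighting above makes the labeled set account for twice the pseudo-labeled set's sampling mass in aggregate. A tiny self-contained demonstration with toy sizes (not the real datasets):

from torch.utils.data import WeightedRandomSampler

weights = [2.0] * 4 + [1.0] * 8  # 4 "labeled" samples, 8 "pseudo" samples
sampler = WeightedRandomSampler(weights=weights, num_samples=12)
print(list(sampler))  # each of indices 0-3 is drawn ~2x as often as each of 4-11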
Example #6

    def train(self, net, samples, optimizer, e):
        alpha = 2 * max(0, ((50 - e) / 50))
        criterion = losses.ELULovaszFocalWithLogitsLoss(alpha, 2 - alpha)

        transforms = generator.TransformationsGenerator([
            random.RandomFlipLr(),
            random.RandomAffine(
                image_size=101,
                translation=lambda rs: (rs.randint(-20, 20), rs.randint(-20, 20)),
                scale=lambda rs: (rs.uniform(0.85, 1.15), 1),
                **utils.transformations_options
            )
        ])

        dataset = datasets.ImageDataset(samples, settings.train, transforms)
        dataloader = DataLoader(
            dataset,
            num_workers=10,
            batch_size=16,
            shuffle=True
        )

        average_meter_train = meters.AverageMeter()

        with tqdm(total=len(dataloader), leave=False, ascii=True) as pbar, torch.enable_grad():
            net.train()

            padding = tta.Pad((13, 14, 13, 14))

            for images, masks_targets in dataloader:
                masks_targets = masks_targets.to(gpu)
                # Pad inputs up for the network, then crop predictions back to the input size.
                masks_predictions = padding.transform_backward(
                    net(padding.transform_forward(images))).contiguous()

                loss = criterion(masks_predictions, masks_targets)
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                average_meter_train.add('loss', loss.item())
                self.update_pbar(
                    torch.sigmoid(masks_predictions),
                    masks_targets,
                    pbar,
                    average_meter_train,
                    'Training epoch {}'.format(e)
                )

        train_stats = {'train_' + k: v for k, v in average_meter_train.get_all().items()}
        return train_stats
Example #7
def train(data_loader, trainer, epoch):
    tot_length = sum(len(t) for t in data_loader)
    loss_meter = meters.AverageMeter()
    lr = trainer.get_lr()
    with tqdm(chain.from_iterable(data_loader), total=tot_length, desc=' Epoch {}'.format(epoch)) as pbar:
        for sample in pbar:
            loss, t = trainer.train_step(sample)
            loss_meter.update(loss)
            pbar.set_postfix(collections.OrderedDict([
                ('loss', '{:.4f} ({:.4f})'.format(loss, loss_meter.avg)),
                ('lr', '{:.4f}'.format(lr)),
                ('t0', t['prepare']),
                ('t1', t['forward']),
                ('t2', t['backward']),
            ]))
    return loss_meter.avg
Example #8

def train(data_loader, trainer, epoch, q=None):
    tot_length = len(data_loader)
    loss_meter = meters.AverageMeter()
    lr = trainer.get_lr()
    with tqdm(data_loader, total=tot_length, desc=' Epoch {}'.format(epoch)) as pbar:
        for sample in pbar:
            loss, t = trainer.train_step(sample)
            loss_meter.update(loss)
            pbar.set_postfix(collections.OrderedDict([
                ('loss', '{:.4f} ({:.4f})'.format(loss, loss_meter.avg)),
                ('lr', '{:.4f}'.format(lr)),
                ('t0', t['prepare']),
                ('t1', t['forward']),
                ('t2', t['backward']),
                ('pid', '{:d}'.format(os.getpid())),
            ]))
    epoch_loss = loss_meter.avg
    if q is None:
        return epoch_loss
    else:
        q.put(epoch_loss)
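
The optional q argument lets this train() run in a worker process and report the epoch loss back to the parent. A hypothetical usage sketch with torch.multiprocessing (the run_worker name and process setup are illustrative assumptions):

import torch.multiprocessing as mp

def run_worker(data_loader, trainer, epoch):
    q = mp.Queue()
    p = mp.Process(target=train, args=(data_loader, trainer, epoch, q))
    p.start()
    epoch_loss = q.get()  # blocks until the worker puts its epoch loss
    p.join()
    return epoch_loss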
Example #9
    def create_df(self):
        path_csv = os.path.join('./logs', 'experiments')

        average_meter = meters.AverageMeter()
        for stats in self.stats.values():
            for k, v in stats.items():
                average_meter.add(k, v)

        df = pd.DataFrame(average_meter.get_all(), index=[self.name])
        df = df.rename_axis("experiment")
        if os.path.exists(path_csv):
            old_df = pd.read_csv(path_csv, delim_whitespace=True, index_col=0)
            if self.mode == 'val':
                old_df = old_df[['val_iou', 'val_mAP']]

            df = pd.concat([df, old_df])

            df = df[~df.index.duplicated(keep='first')]

        df = df.sort_values(by='val_mAP', ascending=False)

        return df
Example #10
    def train(self, net, samples, optimizer, e):
        alpha = 2 * max(0, ((50 - e) / 50))
        criterion = losses.ELULovaszFocalWithLogitsLoss(alpha, 2 - alpha)

        transforms = generator.TransformationsGenerator([
            random.RandomFlipLr(),
            random.RandomAffine(image_size=101,
                                translation=lambda rs:
                                (rs.randint(-20, 20), rs.randint(-20, 20)),
                                scale=lambda rs: (rs.uniform(0.85, 1.15), 1),
                                **utils.transformations_options)
        ])

        samples_aux = list(
            set(samples).intersection(set(utils.get_aux_samples())))
        dataset_aux = datasets.BoundaryImageDataset(samples_aux,
                                                    settings.train, transforms)

        dataset_pseudo = datasets.BoundarySemiSupervisedImageDataset(
            samples_test,
            settings.test,
            transforms,
            size=len(samples_test),
            test_predictions=self.test_predictions,
            momentum=0.0)

        dataset = datasets.BoundaryImageDataset(samples, settings.train,
                                                transforms)
        weight_train = len(dataset_pseudo) / len(dataset) * 2
        weight_aux = weight_train / 2
        weights = ([weight_train] * len(dataset)
                   + [weight_aux] * len(dataset_aux)
                   + [1] * len(dataset_pseudo))
        dataloader = DataLoader(
            ConcatDataset([dataset, dataset_aux, dataset_pseudo]),
            num_workers=10,
            batch_size=16,
            sampler=WeightedRandomSampler(weights=weights, num_samples=3200))

        average_meter_train = meters.AverageMeter()

        with tqdm(total=len(dataloader), leave=False,
                  ascii=True) as pbar, torch.enable_grad():
            net.train()

            padding = tta.Pad((13, 14, 13, 14))
            criterion_boundary = losses.BCEWithLogitsLoss()

            for images, masks_targets, boundary_targets in dataloader:
                masks_targets = masks_targets.to(gpu)
                boundary_targets = boundary_targets.to(gpu)

                masks_predictions, boundary_predictions = net(
                    padding.transform_forward(images))
                masks_predictions = padding.transform_backward(
                    masks_predictions).contiguous()
                boundary_predictions = padding.transform_backward(
                    boundary_predictions).contiguous()

                loss = (0.1 * criterion(masks_predictions, masks_targets)
                        + criterion_boundary(boundary_predictions, boundary_targets))
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                average_meter_train.add('loss', loss.item())
                self.update_pbar(torch.sigmoid(masks_predictions),
                                 masks_targets, pbar, average_meter_train,
                                 'Training epoch {}'.format(e))

        train_stats = {
            'train_' + k: v
            for k, v in average_meter_train.get_all().items()
        }
        return train_stats
Example #11
    def __init__(self):
        self.meter = meters.AverageMeter()
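
All of these examples lean on meters.AverageMeter, a project-local helper used in two styles: update(value)/avg for a single running mean, and add(key, value)/get_all() for named running means. A minimal sketch that supports both (an assumption about the real implementation, not a copy of it):

import collections

class AverageMeter:
    def __init__(self):
        self._sums = collections.defaultdict(float)
        self._counts = collections.defaultdict(int)

    def update(self, value, key='loss'):
        # Accumulate a running mean for the given key.
        self._sums[key] += value
        self._counts[key] += 1

    def add(self, key, value):
        self.update(value, key=key)

    @property
    def avg(self):
        # Mean of the default key, as used by loss_meter.avg above.
        return self._sums['loss'] / max(self._counts['loss'], 1)

    def get_all(self):
        return {k: self._sums[k] / self._counts[k] for k in self._sums}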