Example #1
def validate(model, data_loader, criterion):
    print('validating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss1', 'acc')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            img = pack['img']

            label = pack['label'].cuda(non_blocking=True)

            x = model(img)
            loss1 = criterion(x, label)
            acc = accuracy(x, label)

            val_loss_meter.add({'loss1': loss1.item(), 'acc': acc})

    model.train()

    val_loss = val_loss_meter.pop('loss1')
    val_acc = val_loss_meter.pop('acc')
    print('loss: %.4f' % val_loss, 'acc: %.4f' % val_acc)

    return val_loss
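
The accuracy helper used above is not included in the snippet. A minimal sketch for the multi-label case, assuming sigmoid scores thresholded at 0.5 (a hypothetical stand-in, not the original implementation):

import torch

def accuracy(logits, labels, threshold=0.5):
    # Fraction of correctly predicted label entries (multi-label setting).
    preds = (torch.sigmoid(logits) > threshold).float()
    return (preds == labels).float().mean().item()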
Example #2
def validate(model, data_loader):
    print('validating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss1', 'loss2')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            img = pack['img']

            label = pack['label'].cuda(non_blocking=True)

            x = model(img)
            loss1 = F.multilabel_soft_margin_loss(x, label)

            val_loss_meter.add({'loss1': loss1.item()})

    model.train()

    validation_loss = (val_loss_meter.pop('loss1'))

    print('loss: %.4f' % validation_loss)

    return validation_loss
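
pyutils.AverageMeter comes from the authors' utility module and is not shown here. A minimal sketch matching the add/pop usage in these examples (an assumption, not the original code):

class AverageMeter:
    # Keeps a running (sum, count) per named value; pop() returns the mean and resets that key.
    def __init__(self, *keys):
        self.data = {}

    def add(self, values):
        for key, value in values.items():
            total, count = self.data.get(key, (0.0, 0))
            self.data[key] = (total + value, count + 1)

    def pop(self, key):
        total, count = self.data.pop(key, (0.0, 0))
        return total / count if count else 0.0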
Example #3
def validate(model, data_loader):
    print('validating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss1', 'loss2')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            ############################# modified code #############################
            img = pack['img']
            label = pack['label'].cuda(non_blocking=True)
            aug_img = pack['aug_img']
            aug_label = pack['aug_label'].cuda(non_blocking=True)
            con_imgs = torch.cat([img, aug_img], 0)
            con_labels = torch.cat([label, aug_label], 0)
            x = model(con_imgs)

            loss1 = F.multilabel_soft_margin_loss(x, con_labels)
            ############################# modified code #############################

            val_loss_meter.add({'loss1': loss1.item()})

    model.train()

    print('loss: %.4f' % (val_loss_meter.pop('loss1')))

    return
Example #4
def validate(model, data_loader):
    print('validating ... ', flush=True, end='')
    val_loss_meter = pyutils.AverageMeter('loss1', 'loss2')
    model.eval()
    loss_func = nn.BCEWithLogitsLoss()

    acc = 0
    c = 0
    with torch.no_grad():
        for pack in data_loader:
            img = pack['image'].cuda(non_blocking=True)
            label = pack['label'].cuda(non_blocking=True)
            x = model(img)
            x = torch.squeeze(x)
            label = label.float()
            loss1 = loss_func(x, label)
            # loss1 = F.multilabel_soft_margin_loss(x, label)
            val_loss_meter.add({'loss1': loss1.item()})

            # compute acc
            x = torch.sigmoid(x).round()
            # x_cpu = x.cpu().data.numpy()
            acc += (x == label).sum()
            c += x.view(-1).shape[0]

    model.train()
    print('loss: %.4f' % (val_loss_meter.pop('loss1')),
          'acc: %.4f' % (acc / c))
    return
Example #5
def run(args):
    model = getattr(importlib.import_module(args.cam_network), 'Net')()
    train_dataset = voc12.dataloader.VOC12ClassificationDataset(args.train_list, voc12_root=args.voc12_root,
                                                                resize_long=(320, 640), hor_flip=True,
                                                                crop_size=512, crop_method="random")
    train_data_loader = DataLoader(train_dataset, batch_size=args.cam_batch_size,
                                   shuffle=True, num_workers=args.num_workers, pin_memory=True, drop_last=True)
    max_step = (len(train_dataset) // args.cam_batch_size) * args.cam_num_epoches

    val_dataset = voc12.dataloader.VOC12ClassificationDataset(args.val_list, voc12_root=args.voc12_root,
                                                              crop_size=512)
    val_data_loader = DataLoader(val_dataset, batch_size=args.cam_batch_size,
                                 shuffle=False, num_workers=args.num_workers, pin_memory=True, drop_last=True)
    print('train_cam  val_data_loader')
    param_groups = model.trainable_parameters()
    optimizer = torchutils.PolyOptimizer([
        {'params': param_groups[0], 'lr': args.cam_learning_rate, 'weight_decay': args.cam_weight_decay},
        {'params': param_groups[1], 'lr': 10*args.cam_learning_rate, 'weight_decay': args.cam_weight_decay},
    ], lr=args.cam_learning_rate, weight_decay=args.cam_weight_decay, max_step=max_step)

    model = torch.nn.DataParallel(model).cuda()
    model.train()

    avg_meter = pyutils.AverageMeter()

    timer = pyutils.Timer()

    for ep in range(args.cam_num_epoches):
        print('Epoch %d/%d' % (ep+1, args.cam_num_epoches))
        for step, pack in enumerate(train_data_loader):
            img = pack['img']
            label = pack['label'].cuda(non_blocking=True)
            x = model(img)
            loss = F.multilabel_soft_margin_loss(x, label)
            avg_meter.add({'loss1': loss.item()})
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (optimizer.global_step-1)%100 == 0:
                timer.update_progress(optimizer.global_step / max_step)
                print('step:%5d/%5d' % (optimizer.global_step - 1, max_step),
                      'loss:%.4f' % (avg_meter.pop('loss1')),
                      'imps:%.1f' % ((step + 1) * args.cam_batch_size / timer.get_stage_elapsed()),
                      'lr: %.4f' % (optimizer.param_groups[0]['lr']),
                      'etc:%s' % (timer.str_estimated_complete()), flush=True)

        else:
            validate(model, val_data_loader)
            timer.reset_stage()

    torch.save(model.module.state_dict(), args.cam_weights_name + '.pth')
    torch.cuda.empty_cache()
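
torchutils.PolyOptimizer is likewise external. Judging by how global_step, param_groups and max_step are read in these examples, it behaves like SGD with a polynomially decaying learning rate; a simplified sketch under that assumption:

import torch

class PolyOptimizer(torch.optim.SGD):
    # SGD variant whose per-group lr decays as base_lr * (1 - step / max_step) ** power.
    def __init__(self, params, lr, weight_decay, max_step, momentum=0.9, power=0.9):
        super().__init__(params, lr, momentum=momentum, weight_decay=weight_decay)
        self.global_step = 0
        self.max_step = max_step
        self.power = power
        self.base_lrs = [group['lr'] for group in self.param_groups]

    def step(self, closure=None):
        if self.global_step < self.max_step:
            scale = (1 - self.global_step / self.max_step) ** self.power
            for group, base_lr in zip(self.param_groups, self.base_lrs):
                group['lr'] = base_lr * scale
        super().step(closure)
        self.global_step += 1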
Example #6
def run(args):
    path_index = indexing.PathIndex(radius=10,
                                    default_size=(args.irn_crop_size // 4,
                                                  args.irn_crop_size // 4))
    model = getattr(importlib.import_module(args.irn_network),
                    'AffinityDisplacementLoss')(path_index)

    train_dataset = voc12.dataloader.VOC12AffinityDataset(
        args.train_list,
        label_dir=args.ir_label_out_dir,
        voc12_root=args.voc12_root,
        indices_from=path_index.src_indices,
        indices_to=path_index.dst_indices,
        hor_flip=True,
        crop_size=args.irn_crop_size,
        crop_method="random",
        rescale=(0.5, 1.5))
    train_data_loader = DataLoader(train_dataset,
                                   batch_size=args.irn_batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   pin_memory=True,
                                   drop_last=True)

    max_step = (len(train_dataset) //
                args.irn_batch_size) * args.irn_num_epoches

    param_groups = model.trainable_parameters()
    optimizer = torchutils.PolyOptimizer([{
        'params': param_groups[0],
        'lr': 1 * args.irn_learning_rate,
        'weight_decay': args.irn_weight_decay
    }, {
        'params': param_groups[1],
        'lr': 10 * args.irn_learning_rate,
        'weight_decay': args.irn_weight_decay
    }],
                                         lr=args.irn_learning_rate,
                                         weight_decay=args.irn_weight_decay,
                                         max_step=max_step)

    model = torch.nn.DataParallel(model).cuda()
    model.train()

    avg_meter = pyutils.AverageMeter()
    timer = pyutils.Timer()
    for ep in range(args.irn_num_epoches):
        print('Epoch %d/%d' % (ep + 1, args.irn_num_epoches))
        for iter, pack in enumerate(train_data_loader):
            img = pack['img'].cuda(non_blocking=True)
            bg_pos_label = pack['aff_bg_pos_label'].cuda(non_blocking=True)
            fg_pos_label = pack['aff_fg_pos_label'].cuda(non_blocking=True)
            neg_label = pack['aff_neg_label'].cuda(non_blocking=True)

            pos_aff_loss, neg_aff_loss, dp_fg_loss, dp_bg_loss = model(
                img, True)

            bg_pos_aff_loss = torch.sum(
                bg_pos_label * pos_aff_loss) / (torch.sum(bg_pos_label) + 1e-5)
            fg_pos_aff_loss = torch.sum(
                fg_pos_label * pos_aff_loss) / (torch.sum(fg_pos_label) + 1e-5)
            pos_aff_loss = bg_pos_aff_loss / 2 + fg_pos_aff_loss / 2
            neg_aff_loss = torch.sum(
                neg_label * neg_aff_loss) / (torch.sum(neg_label) + 1e-5)

            dp_fg_loss = torch.sum(dp_fg_loss * torch.unsqueeze(
                fg_pos_label, 1)) / (2 * torch.sum(fg_pos_label) + 1e-5)
            dp_bg_loss = torch.sum(dp_bg_loss * torch.unsqueeze(
                bg_pos_label, 1)) / (2 * torch.sum(bg_pos_label) + 1e-5)
            avg_meter.add({
                'loss1': pos_aff_loss.item(),
                'loss2': neg_aff_loss.item(),
                'loss3': dp_fg_loss.item(),
                'loss4': dp_bg_loss.item()
            })
            total_loss = (pos_aff_loss + neg_aff_loss) / 2 + (dp_fg_loss +
                                                              dp_bg_loss) / 2

            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()
            if (optimizer.global_step - 1) % 50 == 0:
                timer.update_progress(optimizer.global_step / max_step)

                print('step:%5d/%5d' % (optimizer.global_step - 1, max_step),
                      'loss:%.4f %.4f %.4f %.4f' %
                      (avg_meter.pop('loss1'), avg_meter.pop('loss2'),
                       avg_meter.pop('loss3'), avg_meter.pop('loss4')),
                      'imps:%.1f' % ((iter + 1) * args.irn_batch_size /
                                     timer.get_stage_elapsed()),
                      'lr: %.4f' % (optimizer.param_groups[0]['lr']),
                      'etc:%s' % (timer.str_estimated_complete()),
                      flush=True)
        else:
            timer.reset_stage()

    infer_dataset = voc12.dataloader.VOC12ImageDataset(
        args.infer_list,
        voc12_root=args.voc12_root,
        crop_size=args.irn_crop_size,
        crop_method="top_left")
    infer_data_loader = DataLoader(infer_dataset,
                                   batch_size=args.irn_batch_size,
                                   shuffle=False,
                                   num_workers=args.num_workers,
                                   pin_memory=True,
                                   drop_last=True)

    model.eval()
    print('Analyzing displacements mean ... ', end='')

    dp_mean_list = []

    with torch.no_grad():
        for iter, pack in enumerate(infer_data_loader):
            img = pack['img'].cuda(non_blocking=True)
            aff, dp = model(img, False)
            dp_mean_list.append(torch.mean(dp, dim=(0, 2, 3)).cpu())
        model.module.mean_shift.running_mean = torch.mean(
            torch.stack(dp_mean_list), dim=0)
    print('done.')

    torch.save(model.module.state_dict(), args.irn_weights_name)
    torch.cuda.empty_cache()
Example #7
    optimizer = torchutils.PolyOptimizer([
        {
            'params': param_groups[0],
            'lr': args.cam_learning_rate,
            'weight_decay': args.cam_weight_decay
        },
        {
            'params': param_groups[1],
            'lr': 10 * args.cam_learning_rate,
            'weight_decay': args.cam_weight_decay
        },
    ],
                                         lr=args.cam_learning_rate,
                                         weight_decay=args.cam_weight_decay,
                                         max_step=max_step)
    avg_meter = pyutils.AverageMeter()

    model = torch.nn.DataParallel(model).to(device)
    model.train()

    for epoch in range(args.cam_num_epoches):
        logger.info('Epoch %d/%d' % (epoch + 1, args.cam_num_epoches))
        train_loss_sum = 0
        train_iter_sum = 0
        for step, pack in tqdm.tqdm(enumerate(train_dataloader)):
            img = pack['img'].cuda()
            label = pack['label'].cuda(non_blocking=True)

            x = model(img)
            loss = F.multilabel_soft_margin_loss(x, label)
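
Example #7 breaks off right after the loss computation. The remaining steps of the training loop, sketched to match the conventions of the other examples (avg_meter and optimizer are assumed to be set up as above):

            avg_meter.add({'loss1': loss.item()})

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()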
Example #8
def run(args):

    path_index = adv_indexing.PathIndex(radius=10,
                                        default_size=(args.crop_size // 4,
                                                      args.crop_size // 4))

    model = getattr(importlib.import_module(args.network),
                    'AffinityDisplacement')(
                        path_index.default_path_indices,
                        torch.from_numpy(path_index.default_src_indices),
                        torch.from_numpy(path_index.default_dst_indices))

    train_dataset = voc12.data.VOC12DisplacementAffinityDataset(
        args.train_list,
        label_dir=args.label_dir,
        voc12_root=args.voc12_root,
        indices_from=path_index.default_src_indices,
        indices_to=path_index.default_dst_indices,
        img_normal=model.normalize,
        hor_flip=True,
        crop_size=args.crop_size,
    )

    train_data_loader = DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   pin_memory=True,
                                   drop_last=True)

    max_step = (len(train_dataset) // args.batch_size) * args.num_epoches

    param_groups = model.trainable_parameters()
    optimizer = torchutils.PolyOptimizer([{
        'params': param_groups[0],
        'lr': 1 * args.learning_rate,
        'weight_decay': args.weight_decay
    }, {
        'params': param_groups[1],
        'lr': 10 * args.learning_rate,
        'weight_decay': args.weight_decay
    }],
                                         lr=args.learning_rate,
                                         weight_decay=args.weight_decay,
                                         max_step=max_step)

    model = model.cuda()
    model.train()

    avg_meter = pyutils.AverageMeter()

    timer = pyutils.Timer()

    for ep in range(args.num_epoches):

        print('Epoch %d/%d' % (ep + 1, args.num_epoches))

        for iter, pack in enumerate(train_data_loader):

            img = pack['img'].cuda(non_blocking=True)
            bg_pos_label = pack['aff_bg_pos_label'].cuda(non_blocking=True)
            fg_pos_label = pack['aff_fg_pos_label'].cuda(non_blocking=True)
            neg_label = pack['aff_neg_label'].cuda(non_blocking=True)

            aff, dp = model(img)

            dp = path_index.to_displacement(dp)

            bg_pos_aff_loss = torch.sum(
                -bg_pos_label *
                torch.log(aff + 1e-5)) / (torch.sum(bg_pos_label) + 1e-5)
            fg_pos_aff_loss = torch.sum(
                -fg_pos_label *
                torch.log(aff + 1e-5)) / (torch.sum(fg_pos_label) + 1e-5)
            pos_aff_loss = bg_pos_aff_loss / 2 + fg_pos_aff_loss / 2

            neg_aff_loss = torch.sum(
                -neg_label *
                torch.log(1. + 1e-5 - aff)) / (torch.sum(neg_label) + 1e-5)

            dp_fg_loss = torch.sum(
                path_index.to_displacement_loss(dp) * torch.unsqueeze(
                    fg_pos_label, 1)) / (2 * torch.sum(fg_pos_label) + 1e-5)

            dp_bg_loss = torch.sum(
                torch.abs(dp) * torch.unsqueeze(bg_pos_label, 1)) / (
                    2 * torch.sum(bg_pos_label) + 1e-5)

            avg_meter.add({
                'loss1': pos_aff_loss.item(),  # .item() so the meter does not hold the autograd graph
                'loss2': neg_aff_loss.item(),
                'loss3': dp_fg_loss.item(),
                'loss4': dp_bg_loss.item()
            })

            total_loss = (pos_aff_loss + neg_aff_loss) / 2 + (dp_fg_loss +
                                                              dp_bg_loss) / 2

            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

            if (optimizer.global_step - 1) % 50 == 0:
                timer.update_progress(optimizer.global_step / max_step)

                print(
                    'step:%5d/%5d' % (optimizer.global_step - 1, max_step),
                    'loss:%.4f %.4f %.4f %.4f' %
                    (avg_meter.pop('loss1'), avg_meter.pop('loss2'),
                     avg_meter.pop('loss3'), avg_meter.pop('loss4')),
                    'imps:%.1f' %
                    ((iter + 1) * args.batch_size / timer.get_stage_elapsed()),
                    'lr: %.4f' % (optimizer.param_groups[0]['lr']),
                    'etc:%s' % (timer.str_estimated_complete()),
                    flush=True)
        else:
            timer.reset_stage()

    torch.save(model.state_dict(), args.irn_weights_name)

    torch.cuda.empty_cache()
Example #9
def run(args):

    path_index = indexing.PathIndex(radius=10,
                                    default_size=(args.irn_crop_size // 4,
                                                  args.irn_crop_size // 4))

    model = getattr(importlib.import_module(args.irn_network),
                    'AffinityDisplacementLoss')(path_index, args.model_dir,
                                                args.dataset, args.tag,
                                                args.num_classes, args.use_cls)
    if args.dataset == 'voc12':
        train_dataset = voc12.dataloader.VOC12AffinityDataset(
            args.train_list,
            label_dir=args.ir_label_out_dir,
            dev_root=args.dev_root,
            indices_from=path_index.src_indices,
            indices_to=path_index.dst_indices,
            hor_flip=True,
            crop_size=args.irn_crop_size,
            crop_method=args.crop_method,
            rescale=args.rescale_range,
            outsize=args.outsize,
            norm_mode=args.norm_mode)
        infer_dataset = voc12.dataloader.VOC12ImageDataset(
            args.infer_list,
            dev_root=args.dev_root,
            crop_size=args.irn_crop_size,
            crop_method="top_left")
    elif args.dataset in ['adp_morph', 'adp_func']:
        train_dataset = adp.dataloader.ADPAffinityDataset(
            args.train_list,
            is_eval=args.dataset == 'evaluation',
            label_dir=args.ir_label_out_dir,
            dev_root=args.dev_root,
            htt_type=args.dataset.split('_')[-1],
            indices_from=path_index.src_indices,
            indices_to=path_index.dst_indices,
            hor_flip=True,
            crop_size=args.irn_crop_size,
            crop_method=args.crop_method,
            rescale=args.rescale_range,
            outsize=args.outsize,
            norm_mode=args.norm_mode)
        infer_dataset = adp.dataloader.ADPImageDataset(
            args.infer_list,
            dev_root=args.dev_root,
            htt_type=args.dataset.split('_')[-1],
            is_eval=args.dataset == 'evaluation',
            crop_size=args.irn_crop_size,
            crop_method="top_left")
    elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
        train_dataset = deepglobe.dataloader.DeepGlobeAffinityDataset(
            args.train_list,
            is_balanced=args.dataset == 'deepglobe_balanced',
            label_dir=args.ir_label_out_dir,
            dev_root=args.dev_root,
            indices_from=path_index.src_indices,
            indices_to=path_index.dst_indices,
            hor_flip=True,
            crop_size=args.irn_crop_size,
            crop_method=args.crop_method,
            rescale=args.rescale_range,
            outsize=args.outsize,
            norm_mode=args.norm_mode)
        infer_dataset = deepglobe.dataloader.DeepGlobeImageDataset(
            args.infer_list,
            dev_root=args.dev_root,
            is_balanced=args.dataset == 'deepglobe_balanced',
            crop_size=args.irn_crop_size,
            crop_method="top_left")
    else:
        raise KeyError('Dataset %s not yet implemented' % args.dataset)

    train_data_loader = DataLoader(train_dataset,
                                   batch_size=args.irn_batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   pin_memory=True,
                                   drop_last=True)

    max_step = (len(train_dataset) //
                args.irn_batch_size) * args.irn_num_epoches

    param_groups = model.trainable_parameters()
    optimizer = torchutils.PolyOptimizer([{
        'params': param_groups[0],
        'lr': 1 * args.irn_learning_rate,
        'weight_decay': args.irn_weight_decay
    }, {
        'params': param_groups[1],
        'lr': 10 * args.irn_learning_rate,
        'weight_decay': args.irn_weight_decay
    }],
                                         lr=args.irn_learning_rate,
                                         weight_decay=args.irn_weight_decay,
                                         max_step=max_step)

    model = torch.nn.DataParallel(model).cuda()
    model.train()

    # writer = SummaryWriter('log_tb/' + args.run_name)

    avg_meter = pyutils.AverageMeter()

    timer = pyutils.Timer()

    for ep in range(args.irn_num_epoches):

        print('Epoch %d/%d' % (ep + 1, args.irn_num_epoches))

        for iter, pack in enumerate(train_data_loader):

            img = pack['img'].cuda(non_blocking=True)
            bg_pos_label = pack['aff_bg_pos_label'].cuda(non_blocking=True)
            fg_pos_label = pack['aff_fg_pos_label'].cuda(non_blocking=True)
            neg_label = pack['aff_neg_label'].cuda(non_blocking=True)

            pos_aff_loss, neg_aff_loss, dp_fg_loss, dp_bg_loss = model(
                img, True)

            bg_pos_aff_loss = torch.sum(
                bg_pos_label * pos_aff_loss) / (torch.sum(bg_pos_label) + 1e-5)
            fg_pos_aff_loss = torch.sum(
                fg_pos_label * pos_aff_loss) / (torch.sum(fg_pos_label) + 1e-5)
            pos_aff_loss = bg_pos_aff_loss / 2 + fg_pos_aff_loss / 2
            neg_aff_loss = torch.sum(
                neg_label * neg_aff_loss) / (torch.sum(neg_label) + 1e-5)

            dp_fg_loss = torch.sum(dp_fg_loss * torch.unsqueeze(
                fg_pos_label, 1)) / (2 * torch.sum(fg_pos_label) + 1e-5)
            dp_bg_loss = torch.sum(dp_bg_loss * torch.unsqueeze(
                bg_pos_label, 1)) / (2 * torch.sum(bg_pos_label) + 1e-5)

            avg_meter.add({
                'loss1': pos_aff_loss.item(),
                'loss2': neg_aff_loss.item(),
                'loss3': dp_fg_loss.item(),
                'loss4': dp_bg_loss.item()
            })

            total_loss = (pos_aff_loss + neg_aff_loss) / 2 + (dp_fg_loss +
                                                              dp_bg_loss) / 2

            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

            if (optimizer.global_step - 1) % 50 == 0:
                timer.update_progress(optimizer.global_step / max_step)
                losses = {}
                for i in range(1, 5):
                    losses[str(i)] = avg_meter.pop('loss' + str(i))

                print('step:%5d/%5d' % (optimizer.global_step - 1, max_step),
                      'loss:%.4f %.4f %.4f %.4f' %
                      (losses['1'], losses['2'], losses['3'], losses['4']),
                      'imps:%.1f' % ((iter + 1) * args.irn_batch_size /
                                     timer.get_stage_elapsed()),
                      'lr: %.4f' % (optimizer.param_groups[0]['lr']),
                      'etc:%s' % (timer.str_estimated_complete()),
                      flush=True)
                # writer.add_scalar('step', optimizer.global_step, ep * len(train_data_loader) + iter)
                # writer.add_scalar('loss', losses['1']+losses['2']+losses['3']+losses['4'],
                #                   ep * len(train_data_loader) + iter)
                # writer.add_scalar('lr', optimizer.param_groups[0]['lr'], ep * len(train_data_loader) + iter)
        else:
            timer.reset_stage()
    infer_data_loader = DataLoader(infer_dataset,
                                   batch_size=args.irn_batch_size,
                                   shuffle=False,
                                   num_workers=args.num_workers,
                                   pin_memory=True,
                                   drop_last=True)

    model.eval()
    print('Analyzing displacements mean ... ', end='')

    dp_mean_list = []

    with torch.no_grad():
        for iter, pack in enumerate(infer_data_loader):
            img = pack['img'].cuda(non_blocking=True)

            aff, dp = model(img, False)

            dp_mean_list.append(torch.mean(dp, dim=(0, 2, 3)).cpu())

        model.module.mean_shift.running_mean = torch.mean(
            torch.stack(dp_mean_list), dim=0)
    print('done.')

    torch.save(model.module.state_dict(), args.irn_weights_name)
    torch.cuda.empty_cache()
Example #10
def run(args):
    if args.dataset == 'l8biome':
        model = getattr(importlib.import_module(args.cam_network),
                        'Net')(n_classes=2, in_channels=10, pretrained=False)
        train_dataset = l8biome.dataloader.L8BiomeDataset(args.data_root,
                                                          'train',
                                                          one_hot_labels=False)
        val_dataset = l8biome.dataloader.L8BiomeDataset(
            args.data_root, 'val',
            one_hot_labels=False)  # we train with crossentropy
        criterion = F.cross_entropy  # clear vs cloudy
    else:
        model = getattr(importlib.import_module(args.cam_network),
                        'Net')(n_classes=20, in_channels=3)
        train_dataset = voc12.dataloader.VOC12ClassificationDataset(
            args.train_list,
            voc12_root=args.data_root,
            resize_long=(320, 640),
            hor_flip=True,
            crop_size=512,
            crop_method="random")

        val_dataset = voc12.dataloader.VOC12ClassificationDataset(
            args.val_list, voc12_root=args.data_root, crop_size=512)
        criterion = F.multilabel_soft_margin_loss

    max_step = (len(train_dataset) //
                args.cam_batch_size) * args.cam_num_epoches
    train_data_loader = DataLoader(train_dataset,
                                   batch_size=args.cam_batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   pin_memory=True,
                                   drop_last=True)
    val_data_loader = DataLoader(val_dataset,
                                 batch_size=args.cam_batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 pin_memory=True,
                                 drop_last=True)

    param_groups = model.trainable_parameters()
    optimizer = torchutils.PolyOptimizer([
        {
            'params': param_groups[0],
            'lr': args.cam_learning_rate,
            'weight_decay': args.cam_weight_decay
        },
        {
            'params': param_groups[1],
            'lr': 10 * args.cam_learning_rate,
            'weight_decay': args.cam_weight_decay
        },
    ],
                                         lr=args.cam_learning_rate,
                                         weight_decay=args.cam_weight_decay,
                                         max_step=max_step)

    print(model)
    print(
        f"Number of parameters: {sum([p.numel() for p in model.parameters()]):,}"
    )

    model = torch.nn.DataParallel(model).cuda()
    model.train()

    avg_meter = pyutils.AverageMeter('loss1', 'acc')

    timer = pyutils.Timer()
    best_val_loss = np.inf

    for ep in range(args.cam_num_epoches):

        print('Epoch %d/%d' % (ep + 1, args.cam_num_epoches))

        for step, pack in enumerate(
                tqdm(train_data_loader,
                     f'Epoch {ep + 1}/{args.cam_num_epoches}')):

            img = pack['img']
            label = pack['label'].cuda(non_blocking=True)

            x = model(img)
            loss = criterion(x, label)

            avg_meter.add({'loss1': loss.item(), 'acc': accuracy(x, label)})

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (optimizer.global_step - 1) % 100 == 0:
                timer.update_progress(optimizer.global_step / max_step)

                print('step:%5d/%5d' % (optimizer.global_step - 1, max_step),
                      'loss:%.4f' % (avg_meter.pop('loss1')),
                      'acc:%.4f' % (avg_meter.pop('acc')),
                      'imps:%.1f' % ((step + 1) * args.cam_batch_size /
                                     timer.get_stage_elapsed()),
                      'lr: %.4f' % (optimizer.param_groups[0]['lr']),
                      'etc:%s' % (timer.str_estimated_complete()),
                      flush=True)

        else:
            val_loss = validate(model, val_data_loader, criterion)
            if val_loss < best_val_loss:
                print(
                    f'Validation loss improved from {best_val_loss} to {val_loss}, saving model'
                )
                torch.save(model.module.state_dict(),
                           args.cam_weights_name + '.pth')
                best_val_loss = val_loss
            else:
                print(f'Validation loss did not improve from {best_val_loss}')

            timer.reset_stage()

    torch.cuda.empty_cache()
Example #11
def run(args):
    path_index = indexing.PathIndex(radius=10,
                                    default_size=(args.irn_crop_size // 4,
                                                  args.irn_crop_size // 4))

    model = getattr(importlib.import_module(args.irn_network),
                    'AffinityDisplacementLoss')(path_index)

    transform_config = {
        'augmentation_scope': 'horizontal_flip',
        'images_normalization': 'default',
        'images_output_format_type': 'float',
        'masks_normalization': 'none',
        'masks_output_format_type': 'byte',
        'size': 512,
        'size_transform': 'resize'
    }
    transform = get_transforms(transform_config)

    train_dataset = voc12.dataloader.PneumothoraxAffinityDataset(
        '/datasets/LID/Pneumothorax/train/train_all_positive.csv',
        transform=transform,
        indices_from=path_index.src_indices,
        indices_to=path_index.dst_indices,
    )

    train_data_loader = DataLoader(train_dataset,
                                   batch_size=args.irn_batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   pin_memory=True,
                                   drop_last=True)

    max_step = (len(train_dataset) //
                args.irn_batch_size) * args.irn_num_epoches

    param_groups = model.trainable_parameters()
    optimizer = torchutils.PolyOptimizer([{
        'params': param_groups[0],
        'lr': 1 * args.irn_learning_rate,
        'weight_decay': args.irn_weight_decay
    }, {
        'params': param_groups[1],
        'lr': 10 * args.irn_learning_rate,
        'weight_decay': args.irn_weight_decay
    }],
                                         lr=args.irn_learning_rate,
                                         weight_decay=args.irn_weight_decay,
                                         max_step=max_step)

    model = torch.nn.DataParallel(model.cuda(1),
                                  device_ids=['cuda:1', 'cuda:2'])
    model.train()

    avg_meter = pyutils.AverageMeter()

    timer = pyutils.Timer()

    for ep in range(args.irn_num_epoches):

        print('Epoch %d/%d' % (ep + 1, args.irn_num_epoches))

        for iter, pack in enumerate(train_data_loader):

            img = pack['img']
            bg_pos_label = pack['aff_bg_pos_label'].cuda(1, non_blocking=True)
            fg_pos_label = pack['aff_fg_pos_label'].cuda(1, non_blocking=True)
            neg_label = pack['aff_neg_label'].cuda(1, non_blocking=True)

            pos_aff_loss, neg_aff_loss, dp_fg_loss, dp_bg_loss = model(
                img, True)

            bg_pos_aff_loss = torch.sum(
                bg_pos_label * pos_aff_loss) / (torch.sum(bg_pos_label) + 1e-5)
            fg_pos_aff_loss = torch.sum(
                fg_pos_label * pos_aff_loss) / (torch.sum(fg_pos_label) + 1e-5)
            pos_aff_loss = bg_pos_aff_loss / 2 + fg_pos_aff_loss / 2
            neg_aff_loss = torch.sum(
                neg_label * neg_aff_loss) / (torch.sum(neg_label) + 1e-5)

            dp_fg_loss = torch.sum(dp_fg_loss * torch.unsqueeze(
                fg_pos_label, 1)) / (2 * torch.sum(fg_pos_label) + 1e-5)
            dp_bg_loss = torch.sum(dp_bg_loss * torch.unsqueeze(
                bg_pos_label, 1)) / (2 * torch.sum(bg_pos_label) + 1e-5)

            avg_meter.add({
                'loss1': pos_aff_loss.item(),
                'loss2': neg_aff_loss.item(),
                'loss3': dp_fg_loss.item(),
                'loss4': dp_bg_loss.item()
            })

            total_loss = (pos_aff_loss + neg_aff_loss) / 2 + (dp_fg_loss +
                                                              dp_bg_loss) / 2

            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

            if (optimizer.global_step - 1) % 50 == 0:
                timer.update_progress(optimizer.global_step / max_step)

                print('step:%5d/%5d' % (optimizer.global_step - 1, max_step),
                      'loss:%.4f %.4f %.4f %.4f' %
                      (avg_meter.pop('loss1'), avg_meter.pop('loss2'),
                       avg_meter.pop('loss3'), avg_meter.pop('loss4')),
                      'imps:%.1f' % ((iter + 1) * args.irn_batch_size /
                                     timer.get_stage_elapsed()),
                      'lr: %.4f' % (optimizer.param_groups[0]['lr']),
                      'etc:%s' % (timer.str_estimated_complete()),
                      flush=True)
        else:
            timer.reset_stage()

    transform_config = {
        'augmentation_scope': 'none',
        'images_normalization': 'default',
        'images_output_format_type': 'float',
        'size': 512,
        'size_transform': 'resize'
    }
    transform = get_transforms(transform_config)

    infer_dataset = voc12.dataloader.PneumothoraxImageDataset(
        '/datasets/LID/Pneumothorax/train/train_all_positive.csv',
        transform=transform)
    infer_data_loader = DataLoader(infer_dataset,
                                   batch_size=args.irn_batch_size,
                                   shuffle=False,
                                   num_workers=args.num_workers,
                                   pin_memory=True,
                                   drop_last=True)

    model.eval()
    print('Analyzing displacements mean ... ', end='')

    dp_mean_list = []

    with torch.no_grad():
        for iter, pack in enumerate(infer_data_loader):
            img = pack['img']

            aff, dp = model(img, False)

            dp_mean_list.append(torch.mean(dp, dim=(0, 2, 3)).cpu())

        model.module.mean_shift.running_mean = torch.mean(
            torch.stack(dp_mean_list), dim=0)
    print('done.')

    torch.save(model.module.state_dict(), args.irn_weights_name)
    torch.cuda.empty_cache()
Example #12
def run(args):

    model = getattr(importlib.import_module(args.cam_network), 'Net')()

    train_dataset = voc12.dataloader.VOC12ClassificationDataset(
        args.train_list,
        voc12_root=args.voc12_root,
        resize_long=(320, 640),
        hor_flip=True,
        crop_size=512,
        crop_method="random")
    train_data_loader = DataLoader(train_dataset,
                                   batch_size=args.cam_batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   pin_memory=True,
                                   drop_last=True)
    max_step = (len(train_dataset) //
                args.cam_batch_size) * args.cam_num_epoches

    val_dataset = voc12.dataloader.VOC12ClassificationDataset(
        args.val_list, voc12_root=args.voc12_root, crop_size=512)
    val_data_loader = DataLoader(val_dataset,
                                 batch_size=args.cam_batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 pin_memory=True,
                                 drop_last=True)

    param_groups = model.trainable_parameters()
    optimizer = torchutils.PolyOptimizer([
        {
            'params': param_groups[0],
            'lr': args.cam_learning_rate,
            'weight_decay': args.cam_weight_decay
        },
        {
            'params': param_groups[1],
            'lr': 10 * args.cam_learning_rate,
            'weight_decay': args.cam_weight_decay
        },
    ],
                                         lr=args.cam_learning_rate,
                                         weight_decay=args.cam_weight_decay,
                                         max_step=max_step)

    model = torch.nn.DataParallel(model).cuda()
    model.train()

    avg_meter = pyutils.AverageMeter()

    timer = pyutils.Timer()

    new_validation_loss = float('inf')
    old_validation_loss = float('inf')
    optimal_validation_loss = float('inf')

    early_stop_now = False

    ep = 0
    ep_max = args.cam_num_epoches

    training_vec = []

    while (ep < ep_max and early_stop_now is False):

        old_validation_loss = new_validation_loss

        print('Epoch %d/%d' % (ep + 1, args.cam_num_epoches))

        for step, pack in enumerate(train_data_loader):

            img = pack['img']
            label = pack['label'].cuda(non_blocking=True)

            x = model(img)
            loss = F.multilabel_soft_margin_loss(x, label)

            avg_meter.add({'loss1': loss.item()})

            current_train_loss = 0

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (optimizer.global_step - 1) % 100 == 0:
                timer.update_progress(optimizer.global_step / max_step)

                current_train_loss = avg_meter.pop('loss1')
                training_vec.append(current_train_loss)

                print('step:%5d/%5d' % (optimizer.global_step - 1, max_step),
                      'loss:%.4f' % (current_train_loss),
                      'imps:%.1f' % ((step + 1) * args.cam_batch_size /
                                     timer.get_stage_elapsed()),
                      'lr: %.4f' % (optimizer.param_groups[0]['lr']),
                      'etc:%s' % (timer.str_estimated_complete()),
                      flush=True)

        else:
            new_validation_loss = validate(model, val_data_loader)
            timer.reset_stage()

        if (new_validation_loss < optimal_validation_loss):
            optimal_validation_loss = new_validation_loss

        GL_value = calculateGL(optimal_validation_loss, new_validation_loss)
        Pk_value = calculatePk(training_vec, args.stopping_k)
        PQ_value = calculatePQ(GL_value, Pk_value)

        print('GL:%.1f' % GL_value)
        print('P(k):%.2f' % Pk_value)
        print('PQ:%.2f' % PQ_value)

        if (args.stopping_criterion == "threshold"
                and GL_value > args.stopping_threshold):
            early_stop_now = True

        if (args.stopping_criterion == "strip"
                and PQ_value > args.stopping_threshold):
            early_stop_now = True

        if (args.stopping_criterion == "onlyPk"
                and 100 / Pk_value > args.stopping_threshold):
            early_stop_now = True

        ep += 1

    if (early_stop_now == True and ep < ep_max):
        print("Early stopping activated")

    torch.save(model.module.state_dict(), args.cam_weights_name + '.pth')
    torch.cuda.empty_cache()
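
calculateGL, calculatePk and calculatePQ are not shown. Their names follow Prechelt's early-stopping criteria (generalization loss, training progress over a strip of the last k values, and their quotient), so a plausible sketch is:

def calculateGL(optimal_val_loss, current_val_loss):
    # Generalization loss: relative increase of the validation loss over the best
    # value seen so far, in percent.
    return 100.0 * (current_val_loss / optimal_val_loss - 1.0)

def calculatePk(train_losses, k):
    # Training progress: how much the average of the last k logged training losses
    # exceeds their minimum, in parts per thousand.
    strip = train_losses[-k:]
    return 1000.0 * (sum(strip) / (len(strip) * min(strip)) - 1.0)

def calculatePQ(gl, pk):
    # Quotient of generalization loss and training progress.
    return gl / pk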
Example #13
def run(args):
    model = getattr(importlib.import_module(args.cam_network), 'Net')()
    # train_dataset = voc12.dataloader.VOC12ClassificationDataset(args.train_list, voc12_root=args.voc12_root,
    #                                                             resize_long=(320, 640), hor_flip=True,
    #                                                             crop_size=512, crop_method="random")
    train_dataset = LiTS_dataset(
        '/home/viplab/nas/train5/',
        'train',
        transform=RandomGenerator(output_size=[256, 256]),
        tumor_only=True)
    train_data_loader = DataLoader(train_dataset,
                                   batch_size=args.cam_batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   pin_memory=True,
                                   drop_last=True)
    max_step = (len(train_dataset) //
                args.cam_batch_size) * args.cam_num_epoches
    # val_dataset = voc12.dataloader.VOC12ClassificationDataset(args.val_list, voc12_root=args.voc12_root,
    #                                                           crop_size=512)
    val_dataset = LiTS_dataset('/home/viplab/nas/val5/',
                               'train',
                               tumor_only=True)
    val_data_loader = DataLoader(val_dataset,
                                 batch_size=args.cam_batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 pin_memory=True,
                                 drop_last=True)

    param_groups = model.trainable_parameters()
    optimizer = torchutils.PolyOptimizer([
        {
            'params': param_groups[0],
            'lr': args.cam_learning_rate,
            'weight_decay': args.cam_weight_decay
        },
        {
            'params': param_groups[1],
            'lr': 10 * args.cam_learning_rate,
            'weight_decay': args.cam_weight_decay
        },
    ],
                                         lr=args.cam_learning_rate,
                                         weight_decay=args.cam_weight_decay,
                                         max_step=max_step)

    loss_func = nn.BCEWithLogitsLoss()

    # model = torch.nn.DataParallel(model).cuda()
    model.train().cuda()
    avg_meter = pyutils.AverageMeter()
    timer = pyutils.Timer()
    # torch.autograd.set_detect_anomaly(True)
    for ep in range(args.cam_num_epoches):
        print('Epoch %d/%d' % (ep + 1, args.cam_num_epoches))
        acc = 0
        c = 0
        for step, pack in enumerate(train_data_loader):
            img = pack['image'].cuda(non_blocking=True)
            label = pack['label'].cuda(non_blocking=True)
            x = model(img)
            # loss = F.multilabel_soft_margin_loss(x, label)
            x = torch.squeeze(x)
            label = label.float()
            loss = loss_func(x, label)
            avg_meter.add({'loss1': loss.item()})
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # compute acc
            x = torch.sigmoid(x).round()
            # x_cpu = x.cpu().data.numpy()
            acc += (x == label).sum()
            c += x.view(-1).shape[0]

            if (optimizer.global_step - 1) % 100 == 0:
                timer.update_progress(optimizer.global_step / max_step)
                print('step:%5d/%5d' % (optimizer.global_step - 1, max_step),
                      'loss:%.4f' % (avg_meter.pop('loss1')),
                      'acc:%.4f' % (acc / c),
                      'imps:%.1f' % ((step + 1) * args.cam_batch_size /
                                     timer.get_stage_elapsed()),
                      'lr: %.4f' % (optimizer.param_groups[0]['lr']),
                      'etc:%s' % (timer.str_estimated_complete()),
                      flush=True)

        # else:
        validate(model, val_data_loader)
        timer.reset_stage()

    torch.save(model.state_dict(), args.cam_weights_name + '.pth')
    torch.cuda.empty_cache()
Example #14
def run(args):

    model = getattr(importlib.import_module(args.cam_network), 'Net')()

    #criterion = eval('FocalSymmetricLovaszHardLogLoss')().cuda()
    #scheduler = eval('Adam45')()

    writer = SummaryWriter()

    train_dataset = voc12.dataloader.VOC12ClassificationDataset(
        args.train_list,
        voc12_root=args.voc12_root,
        image_folder=args.image_folder,
        resize_long=(320, 640),
        hor_flip=True,
        crop_size=512,
        crop_method="random")
    train_data_loader = DataLoader(train_dataset,
                                   batch_size=args.cam_batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   pin_memory=True,
                                   drop_last=True)
    max_step = (len(train_dataset) //
                args.cam_batch_size) * args.cam_num_epoches

    val_dataset = voc12.dataloader.VOC12ClassificationDataset(
        args.val_list,
        voc12_root=args.voc12_root,
        image_folder=args.image_folder,
        crop_size=512)
    val_data_loader = DataLoader(val_dataset,
                                 batch_size=args.cam_batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 pin_memory=True,
                                 drop_last=True)

    param_groups = model.trainable_parameters()

    start_epoch = 0
    # optimizer = scheduler.schedule(model, start_epoch, args.cam_num_epoches)[0]  # 'scheduler' is commented out above; PolyOptimizer below is used instead

    optimizer = torchutils.PolyOptimizer([
        {
            'params': param_groups[0],
            'lr': args.cam_learning_rate,
            'weight_decay': args.cam_weight_decay
        },
        {
            'params': param_groups[1],
            'lr': 10 * args.cam_learning_rate,
            'weight_decay': args.cam_weight_decay
        },
    ],
                                         lr=args.cam_learning_rate,
                                         weight_decay=args.cam_weight_decay,
                                         max_step=max_step)

    model = torch.nn.DataParallel(model).cuda()

    epoch = 0
    if args.load_from_checkpoint:
        model, optimizer, epoch, loss, = torchutils.load_checkpoint(
            args, model, optimizer)
        args.cam_num_epoches -= (epoch + 1)
        epoch += 1

    model.train()

    avg_meter = pyutils.AverageMeter()

    timer = pyutils.Timer()

    for ep in range(epoch, args.cam_num_epoches):

        print('Epoch %d/%d' % (ep + 1, args.cam_num_epoches))

        for step, pack in enumerate(train_data_loader):

            img = pack['img']
            label = pack['label'].cuda(non_blocking=True)

            x = model(img)
            loss = F.multilabel_soft_margin_loss(x, label)
            #loss = criterion(x, label, epoch=ep)

            avg_meter.add({'loss1': loss.item()})

            #writer.add_scalar("Loss/train", loss, ep)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (optimizer.global_step - 1) % 100 == 0:
                timer.update_progress(optimizer.global_step / max_step)

                print('step:%5d/%5d' % (optimizer.global_step - 1, max_step),
                      'loss:%.4f' % (avg_meter.pop('loss1')),
                      'imps:%.1f' % ((step + 1) * args.cam_batch_size /
                                     timer.get_stage_elapsed()),
                      'lr: %.4f' % (optimizer.param_groups[0]['lr']),
                      'etc:%s' % (timer.str_estimated_complete()),
                      flush=True)

        else:
            validate(model, val_data_loader)
            timer.reset_stage()
            torchutils.save_checkpoint(args, {
                'epoch': ep,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'loss': loss
            },
                                       is_best=False,
                                       filename='%s_epoch_%d.pth' %
                                       ('train_cam', ep))

    torch.save(model.module.state_dict(), args.cam_weights_name + '.pth')
    torch.cuda.empty_cache()