Пример #1
0
def test(args):
    """Run the trained U-Net over the Sony test set and save JPEG results.

    For every batch, three images are written to ``<result_dir>/eval``:
    the mean-scaled low-light input, the network output, and the ground
    truth, each named ``%05d_00_train_%d_{scale,out,gt}.jpg``.

    Args:
        args: namespace with gpu, input_dir, gt_dir, batch_size,
            num_workers, model (state-dict path) and result_dir.
    """
    # device: requested GPU when CUDA is available, otherwise CPU
    device = torch.device("cuda:%d" %
                          args.gpu if torch.cuda.is_available() else "cpu")
    torch.backends.cudnn.benchmark = True

    # data
    testset = SonyTestDataset(args.input_dir, args.gt_dir)
    test_loader = DataLoader(testset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.num_workers)

    # model
    model = Unet()
    model.load_state_dict(torch.load(args.model))
    model.to(device)
    model.eval()

    # create the output directory once, instead of re-checking per batch
    out_dir = os.path.join(args.result_dir, 'eval')
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    # testing -- no_grad: inference only, skip autograd bookkeeping/memory
    with torch.no_grad():
        for i, databatch in tqdm(enumerate(test_loader),
                                 total=len(test_loader)):
            input_full, scale_full, gt_full, test_id, ratio = databatch
            scale_full, gt_full = torch.squeeze(scale_full), torch.squeeze(gt_full)

            # processing
            inputs = input_full.to(device)
            outputs = model(inputs)
            outputs = outputs.cpu().detach()
            outputs = torch.squeeze(outputs)
            outputs = outputs.permute(1, 2, 0)  # CHW -> HWC for image saving

            # scaling and clipping
            outputs, scale_full, gt_full = outputs.numpy(), scale_full.numpy(
            ), gt_full.numpy()
            scale_full = scale_full * np.mean(gt_full) / np.mean(
                scale_full
            )  # scale the low-light image to the same mean of the ground truth
            outputs = np.minimum(np.maximum(outputs, 0), 1)

            # saving
            scipy.misc.toimage(
                scale_full * 255, high=255, low=0, cmin=0, cmax=255).save(
                    os.path.join(
                        out_dir,
                        '%05d_00_train_%d_scale.jpg' % (test_id[0], ratio[0])))
            scipy.misc.toimage(
                outputs * 255, high=255, low=0, cmin=0, cmax=255).save(
                    os.path.join(
                        out_dir,
                        '%05d_00_train_%d_out.jpg' % (test_id[0], ratio[0])))
            scipy.misc.toimage(
                gt_full * 255, high=255, low=0, cmin=0, cmax=255).save(
                    os.path.join(
                        out_dir,
                        '%05d_00_train_%d_gt.jpg' % (test_id[0], ratio[0])))
Пример #2
0
def convert_save(path, name, ckpth, image):
    """Run `image` through the saved Generator U-Net and write the result.

    Args:
        path: output directory.
        name: output file name.
        ckpth: directory containing 'Generator.pth'.
        image: input tensor; assumed shape (1, C, H, W) normalized with
            per-channel mean/std of 0.5 (matches the un-normalization
            below -- TODO confirm against the caller).
    """
    model = Unet()
    model.load_state_dict(torch.load(os.path.join(ckpth, 'Generator.pth')))
    # bug fix: switch to inference mode so dropout / batch-norm layers
    # behave deterministically; no_grad skips autograd bookkeeping.
    model.eval()

    with torch.no_grad():
        output_image = model(image).squeeze(0).detach().numpy()
    output_image = np.moveaxis(output_image, 0, 2)  # CHW -> HWC
    # undo the (0.5, 0.5, 0.5) mean/std normalization back into [0, 1]
    output_image = output_image * np.array((0.5, 0.5, 0.5)) + \
        np.array((0.5, 0.5, 0.5))
    output_image = np.clip(output_image, 0, 1)
    output_image = Image.fromarray(np.uint8(output_image * 255))
    output_image.save(os.path.join(path, name))
Пример #3
0
def test(args):
    """Run the trained U-Net on every .DNG raw file in args.imgdir.

    Each raw file is packed, amplified by a fixed ratio, center-cropped to
    1024x1024, pushed through the network, and saved next to the input as
    '<name>_out.jpg'.

    Args:
        args: namespace with gpu, imgdir and model (state-dict path).
    """
    # device
    device = torch.device("cuda:%d" %
                          args.gpu if torch.cuda.is_available() else "cpu")
    torch.backends.cudnn.benchmark = True

    # images path
    fns = glob.glob(path.join(args.imgdir, '*.DNG'))

    # model
    model = Unet()
    model.load_state_dict(torch.load(args.model))
    model.to(device)
    model.eval()

    # fixed amplification ratio applied to the packed raw data
    ratio = 200

    for fn in fns:
        print(fn)
        raw = rawpy.imread(fn)

        # renamed from 'input' to avoid shadowing the builtin
        inputs = np.expand_dims(pack_raw(raw), axis=0) * ratio
        inputs = crop_center(inputs, 1024, 1024)
        inputs = torch.from_numpy(inputs)
        inputs = torch.squeeze(inputs)
        inputs = inputs.permute(2, 0, 1)         # HWC -> CHW
        inputs = torch.unsqueeze(inputs, dim=0)  # add batch dimension
        inputs = inputs.to(device)

        # no_grad: inference only, skip autograd bookkeeping
        with torch.no_grad():
            outputs = model(inputs)
        outputs = outputs.cpu().detach()
        outputs = torch.squeeze(outputs)
        outputs = outputs.permute(1, 2, 0)       # CHW -> HWC

        outputs = outputs.numpy()
        outputs = np.minimum(np.maximum(outputs, 0), 1)

        # (removed dead code: 'scale_full' was computed and squeezed but
        # never used anywhere in this function)

        scipy.misc.toimage(outputs * 255, high=255, low=0, cmin=0,
                           cmax=255).save(
                               path.join(args.imgdir,
                                         path.basename(fn) + '_out.jpg'))
Пример #4
0
def main():
    """Entry point: build datasets, model, optimizer and scheduler, then train."""
    args = parser.parse_args()

    step = 0
    exp_name = f'{args.name}_{hp.max_lr}_{hp.cycle_length}'

    # training-time pipeline: resize, random rotate/crop augmentation,
    # then tensorize and normalize
    train_tfms = segtrans.JointCompose([
        segtrans.Resize(400),
        segtrans.RandomRotate(0, 90),
        segtrans.RandomCrop(256, 256),
        segtrans.ToTensor(),
        segtrans.Normalize(mean=hp.mean, std=hp.std),
    ])

    # validation pipeline: padding only, no augmentation
    val_tfms = segtrans.JointCompose([
        segtrans.PadToFactor(),
        segtrans.ToTensor(),
        segtrans.Normalize(mean=hp.mean, std=hp.std),
    ])

    train_dataset = DSBDataset(f'{args.data}/train', transforms=train_tfms)
    val_dataset = DSBDataset(f'{args.data}/val', transforms=val_tfms)

    model = Unet()

    # optionally resume: restores weights, global step and the run name
    if args.checkpoint:
        ckpt = torch.load(args.checkpoint)
        model.load_state_dict(ckpt['state'])
        step = ckpt['step']
        exp_name = ckpt['exp_name']

    optimizer = Adam(model.parameters(), lr=hp.max_lr)

    if args.find_lr:
        # LR-range-test mode
        scheduler = LRFinderScheduler(optimizer)
    else:
        scheduler = SGDRScheduler(optimizer,
                                  min_lr=hp.min_lr,
                                  max_lr=hp.max_lr,
                                  cycle_length=hp.cycle_length,
                                  current_step=step)

    model.cuda(device=args.device)
    train(model, optimizer, scheduler, train_dataset, val_dataset,
          n_epochs=args.epochs, batch_size=args.batch_size,
          exp_name=exp_name, device=args.device, step=step)
Пример #5
0
def train(args):
    """Train the U-Net on the Sony dataset with L1 color loss + gradient loss.

    Logs the running loss every `log_interval` batches, dumps side-by-side
    ground-truth / prediction JPEGs every `save_freq` epochs, and saves a
    model checkpoint every `model_save_freq` epochs. Supports resuming from
    a checkpoint whose filename encodes the epoch number.
    """
    # device
    device = torch.device("cuda:%d" % args.gpu if torch.cuda.is_available() else "cpu")

    # data
    trainset = SonyDataset(args.input_dir, args.gt_dir, args.ps)
    train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=12)
    logging.info("data loading okay")

    # model
    model = Unet().to(device)

    # resume
    starting_epoch = 0
    if args.resume is not None:
        model.load_state_dict(torch.load(args.resume))
        # assumes the checkpoint name ends in '<epoch>.pl' with a 4-digit
        # epoch -- TODO confirm against the saving convention below
        starting_epoch = int(args.resume[-7:-3])
        print('resume at %d epoch' % starting_epoch)

    # loss functions: per-pixel color fidelity + edge/gradient fidelity
    color_loss = nn.L1Loss()
    gradient_loss = GradLoss(device)

    # optimizer
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)

    # lr scheduler
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.1)

    # training
    running_loss = 0.0
    # bug fix: the original range stopped one short, training num_epoch - 1
    # epochs instead of num_epoch
    for epoch in range(starting_epoch + 1, starting_epoch + args.num_epoch + 1):
        for i, databatch in enumerate(train_loader):
            # get the inputs
            input_patch, gt_patch, train_id, ratio = databatch
            input_patch, gt_patch = input_patch.to(device), gt_patch.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model(input_patch)
            loss = color_loss(outputs, gt_patch) + gradient_loss(outputs, gt_patch)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % args.log_interval == (args.log_interval - 1):
                print('[%d, %5d] loss: %.3f %s' %
                      (epoch, i, running_loss / args.log_interval, datetime.now()))
                running_loss = 0.0

            if epoch % args.save_freq == 0:
                # bug fix: the save path was built by string concatenation
                # (broken unless result_dir ends in '/'); use os.path.join,
                # consistent with the makedirs call
                epoch_dir = os.path.join(args.result_dir, '%04d' % epoch)
                if not os.path.isdir(epoch_dir):
                    os.makedirs(epoch_dir)

                gt_patch = gt_patch.cpu().detach().numpy()
                outputs = outputs.cpu().detach().numpy()
                train_id = train_id.numpy()
                ratio = ratio.numpy()

                # ground truth and prediction side by side (concat on width)
                temp = np.concatenate((gt_patch[0, :, :, :], outputs[0, :, :, :]), axis=2)
                scipy.misc.toimage(temp * 255, high=255, low=0, cmin=0, cmax=255).save(
                    os.path.join(epoch_dir, '%05d_00_train_%d.jpg' % (train_id[0], ratio[0])))

        # at the end of epoch:
        # bug fix: step the scheduler AFTER the epoch's optimizer steps
        # (required ordering since PyTorch 1.1; stepping first skips the
        # initial learning rate)
        scheduler.step()

        if epoch % args.model_save_freq == 0:
            # bug fix: args.checkpoint_dir + './model...' produced paths like
            # 'ckpt./model_5.pl' when the dir lacked a trailing slash
            torch.save(model.state_dict(),
                       os.path.join(args.checkpoint_dir, 'model_%d.pl' % epoch))
Пример #6
0
            with open(file_name, 'a+') as f:

                for i in range(mask_preds.shape[0]):
                    s = str(i + 1 + start) + ',' + rles[i]
                    f.write(s + '\n')

        start += mask_preds.shape[0]


# Start (or append to) the submission CSV and write its header row.
# NOTE(review): 'a+' appends -- rerunning this script adds a second header
# to an existing file; confirm that is intended.
file_name = '../submission.csv'
with open(file_name, 'a+') as f:
    f.write('img,pixels\n')

# Load saved model
model = Unet(1, add_residual=True)
model.load_state_dict(torch.load('./saved_model'))  # Load trained model

# Move the model to GPU only when both the flag and the hardware allow it.
if use_cuda and torch.cuda.is_available():
    model.cuda()

# Inference-time transform: resize to 512x512 then tensorize
# (normalization deliberately left disabled below).
transforms_valid = A.Compose([
    A.Resize(height=512, width=512, p=1.0),

    # A.Normalize(mean=(0),std=(255),p=1.0),
    ToTensorV2(p=1.0),
])

# Build the dataset over the sample-submission rows.
sub = pd.read_csv('../data-samples/sample_submission.csv')
sub_data = Ultrasound_Dataset(
    sub, transform=transforms_valid)  # Same Transform as in validation
sub_loader = DataLoader(sub_data, batch_size=4,
Пример #7
0
def main(args):
    """Evaluate a meta-trained U-Net (MAML) on the Pascal-5i validation split.

    Loads config (with CLI overrides), restores model weights, runs the
    meta-learner's evaluation loop, plots per-label accuracy / IoU bars,
    and writes the raw results to 'test_results.json' next to the model.
    """
    # NOTE(review): hard-coded dataset location -- consider a CLI argument
    data_path = '/home/birgit/MA/Code/torchmeta/gitlab/data'
    with open(args.config, 'r') as f:
        config = json.load(f)

    # command-line overrides of the stored config
    if args.folder is not None:
        config['folder'] = args.folder
    if args.num_steps > 0:
        config['num_steps'] = args.num_steps
    if args.num_batches > 0:
        config['num_batches'] = args.num_batches

    device = torch.device(
        'cuda' if args.use_cuda and torch.cuda.is_available() else 'cpu')

    loss_function = DiceLoss()

    dataset = 'pascal5i'
    fold = config['fold']

    steps = config['num_adaption_steps']

    padding = 1

    # idiom fix: membership test on the dict itself, not .keys()
    if 'feature_scale' in config:
        model = Unet(feature_scale=config['feature_scale'], padding=padding)
    else:
        model = Unet(feature_scale=4, padding=padding)

    # get datasets and load into meta learning format
    # (train and test splits are loaded too, but only val is evaluated here)
    meta_train_dataset, meta_val_dataset, meta_test_dataset = get_datasets(
        dataset,
        data_path,
        config['num_ways'],
        config['num_shots'],
        config['num_shots_test'],
        fold=fold,
        download=False,
        augment=False)

    meta_val_dataloader = BatchMetaDataLoader(meta_val_dataset,
                                              batch_size=config['batch_size'],
                                              shuffle=True,
                                              num_workers=args.num_workers,
                                              pin_memory=True)

    print('num shots = ', config['num_shots'])
    print(f'Using device: {device}')

    with open(config['model_path'], 'rb') as f:
        model.load_state_dict(torch.load(f, map_location=device))

    metalearner = ModelAgnosticMetaLearning(model,
                                            first_order=config['first_order'],
                                            num_adaptation_steps=steps,
                                            step_size=config['step_size'],
                                            loss_function=loss_function,
                                            device=device)

    results = metalearner.evaluate(meta_val_dataloader,
                                   max_batches=config['num_batches'],
                                   verbose=args.verbose,
                                   desc='Test',
                                   is_test=True)

    if dataset == 'pascal5i':
        labels = [
            'aeroplane', 'bike', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
            'chair', 'cow', 'dining table', 'dog', 'horse', 'motorbike',
            'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
        ]
        # idiom fix: take dict values directly instead of discarding keys
        # in a comprehension
        accuracies = list(results['mean_acc_per_label'].values())
        ious = list(results['mean_iou_per_label'].values())

        # (removed dead code: 'val_ious' / 'val_accs' were filtered copies
        # that were never used)

        y_pos = np.arange(len(labels))

        fig, (ax1, ax2) = plt.subplots(1, 2)

        ax1.barh(y_pos, accuracies, align='center', alpha=0.5)
        ax1.set_yticks(y_pos)
        ax1.set_yticklabels(labels)
        ax1.set_xlabel('acc')
        ax1.set_xlim(0, 1)
        ax1.set_title('Accuracies per label')

        ax2.barh(y_pos, ious, align='center', alpha=0.5)
        ax2.set_yticks(y_pos)
        ax2.set_yticklabels(labels)
        ax2.set_xlabel('iou')
        ax2.set_xlim(0, 1)
        ax2.set_title('IoU scores per label')
        plt.grid(True)

        plt.show()

    # Save results
    dirname = os.path.dirname(config['model_path'])
    with open(os.path.join(dirname, 'test_results.json'), 'w') as f:
        json.dump(results, f)