    # transform = [transforms.ColorJitter(0.5, 0.5, 0.5, 0),
    #              transforms.RandomAffine(180),
    #              transforms.RandomErasing(p=1, value=1)]

    # train_dataset = ImagenetDataAugDataset(root_dir=args.train_dir, num_wt=3, mask_dim=args.mask_dim, wt=wt,
    #                                        filters=filters_cpu, default_transform=default_transform,
    #                                        transform=transform, p=0.1)

    # Create train dataset
    dataset = SampleDataset(file_path=args.sample_file)

    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.workers,
                                              pin_memory=True,
                                              drop_last=True)
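
SampleDataset is referenced above but not defined in the snippet. A minimal sketch of what it might look like, assuming args.sample_file stores a tensor of precomputed samples saved with torch.save (the file layout is an assumption):

import torch
from torch.utils.data import Dataset

class SampleDataset(Dataset):
    # Sketch only: assumes the sample file holds one tensor with one
    # sample per row; the real file format is not shown in the snippet.
    def __init__(self, file_path):
        self.samples = torch.load(file_path)

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        return self.samples[idx]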

    # Model and optimizer
    model = UNet_NTail_128_Mod(n_channels=12,
                               n_classes=3,
                               n_tails=12,
                               bilinear=True).to(args.device)

    # Load weights
    if args.resume:
        print('Loading weights')
        model = load_weights(model, args.checkpoint_path, args)

    eval_biggan_unet128(model, data_loader, args)
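
load_weights is used by every example here but never shown. A minimal sketch, assuming checkpoints are plain state dicts or dicts that wrap one (the 'model_state_dict' key is an assumption):

import torch

def load_weights(model, weights_path, args):
    # Load a saved checkpoint onto the target device and apply it.
    checkpoint = torch.load(weights_path, map_location=args.device)
    # Unwrap if the state dict is nested under an (assumed) key.
    state = checkpoint.get('model_state_dict', checkpoint)
    model.load_state_dict(state)
    return model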
Example #2

    # Create validation dataset
    valid_dataset = dset.ImageFolder(root=args.valid_dir,
                                     transform=default_transform)

    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    # Load 128 model
    print('Loading model 128 weights')
    model_128 = UNet_NTail_128_Mod(n_channels=12,
                                   n_classes=3,
                                   n_tails=12,
                                   bilinear=True).to(args.device)
    model_128 = load_weights(model_128, args.model_128_weights, args)

    # Model and optimizer
    model = UNet_NTail_128_Mod1(n_channels=48,
                                n_classes=3,
                                n_tails=48,
                                bilinear=True).to(args.device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    state_dict = {'itr': 0}

    if args.resume:
        print('Loading weights & resuming from iteration {}'.format(
            args.checkpoint))
        model, optimizer, logger = load_checkpoint(model, optimizer, '256',
                                                   args)
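
load_checkpoint is also not shown; from the call site it takes a resolution tag and returns the model, optimizer, and a logger. A rough sketch under that signature; the path construction (args.checkpoint_dir) and the checkpoint keys are hypothetical:

import torch

def load_checkpoint(model, optimizer, res, args):
    # Hypothetical layout: checkpoints named by resolution and iteration.
    path = '{}/unet_{}_itr{}.pth'.format(args.checkpoint_dir, res, args.checkpoint)
    checkpoint = torch.load(path, map_location=args.device)
    model.load_state_dict(checkpoint['model'])          # assumed key
    optimizer.load_state_dict(checkpoint['optimizer'])  # assumed key
    logger = checkpoint.get('logger')                   # assumed key
    return model, optimizer, logger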
Example #3

    # Create train dataset
    train_dataset = dset.ImageFolder(root=args.train_dir,
                                     transform=default_transform)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    # Create validation dataset
    valid_dataset = dset.ImageFolder(root=args.valid_dir,
                                     transform=default_transform)

    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    # Model and optimizer
    model = UNet_NTail_128_Mod(n_channels=48,
                               n_classes=3,
                               n_tails=48,
                               bilinear=True).to(args.device)

    # Load weights
    print('Loading weights')
    model = load_weights(model, args.model_256_weights, args)

    eval_unet256(model, train_loader, 'train', args)
    eval_unet256(model, valid_loader, 'valid', args)
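
eval_unet256 itself is not included. A generic evaluation-loop sketch with the same signature; how the 48-channel wavelet input is assembled from each batch is not shown, so the batch is passed to the model as-is:

import torch

def eval_unet256(model, loader, split, args):
    # Sketch only: ImageFolder loaders yield (image, label) pairs.
    model.eval()
    n_batches = 0
    with torch.no_grad():
        for x, _ in loader:
            x = x.to(args.device)
            out = model(x)  # assumes the forward pass takes the batch directly
            n_batches += 1
    print('{}: evaluated {} batches'.format(split, n_batches))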
Example #4

    # Create train dataset
    train_dataset = dset.ImageFolder(root=args.train_dir,
                                     transform=default_transform)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    # Create validation dataset
    valid_dataset = dset.ImageFolder(root=args.valid_dir,
                                     transform=default_transform)

    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    # Model
    print('Loading UNet 128 and 256 weights')
    model_128 = UNet_NTail_128_Mod(n_channels=12,
                                   n_classes=3,
                                   n_tails=12,
                                   bilinear=True).to(args.device)
    model_128 = load_weights(model_128, args.model_128_weights, args)

    model_256 = UNet_NTail_128_Mod(n_channels=48,
                                   n_classes=3,
                                   n_tails=48,
                                   bilinear=True).to(args.device)
    model_256 = load_weights(model_256, args.model_256_weights, args)

    eval_unet_128_256(model_128, model_256, train_loader, 'train', args)
    eval_unet_128_256(model_128, model_256, valid_loader, 'valid', args)
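
eval_unet_128_256 presumably chains the two networks. A hedged sketch of such a two-stage pass; the hand-off between the stages is not shown in the snippets, so the channel expansion below is a placeholder:

import torch

def eval_unet_128_256(model_128, model_256, loader, split, args):
    # Sketch only: run the 12-tail model, then feed the 48-channel model.
    model_128.eval()
    model_256.eval()
    with torch.no_grad():
        for x, _ in loader:
            x = x.to(args.device)
            out_128 = model_128(x)
            # Placeholder expansion to the 48 channels the 256 model expects;
            # the real pipeline's stage hand-off is not shown.
            reps = 48 // out_128.shape[1]
            out_256 = model_256(out_128.repeat(1, reps, 1, 1))
    print('{}: two-stage evaluation pass complete'.format(split))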