import argparse
import json
import os
import sys

import torch

# make_big_gan, UNet, SegmentationTrainParams, train_segmentation, update_out_json,
# evaluate_all_wrappers and BIGBIGAN_WEIGHTS are project-level helpers from the
# surrounding repository and are assumed to be importable here.
def main():
    parser = argparse.ArgumentParser(description='GAN-based unsupervised segmentation train')
    parser.add_argument('--args', type=str, default=None, help='json with all arguments')

    parser.add_argument('--out', type=str, required=True)
    parser.add_argument('--gan_weights', type=str, default=BIGBIGAN_WEIGHTS)
    parser.add_argument('--bg_direction', type=str, required=True)
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--gen_devices', type=int, nargs='+', default=[1,])
    parser.add_argument('--seed', type=int, default=2)

    parser.add_argument('--z', type=str, default=None)
    parser.add_argument('--z_noise', type=float, default=0.0)
    parser.add_argument('--val_images_dirs', nargs='*', type=str, default=[None])
    parser.add_argument('--val_masks_dirs', nargs='*', type=str, default=[None])

    for key, val in SegmentationTrainParams().__dict__.items():
        val_type = type(val) if key != 'synthezing' else str
        parser.add_argument('--{}'.format(key), type=val_type, default=None)

    args = parser.parse_args()
    torch.random.manual_seed(args.seed)
    torch.cuda.set_device(args.device)
    if args.args is not None:
        with open(args.args) as args_json:
            args_dict = json.load(args_json)
            args.__dict__.update(**args_dict)
    if (args.z is not None) and (not os.path.isfile(args.z)):
        print('No valid z-embeddings file provided; ignoring --z.')
        args.z = None

    # save run parameters and the launch command
    if not os.path.isdir(args.out):
        os.makedirs(args.out)
    with open(os.path.join(args.out, 'args.json'), 'w') as args_file:
        json.dump(args.__dict__, args_file)
    with open(os.path.join(args.out, 'command.sh'), 'w') as command_file:
        command_file.write(' '.join(sys.argv))
        command_file.write('\n')

    G = make_big_gan(args.gan_weights).eval().cuda()
    bg_direction = torch.load(args.bg_direction)

    model = UNet().train().cuda()
    train_params = SegmentationTrainParams(**args.__dict__)
    print('run train with params: {}'.format(train_params.__dict__))

    synthetic_score = train_segmentation(
        G, bg_direction, model, train_params, args.out,
        args.gen_devices, val_dirs=[args.val_images_dirs[0], args.val_masks_dirs[0]],
        zs=args.z, z_noise=args.z_noise)

    score_json = os.path.join(args.out, 'score.json')
    update_out_json({'synthetic': synthetic_score}, score_json)
    print('Synthetic data score: {}'.format(synthetic_score))

    if len(args.val_images_dirs) > 0:
        evaluate_all_wrappers(model, score_json,
                              args.val_images_dirs, args.val_masks_dirs)
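# A typical launch of the training entry point above might look like the following
# shell command (the script name and file paths are placeholders, not taken from the page):
#
#   python train_segmentation.py \
#       --out runs/bigbigan_seg \
#       --bg_direction weights/bg_direction.pth \
#       --device 0 --gen_devices 1 --seed 2 --z_noise 0.2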
Example No. 2
import sys
from os.path import join

# BigBiGAN_root is expected to point to a local checkout of the BigGANsAreWatching repository.
def loadBigBiGAN(weightpath=None):
    sys.path.append(BigBiGAN_root)
    from BigGAN.gan_load import UnconditionalBigGAN, make_big_gan
    # from BigGAN.model.BigGAN import Generator
    if weightpath is None:
        weightpath = join(BigBiGAN_root, "BigGAN", "weights", "BigBiGAN_x1.pth")
    BBGAN = make_big_gan(weightpath, resolution=128)
    # BBGAN = make_big_gan(r"E:\Github_Projects\BigGANsAreWatching\BigGAN\weights\BigBiGAN_x1.pth", resolution=128)
    for param in BBGAN.parameters():
        param.requires_grad_(False)
    BBGAN.eval()
    return BBGAN
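# Usage sketch (the 120-dimensional latent and the [-1, 1] output range are
# assumptions about the BigBiGAN x1 generator, not taken from the source page):
# BBGAN = loadBigBiGAN().cuda()
# z = torch.randn(4, 120).cuda()
# with torch.no_grad():
#     imgs = BBGAN(z)        # (4, 3, 128, 128) images, roughly in [-1, 1]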
def main():
    parser = argparse.ArgumentParser(
        description='GAN-based unsupervised segmentation train')
    parser.add_argument('--gan_weights', type=str, default=BIGBIGAN_WEIGHTS)
    parser.add_argument('--bg_direction', type=str, required=True)

    parser.add_argument('--z', type=str, default=None)
    parser.add_argument('--seed', type=int, default=2)
    parser.add_argument('--val_images_dirs',
                        nargs='*',
                        type=str,
                        default=[None])
    parser.add_argument('--val_masks_dirs',
                        nargs='*',
                        type=str,
                        default=[None])

    args = parser.parse_args()

    # load G
    G = make_big_gan(args.gan_weights).eval().cuda()
    bg_direction = torch.load(args.bg_direction)
    evaluate_pseudo_labels(G, bg_direction, args.z, args.val_images_dirs,
                           args.val_masks_dirs)
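# A typical launch of this evaluation entry point (script name and paths are placeholders):
#
#   python evaluate_pseudo_labels.py \
#       --bg_direction weights/bg_direction.pth \
#       --val_images_dirs data/val/images --val_masks_dirs data/val/masks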
Example No. 4
def linear_interpolation(refvect, tangvect, steps):  # signature reconstructed; the source excerpt starts mid-function
    interp_vects = steps @ tangvect + refvect  # move from the reference vector along the tangent direction
    return interp_vects
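# Usage sketch (the latent dimensionality of 120 just mirrors the BigBiGAN example above):
# refvect  = torch.randn(1, 120)
# tangvect = torch.randn(1, 120)
# steps    = torch.linspace(-2, 2, 11).unsqueeze(1)        # (11, 1) column of step sizes
# vects = linear_interpolation(refvect, tangvect, steps)   # (11, 120) interpolated latents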
#%%
from pytorch_pretrained_biggan import BigGAN  # pip install pytorch-pretrained-biggan
BGAN = BigGAN.from_pretrained("biggan-deep-256")
for param in BGAN.parameters():
    param.requires_grad_(False)
embed_mat = next(BGAN.embeddings.parameters()).data  # class-embedding matrix of the pretrained BigGAN
BGAN.cuda()
# the model lives on the GPU from here on.
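# Usage sketch with the pytorch_pretrained_biggan helper functions (ImageNet class 153
# is an arbitrary illustrative choice):
# from pytorch_pretrained_biggan import truncated_noise_sample, one_hot_from_int
# noise  = torch.from_numpy(truncated_noise_sample(batch_size=1, truncation=0.7)).cuda()
# onehot = torch.from_numpy(one_hot_from_int(153, batch_size=1)).cuda()
# with torch.no_grad():
#     img = BGAN(noise, onehot, 0.7)    # (1, 3, 256, 256) image in roughly [-1, 1]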

#%% import BigBiGAN! 
import sys
sys.path.append(r"E:\Github_Projects\BigGANsAreWatching")
from BigGAN.gan_load import UnconditionalBigGAN, make_big_gan
from BigGAN.model.BigGAN import Generator
BBGAN = make_big_gan(r"E:\Github_Projects\BigGANsAreWatching\BigGAN\weights\BigBiGAN_x1.pth", resolution=128)

#%% 
sys.path.append(r"D:\Github\PerceptualSimilarity")
sys.path.append(r"E:\Github_Projects\PerceptualSimilarity")
import models
ImDist = models.PerceptualLoss(model='net-lin', net='squeeze', use_gpu=1, gpu_ids=[0])
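# Usage sketch: with this configuration ImDist expects image batches in [-1, 1] and
# returns one LPIPS distance per pair (shape assumptions based on the
# PerceptualSimilarity API, not on the source page):
# d = ImDist(img1, img2)       # (N, 1, 1, 1); call d.squeeze() for a flat vector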
#%%
import torch
import torch.nn.functional as F
from torchvision.transforms import Normalize, Compose
RGB_mean = torch.tensor([0.485, 0.456, 0.406]).view(1,-1,1,1).cuda()
RGB_std  = torch.tensor([0.229, 0.224, 0.225]).view(1,-1,1,1).cuda()
preprocess = Compose([lambda img: (F.interpolate(img, (224, 224), mode='bilinear', align_corners=True) - RGB_mean) / RGB_std])
preprocess_resize = Compose([lambda img: F.interpolate(img, (224, 224), mode='bilinear', align_corners=True) ])
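# Usage sketch: the BigGAN samples above live in [-1, 1] (an assumption), so shift
# them to [0, 1] before the ImageNet normalization applied by `preprocess`:
# img01 = (img + 1.0) / 2.0
# inp = preprocess(img01)      # (N, 3, 224, 224), ready for an ImageNet-pretrained CNN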
#%%
import torch.nn as nn
from GAN_utils import BigGAN_wrapper