Example #1
import argparse

import torch

# Project-level helpers assumed importable from the surrounding repository:
# WEIGHTS, DEFORMATOR_TYPE_DICT, SegmentationTrainParams, save_command_run_params,
# make_big_gan, LatentDeformator, UNet, train_segmentation.


def main():
    parser = argparse.ArgumentParser(
        description='GAN-based unsupervised segmentation train')
    parser.add_argument('--out', type=str, required=True)
    parser.add_argument('--gan_weights', type=str, default=WEIGHTS['BigGAN'])
    parser.add_argument('--deformator_weights', type=str, required=True)
    parser.add_argument('--deformator_type',
                        type=str,
                        choices=DEFORMATOR_TYPE_DICT.keys(),
                        required=True)
    parser.add_argument('--background_dim', type=int, required=True)
    parser.add_argument('--classes',
                        type=int,
                        nargs='*',
                        default=list(range(1000)))
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--seed', type=int, default=2)

    parser.add_argument('--val_images_dir', type=str, default=None)
    parser.add_argument('--val_masks_dir', type=str, default=None)

    for key, val in SegmentationTrainParams().__dict__.items():
        val_type = type(val) if key != 'synthezing' else str
        parser.add_argument('--{}'.format(key), type=val_type, default=None)

    args = parser.parse_args()
    torch.random.manual_seed(args.seed)

    torch.cuda.set_device(args.device)
    # save run params
    save_command_run_params(args)

    if len(args.classes) == 0:
        print('using all ImageNet')
        args.classes = list(range(1000))
    G = make_big_gan(args.gan_weights, args.classes).eval().cuda()
    deformator = LatentDeformator(
        G.dim_z, type=DEFORMATOR_TYPE_DICT[args.deformator_type])
    deformator.load_state_dict(
        torch.load(args.deformator_weights, map_location=torch.device('cpu')))
    deformator.eval().cuda()

    model = UNet().train().cuda()
    train_params = SegmentationTrainParams(**args.__dict__)
    print(f'run train with params: {train_params.__dict__}')

    train_segmentation(G,
                       deformator,
                       model,
                       train_params,
                       args.background_dim,
                       args.out,
                       val_dirs=[args.val_images_dir, args.val_masks_dir])
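
The loop over SegmentationTrainParams().__dict__ above mirrors every training parameter as an optional CLI flag, so only flags the user actually passes override the defaults when the namespace is fed back into SegmentationTrainParams. A minimal, self-contained sketch of that pattern; TrainParams and its fields are hypothetical stand-ins, not part of the project:

import argparse


class TrainParams:
    def __init__(self, **kwargs):
        # hypothetical defaults
        self.rate = 0.001
        self.steps = 1000
        # keep a default unless a matching, non-None keyword was passed
        for key, val in kwargs.items():
            if val is not None and hasattr(self, key):
                setattr(self, key, val)


parser = argparse.ArgumentParser()
# mirror every default field as an optional flag of the same type
for key, val in TrainParams().__dict__.items():
    parser.add_argument('--{}'.format(key), type=type(val), default=None)

args = parser.parse_args(['--steps', '200'])
print(TrainParams(**args.__dict__).__dict__)  # {'rate': 0.001, 'steps': 200}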
Example #2
import json
import os

import torch

# Project-level helpers assumed importable from the surrounding repository:
# WEIGHTS, DEFORMATOR_TYPE_DICT, load_generator, LatentDeformator,
# LatentShiftPredictor, LeNetShiftPredictor, load_human_annotation,
# HUMAN_ANNOTATION_FILE.


def load_from_dir(root_dir, model_index=None, G_weights=None, shift_in_w=True):
    args = json.load(open(os.path.join(root_dir, 'args.json')))
    args['w_shift'] = shift_in_w

    models_dir = os.path.join(root_dir, 'models')
    if model_index is None:
        models = os.listdir(models_dir)
        model_index = max([
            int(name.split('.')[0].split('_')[-1]) for name in models
            if name.startswith('deformator')
        ])

    if G_weights is None:
        G_weights = args['gan_weights']
    if G_weights is None or not os.path.isfile(G_weights):
        print('Using default local G weights')
        G_weights = WEIGHTS[args['gan_type']]
        if isinstance(G_weights, dict):
            G_weights = G_weights[str(args['resolution'])]

    if 'resolution' not in args.keys():
        args['resolution'] = 128

    G = load_generator(args, G_weights)
    deformator = LatentDeformator(
        shift_dim=G.dim_shift,
        input_dim=args.get('directions_count'),
        out_dim=args.get('max_latent_dim'),
        type=DEFORMATOR_TYPE_DICT[args['deformator']])

    if 'shift_predictor' not in args or args['shift_predictor'] == 'ResNet':
        shift_predictor = LatentShiftPredictor(G.dim_shift)
    elif args['shift_predictor'] == 'LeNet':
        shift_predictor = LeNetShiftPredictor(
            G.dim_shift, 1 if args['gan_type'] == 'SN_MNIST' else 3)

    deformator_model_path = os.path.join(
        models_dir, 'deformator_{}.pt'.format(model_index))
    shift_model_path = os.path.join(
        models_dir, 'shift_predictor_{}.pt'.format(model_index))
    if os.path.isfile(deformator_model_path):
        deformator.load_state_dict(
            torch.load(deformator_model_path,
                       map_location=torch.device('cpu')))
    if os.path.isfile(shift_model_path):
        shift_predictor.load_state_dict(
            torch.load(shift_model_path, map_location=torch.device('cpu')))

    setattr(
        deformator, 'annotation',
        load_human_annotation(os.path.join(root_dir, HUMAN_ANNOTATION_FILE)))

    return (deformator.eval().cuda(), G.eval().cuda(),
            shift_predictor.eval().cuda())
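
A self-contained sketch of the checkpoint lookup above: when model_index is not given, load_from_dir scans the filenames in models/ and takes the largest numeric suffix. The directory and checkpoint names below are made up for illustration.

import os
import tempfile

with tempfile.TemporaryDirectory() as models_dir:
    # dummy checkpoints following the deformator_<step>.pt naming scheme
    for step in (1000, 5000, 20000):
        open(os.path.join(models_dir, 'deformator_{}.pt'.format(step)), 'w').close()

    # latest checkpoint = largest numeric suffix among the deformator_* files
    model_index = max(
        int(name.split('.')[0].split('_')[-1])
        for name in os.listdir(models_dir)
        if name.startswith('deformator'))
    print(model_index)  # 20000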
Example #3
import argparse
import json
import os
import sys
from collections import OrderedDict

import torch

# Project-level helpers assumed importable from the surrounding repository:
# WEIGHTS, DEFORMATOR_TYPE_DICT, make_big_gan, make_proggan, make_external,
# LatentDeformator, ResNetShiftPredictor, LeNetShiftPredictor, UNet,
# SegmentationTrainParams, train_segmentation, evaluate.


def load_from_dir(root_dir, model_index=None, G_weights=None, verbose=False):
    args = json.load(open(os.path.join(root_dir, 'args.json')))

    models_dir = os.path.join(root_dir, 'models')
    if model_index is None:
        models = os.listdir(models_dir)
        model_index = max([
            int(name.split('.')[0].split('_')[-1]) for name in models
            if name.startswith('deformator')
        ])

        if verbose:
            print('using max index {}'.format(model_index))

    if G_weights is None:
        G_weights = args['gan_weights']
    if G_weights is None or not os.path.isfile(G_weights):
        if verbose:
            print('Using default local G weights')
        G_weights = WEIGHTS[args['gan_type']]

    if args['gan_type'] == 'BigGAN':
        G = make_big_gan(G_weights, args['target_class']).eval()
    elif args['gan_type'] in ['ProgGAN', 'PGGAN']:
        G = make_proggan(G_weights)
    else:
        G = make_external(G_weights)

    deformator = LatentDeformator(
        G.dim_z, type=DEFORMATOR_TYPE_DICT[args['deformator']])

    if 'shift_predictor' not in args or args['shift_predictor'] == 'ResNet':
        shift_predictor = ResNetShiftPredictor(G.dim_z)
    elif args['shift_predictor'] == 'LeNet':
        shift_predictor = LeNetShiftPredictor(
            G.dim_z, 1 if args['gan_type'] == 'SN_MNIST' else 3)

    deformator_model_path = os.path.join(
        models_dir, 'deformator_{}.pt'.format(model_index))
    shift_model_path = os.path.join(
        models_dir, 'shift_predictor_{}.pt'.format(model_index))
    if os.path.isfile(deformator_model_path):
        deformator.load_state_dict(torch.load(deformator_model_path))
    if os.path.isfile(shift_model_path):
        shift_predictor.load_state_dict(torch.load(shift_model_path))

    # try to load dims annotation
    directions_json = os.path.join(root_dir, 'directions.json')
    if os.path.isfile(directions_json):
        with open(directions_json, 'r') as f:
            directions_dict = json.load(f, object_pairs_hook=OrderedDict)
            setattr(deformator, 'directions_dict', directions_dict)

    return (deformator.eval().cuda(), G.eval().cuda(),
            shift_predictor.eval().cuda())
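
Both load_from_dir variants restore weights with torch.load and load_state_dict; passing map_location=torch.device('cpu'), as Example #2 does, keeps the load working on a machine without the GPU the checkpoint was written on. A minimal sketch with a throwaway module and a made-up file name:

import torch
from torch import nn

model = nn.Linear(4, 2)
torch.save(model.state_dict(), 'checkpoint.pt')  # hypothetical path

restored = nn.Linear(4, 2)
restored.load_state_dict(
    torch.load('checkpoint.pt', map_location=torch.device('cpu')))
restored.eval()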
def main():
    parser = argparse.ArgumentParser(
        description='GAN-based unsupervised segmentation train')
    parser.add_argument('--args',
                        type=str,
                        default=None,
                        help='json with all arguments')

    parser.add_argument('--out', type=str, required=True)
    parser.add_argument('--gan_weights', type=str, default=WEIGHTS['BigGAN'])
    parser.add_argument('--deformator_weights', type=str, required=True)
    parser.add_argument('--deformator_type',
                        type=str,
                        choices=DEFORMATOR_TYPE_DICT.keys(),
                        required=True)
    parser.add_argument('--background_dim', type=int, required=True)
    parser.add_argument('--classes',
                        type=int,
                        nargs='*',
                        default=list(range(1000)))
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--seed', type=int, default=2)

    parser.add_argument('--val_images_dir', type=str)
    parser.add_argument('--val_masks_dir', type=str)

    for key, val in SegmentationTrainParams().__dict__.items():
        parser.add_argument('--{}'.format(key), type=type(val), default=None)

    args = parser.parse_args()
    torch.random.manual_seed(args.seed)

    torch.cuda.set_device(args.device)
    if args.args is not None:
        with open(args.args) as args_json:
            args_dict = json.load(args_json)
            args.__dict__.update(**args_dict)

    # save run params
    if not os.path.isdir(args.out):
        os.makedirs(args.out)
    with open(os.path.join(args.out, 'args.json'), 'w') as args_file:
        json.dump(args.__dict__, args_file)
    with open(os.path.join(args.out, 'command.sh'), 'w') as command_file:
        command_file.write(' '.join(sys.argv))
        command_file.write('\n')

    if len(args.classes) == 0:
        print('using all ImageNet')
        args.classes = list(range(1000))
    G = make_big_gan(args.gan_weights, args.classes).eval().cuda()
    deformator = LatentDeformator(
        G.dim_z, type=DEFORMATOR_TYPE_DICT[args.deformator_type])
    deformator.load_state_dict(
        torch.load(args.deformator_weights, map_location=torch.device('cpu')))
    deformator.eval().cuda()

    model = UNet().train().cuda()
    train_params = SegmentationTrainParams(**args.__dict__)
    print('run train with params: {}'.format(train_params.__dict__))

    train_segmentation(G, deformator, model, train_params, args.background_dim,
                       args.out)

    if args.val_images_dir is not None:
        evaluate(model, args.val_images_dir, args.val_masks_dir,
                 os.path.join(args.out, 'score.json'), 128)
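
main saves its own argparse namespace as args.json next to the outputs, and the --args flag lets a later run load such a file and overwrite its namespace before training. A self-contained sketch of that round trip, using a pared-down parser with two of the flags above (the 'results' default is made up):

import argparse
import json
import os
import tempfile

parser = argparse.ArgumentParser()
parser.add_argument('--out', type=str, default='results')
parser.add_argument('--seed', type=int, default=2)
args = parser.parse_args([])

with tempfile.TemporaryDirectory() as out_dir:
    # save the run parameters next to the outputs, as the training script does
    with open(os.path.join(out_dir, 'args.json'), 'w') as args_file:
        json.dump(args.__dict__, args_file)

    # a later run can restore and override its own namespace from that file
    with open(os.path.join(out_dir, 'args.json')) as args_json:
        args.__dict__.update(**json.load(args_json))

print(args.out, args.seed)  # results 2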