Example #1
0
def execute(args):
    """Attach both datasets plus the pretrained semantic and evaluation
    classifiers to *args*, then train and compute FID.

    Args:
        args: argparse-style namespace; mutated in place with loaders,
            shapes, and the loaded models before being handed to train().
    """
    dataset1 = getattr(images, args.dataset1)
    dataset2 = getattr(images, args.dataset2)
    train_loader1, valid_loader1, test_loader1, shape1, nc = dataset1(
        args.dataset_loc1, args.train_batch_size, args.test_batch_size,
        args.valid_split)
    train_loader2, valid_loader2, test_loader2, shape2, _ = dataset2(
        args.dataset_loc2, args.train_batch_size, args.test_batch_size,
        args.valid_split)
    args.loaders1 = (train_loader1, valid_loader1, test_loader1)
    args.loaders2 = (train_loader2, valid_loader2, test_loader2)
    args.shape1 = shape1
    args.shape2 = shape2

    # The two classifier loads were copy-pasted stanzas differing only in
    # model type and checkpoint path; factored into _load_classifier.
    args.semantic = _load_classifier('vmt_cluster', args.semantic_model_path,
                                     shape1, nc)
    args.evalY = _load_classifier('classifier', args.eval_model_path,
                                  shape1, nc)

    train(args)
    evaluate_fid(args)


def _load_classifier(model_type, model_path, shape, nc):
    """Rebuild the 'classifier' model defined in models/<model_type>/train
    from the args saved at *model_path*, then restore its last checkpoint.
    """
    model_definition = import_module('.'.join(('models', model_type, 'train')))
    model_parameters = get_args(model_path)
    model_parameters['nc'] = nc  # number of classes from the source dataset
    models = model_definition.define_models(shape, **model_parameters)
    return load_last_model(models['classifier'], 'classifier', model_path)
Example #2
0
def execute(args):
    """Wire the source dataset, the evaluation classifier, and the target
    dataset onto *args*, then launch training.
    """
    print(args)

    source_dataset = getattr(images, args.dataset1)
    source_train, source_test, source_shape, n_classes = source_dataset(
        args.dataset_loc1, args.train_batch_size, args.test_batch_size)
    args.loaders1 = (source_train, source_test)
    args.shape1 = source_shape
    args.nc = n_classes

    # Rebuild the evaluation classifier from its saved training arguments
    # and restore the most recent checkpoint.
    module_path = '.'.join(('models', args.eval_model, 'train'))
    definition = import_module(module_path)
    parameters = get_args(args.eval_model_path)
    parameters['n_classes'] = n_classes
    built = definition.define_models(source_shape, **parameters)
    args.evaluation = load_last_model(built['classifier'], 'classifier',
                                      args.eval_model_path)

    target_dataset = getattr(images, args.dataset2)
    target_train, target_test, target_shape, _ = target_dataset(
        args.dataset_loc2, args.train_batch_size, args.test_batch_size)
    args.loaders2 = (target_train, target_test)
    args.shape2 = target_shape

    train(args)
Example #3
0
def execute(args):
    """Attach both datasets and the pretrained cluster encoder to *args*,
    then run training.
    """
    print(args)

    first = getattr(images, args.dataset1)
    train1, _, test1, shape1, n_classes = first(
        args.dataset_loc1, args.train_batch_size, args.test_batch_size)
    args.loaders1 = (train1, test1)
    args.shape1 = shape1
    args.nc = n_classes

    second = getattr(images, args.dataset2)
    train2, _, test2, shape2, _ = second(
        args.dataset_loc2, args.train_batch_size, args.test_batch_size)
    args.loaders2 = (train2, test2)
    args.shape2 = shape2

    # Rebuild the clustering encoder from its saved training arguments and
    # restore the latest checkpoint.
    definition = import_module('.'.join(('models', args.cluster_model,
                                         'train')))
    parameters = get_args(args.cluster_model_path)
    parameters['n_classes'] = n_classes
    encoder = definition.define_models(shape1, **parameters)['encoder']
    args.cluster = load_last_model(encoder, 'encoder',
                                   args.cluster_model_path)

    train(args)
Example #4
0
def define_last_model(model_type, model_path, model_name, **kwargs):
    """Instantiate *model_name* as defined by models/<model_type>/train and
    load its most recent checkpoint.

    The model is built from the training arguments saved at *model_path*,
    with **kwargs taking precedence over the saved values.
    """
    train_module = import_module(f'models.{model_type}.train')
    parameters = get_args(model_path)
    parameters.update(kwargs)

    model = train_module.define_models(**parameters)[model_name]
    return load_last_model(model, model_name, model_path)
Example #5
0
def execute(args):
    """Evaluate translation accuracy for one target domain and record it.

    Loads the latest EMA checkpoint, rebuilds the generator / mapping /
    semantic networks, runs evaluate() over the source test split, prints
    the accuracy and appends it to the results file via save_result().
    """
    data_root_src = args.data_root_src
    nz = 16  # latent noise dimension fed to evaluate()
    save_path = args.save_path
    state_dict_path = get_last_model('nets_ema', save_path)

    device = 'cuda'
    domain = int(args.domain)

    # Read the saved training configuration once; the original re-parsed it
    # with get_args(save_path) five separate times.
    saved_args = get_args(save_path)

    # Load model
    state_dict = torch.load(state_dict_path, map_location='cpu')
    generator = Generator(bottleneck_size=64,
                          bottleneck_blocks=4,
                          img_size=args.img_size,
                          max_conv_dim=args.max_conv_dim).to(device)
    generator.load_state_dict(state_dict['generator'])

    mapping = MappingNetwork(nr=saved_args['repr_dim'])
    mapping.load_state_dict(state_dict['mapping_network'])
    mapping.to(device)

    sem_type = saved_args['sem_type']
    sem_path = saved_args.get('sem_path')  # older runs may lack this key
    print(saved_args)
    print(sem_type, sem_path)
    sem = semantics(sem_type, sem_path).cuda()
    sem.eval()

    classifier = define_last_model('classifier',
                                   args.classifier_path,
                                   'classifier',
                                   shape=3,
                                   nc=10).to(device)
    classifier.eval()

    dataset = getattr(images, args.dataset_src)
    # Index [2] selects the third loader returned by the dataset factory —
    # presumably the test split; confirm against the images module.
    src_dataset = dataset(data_root_src, 1, 32)[2]

    accuracy = evaluate(src_dataset, nz, domain, sem, mapping, generator,
                        classifier, device)
    print(accuracy)

    save_result(save_path, args.identifier, state_dict_path, accuracy)
Example #6
0
def execute(args):
    """Translate five fixed sample images into a target domain with five
    random styles each and save the resulting grid as <save_name>.png.
    """
    state_dict_path = args.state_dict_path
    domain = args.domain
    name = args.save_name

    device = 'cuda'
    N = 5  # number of sample images; matches len(idxs) below
    latent_dim = 16
    domain = int(domain)
    # Load model
    state_dict = torch.load(state_dict_path, map_location='cpu')
    bottleneck_size = get_args(args.model_path)['bottleneck_size']
    generator = Generator(bottleneck_size=bottleneck_size,
                          bottleneck_blocks=4).to(device)
    generator.load_state_dict(state_dict['generator'])
    mapping = MappingNetwork()
    mapping.load_state_dict(state_dict['mapping_network'])
    mapping.to(device)

    # Semantic network used to derive pseudo-labels for the source images.
    sem = semantics(args.ss_path, 'vmtc_repr', args.da_path, nc=5).to(device)

    # Hand-picked sample indices from the source dataset.
    dataset = dataset_single(args.data_root_src)
    idxs = [0, 15, 31, 50, 60]
    data = []
    for i in range(N):
        idx = idxs[i]
        data.append(dataset[idx])
    data = torch.stack(data).to(device)

    # (data + 1) * 0.5 suggests images are in [-1, 1] and the semantic net
    # expects [0, 1] — TODO confirm against dataset_single's transforms.
    y_src = sem((data + 1) * 0.5).argmax(1)
    print(y_src)

    # Infer translated images
    # Build 25 (image, style) pairs: each of the 5 images gets 5 style codes.
    d_trg = torch.tensor(domain).repeat(25).long().to(device)
    z_trg = torch.cat(5 * [torch.randn(1, 5, latent_dim)]).to(device)
    z_trg = z_trg.transpose(0, 1).reshape(25, latent_dim)
    data = torch.cat(5 * [data])
    y_src = torch.cat(5 * [y_src])
    print(z_trg.shape, data.shape, y_src.shape)

    # NOTE: N is rebound here — it is now the replicated batch size (25),
    # no longer the original 5. The slicing below relies on both values.
    N, C, H, W = data.size()
    x_concat = [data]

    print(z_trg.shape, y_src.shape, d_trg.shape)
    s_trg = mapping(z_trg, d_trg)
    print(data.shape, s_trg.shape)
    x_fake = generator(data, y_src, s_trg)
    x_concat += [x_fake]

    # Grid layout: first 5 rows are the original images, everything past
    # index N (= 25) is the translated output.
    x_concat = torch.cat(x_concat, dim=0)
    print(x_concat[:5].shape, x_concat[N:].shape)
    results = torch.cat([x_concat[:5], x_concat[N:]])
    save_image(results, 5, f'{name}.png')
Example #7
0
def execute(args):
    """Compute FID between target-domain images and source images translated
    into that domain, then record the score with save_result().
    """
    device = 'cuda'
    latent_dim = 16
    batch_size = 128

    # Restore the generator and mapping network from the latest EMA
    # checkpoint saved under args.save_path.
    save_path = args.save_path
    state_dict_path = get_last_model('nets_ema', save_path)
    checkpoint = torch.load(state_dict_path, map_location='cpu')

    bottleneck_size = get_args(save_path)['bottleneck_size']
    generator = Generator(bottleneck_size=bottleneck_size,
                          bottleneck_blocks=4,
                          img_size=args.img_size).to(device)
    generator.load_state_dict(checkpoint['generator'])
    mapping = MappingNetwork(nc=args.nc)
    mapping.load_state_dict(checkpoint['mapping_network'])
    mapping.to(device)

    # Semantic network used to pseudo-label each source batch.
    sem = semantics(args.ss_path,
                    args.model_type,
                    args.da_path,
                    nc=args.nc,
                    shape=[3, args.img_size]).to(device)
    sem.eval()

    src = torch.utils.data.DataLoader(
        getattr(images, args.dataset_src)(args.data_root_src),
        batch_size=batch_size,
        num_workers=10)
    trg = torch.utils.data.DataLoader(
        getattr(images, args.dataset_tgt)(args.data_root_tgt),
        batch_size=batch_size,
        num_workers=10)

    print(f'Src size: {len(src)}, Tgt size: {len(trg)}')

    # Translate every source batch five times with fresh random styles.
    domain_labels = torch.tensor(args.domain).repeat(batch_size).long().to(device)
    fakes = []
    for batch in src:
        batch = batch.to(device)
        d_trg = domain_labels[:batch.shape[0]]
        y_trg = sem((batch + 1) * 0.5).argmax(1)
        for _ in range(5):
            z_trg = torch.randn(batch.shape[0], latent_dim, device=device)
            s_trg = mapping(z_trg, y_trg, d_trg)
            fakes.append(generator(batch, s_trg))
    generated = normalize(torch.cat(fakes))

    # Collect the real target-domain images.
    real = torch.cat([batch.to(device) for batch in trg])
    real = normalize(real)

    computed_fid = fid.calculate_fid(real, generated, 512, device, 2048)
    print(f'FID: {computed_fid}')
    save_result(save_path, args.identifier, state_dict_path, computed_fid)