Example #1
    return train_mask


def load_w(w_path, w_list):
    # Stack every saved W+ latent ([18, 512] per file) into one batch tensor.
    w = np.zeros((len(w_list), 18, 512))
    for i, fn in enumerate(w_list):
        w[i] = np.load(w_path + fn)
    return torch.from_numpy(w).float().cuda()
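
# Example usage (a sketch; the file listing is an assumption,
# './dataset/WLatent200/' is the path used later in this script):
# w_list = sorted(os.listdir('./dataset/WLatent200/'))
# all_w = load_w('./dataset/WLatent200/', w_list)  # -> [len(w_list), 18, 512] CUDA tensor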


with torch.no_grad():
    '''
    Set input_is_Wlatent: True for a W-latent input, False for a Z-latent input.
    '''
    models = model.Generator(size=1024,
                             style_dim=512,
                             n_mlp=8,
                             input_is_Wlatent=True).to(device)
    models.load_state_dict(state_dict['g_ema'], strict=False)
    models = InstrumentedModel(models)
    models.eval()
    models.cuda()

    # Retain activations from all 16 conv layers of the 1024px generator
    # (conv count = 2 * (log2(size) - 2)).
    models.retain_layers(['convs.{}'.format(i) for i in range(16)])
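
    # After a forward pass, the retained feature maps can be read back, e.g.
    # (a sketch assuming netdissect-style InstrumentedModel and a W+ latent w):
    # img, _ = models([w])
    # feat = models.retained_layer('convs.0')  # [1, C, H, W] activations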
    '''
    Load Latent
    [1,1,512] for Z
    [1,18,512] for W+
    '''
    # Collect each W+ latent as a [1, 18, 512] tensor
    # (w_path / w_list are assumed to name the latent directory and its files).
    im_list = []
    for fn in w_list:
        w = torch.from_numpy(np.load(w_path + fn)).float().cuda()
        # print(w.shape)
        # assert False
        im_list.append(w.unsqueeze(0))

# w_path = './dataset/WLatent200/'
#
# all_w = load_w(w_path, num_pic)
#
# ffhq = TensorDataset(all_w)

im_latent = torch.cat(im_list, dim=0)
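
# Sanity check in the style of the commented prints above:
# print(im_latent.shape)  # expected [N, 18, 512] for N W+ latents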

with torch.no_grad():
    # Rebuild the generator at 512px; input_is_Wlatent=False expects
    # Z-latents, which are routed through the n_mlp=8 mapping network.
    models = model.Generator(size=512,
                             style_dim=512,
                             n_mlp=8,
                             input_is_Wlatent=False).to(device)
    models.load_state_dict(state_dict['g_ema'], strict=False)
    models = InstrumentedModel(models)
    models.eval()
    models.cuda()

    # Retain all 14 conv layers of the 512px generator
    # (2 * (log2(512) - 2) = 14).
    models.retain_layers(['convs.{}'.format(i) for i in range(14)])
    # ffhq_noise_dataset = torch.utils.data.DataLoader(ffhq,
    #         batch_size=1, num_workers=0, pin_memory=False)

    all_iou = []
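
    # Hypothetical helper (an assumption, not from the original script):
    # the intersection-over-union that all_iou presumably accumulates,
    # for a predicted and a ground-truth boolean mask.
    def iou(pred_mask, gt_mask):
        inter = (pred_mask & gt_mask).float().sum()
        union = (pred_mask | gt_mask).float().sum()
        return (inter / union.clamp(min=1e-8)).item()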