Code example #1
0
    # Optional model components, toggled from the CLI (both default to off).
    parser.add_argument('--pixel_norm', default=False, action="store_true",
                        help='a normalization method inside the model, you can try use it or not depends on the dataset')
    parser.add_argument('--tanh', default=False, action="store_true",
                        help='an output non-linearity on the output of Generator, you can try use it or not depends on the dataset')

    args = parser.parse_args()

    # Echo the full parsed configuration so the run is reproducible from logs.
    print(str(args))

    trial_name = args.trial_name
    # Single-GPU training; the GPU index comes from the CLI.
    device = torch.device("cuda:%d" % (args.gpu_id))
    input_code_size = args.z_dim  # dimensionality of the latent (z) vector
    batch_size = args.batch_size
    # NOTE(review): presumably the number of discriminator updates per
    # generator update (WGAN-style) — confirm against the training loop.
    n_critic = args.n_critic

    # Two generator instances: `generator` is the one being trained;
    # `g_running` holds a second copy (see accumulate() below) used for
    # evaluation/sampling, so both must be built with identical hyperparameters.
    generator = Generator(in_channel=args.channel, input_code_dim=input_code_size, pixel_norm=args.pixel_norm,
                          tanh=args.tanh).to(device)
    discriminator = Discriminator(feat_dim=args.channel).to(device)
    g_running = Generator(in_channel=args.channel, input_code_dim=input_code_size, pixel_norm=args.pixel_norm,
                          tanh=args.tanh).to(device)

    # # you can directly load a pretrained model here
    # generator.load_state_dict(torch.load('./tr checkpoint/150000_g.model'))
    # g_running.load_state_dict(torch.load('checkpoint/150000_g.model'))
    # discriminator.load_state_dict(torch.load('checkpoint/150000_d.model'))

    # The running copy is never trained directly; keep it in eval mode.
    g_running.train(False)

    # Adam with beta1=0 is the usual choice for progressive-GAN training.
    g_optimizer = optim.Adam(generator.parameters(), lr=args.lr, betas=(0.0, 0.99))
    # NOTE(review): discriminator LR is scaled to 0.5% of the generator LR —
    # unusually small; confirm this factor is intentional.
    d_optimizer = optim.Adam(discriminator.parameters(), lr=(args.lr * 0.005), betas=(0.0, 0.99))

    # Presumably initializes g_running's weights from generator (decay factor 0)
    # — confirm against the accumulate() definition, which is not visible here.
    accumulate(g_running, generator, 0)
Code example #2
0
    # Element-wise product of the reference array with the random matrix
    # (NOTE(review): looks like random_matrix acts as a mask/weighting — the
    # code that builds it is not visible here; confirm).
    output_matrix = ref_k_array * random_matrix
    # output_matrix = torch.zeros_like(input_matrix)
    # Return both the masked output and the mask itself as float32 tensors on
    # the target device; torch.as_tensor avoids a copy when possible.
    return torch.as_tensor(output_matrix, dtype=torch.float32,
                           device=device), torch.as_tensor(random_matrix,
                                                           dtype=torch.float32,
                                                           device=device)


# Target device; switch to the commented line below to run on CPU.
device = 'cuda:0'
# device = 'cpu'
b_size = 32        # total number of latent samples (built as two halves of 16)
input_z_size = 32  # channel depth of the spatial latent z, shape (B, C, 2, 2)

generator = Generator(in_channel=128,
                      input_z_channels=input_z_size,
                      pixel_norm=False,
                      tanh=False).to(device)

# Load pretrained generator weights from a fixed trial checkpoint.
generator.load_state_dict(
    torch.load('trial_test18_2021-06-24_12_58/checkpoint/400000_g.model'))

# Prepare z vectors for training (top_bottom)
# First half of the batch: independent random spatial latents of shape
# (b_size/2, input_z_size, 2, 2).
gen_z_first_half = torch.randn(b_size // 2, input_z_size, 2, 2).to(device)
# Top rows (first half along dim 2) of each latent, with the BATCH order
# reversed — so sample i's top is paired with sample (n-1-i)'s top below.
gen_z_top_swap = torch.flip(
    gen_z_first_half[:, :, 0:gen_z_first_half.shape[2] // 2, :], dims=[0])
# Bottom rows (second half along dim 2), kept in original batch order.
gen_z_bttm = gen_z_first_half[:, :, gen_z_first_half.shape[2] //
                              2:gen_z_first_half.shape[2], :]
# Second half of the batch: swapped tops stacked over original bottoms.
gen_z_second_half = torch.cat((gen_z_top_swap, gen_z_bttm), dim=2)
# Full batch: originals followed by their top-swapped counterparts.
gen_z = torch.cat((gen_z_first_half, gen_z_second_half), dim=0)

# Sanity check: expect (b_size // 2, input_z_size, 1, 2).
print(gen_z_top_swap.shape)
Code example #3
0
    # Debug: report the extracted audio feature shape before building the loader.
    print(audio_feats.shape)

    # make smooth music-guided noise vectors for feeding into Gen along with audio feats
    # NOTE(review): musicality_noise_vectors() is defined elsewhere — presumably
    # derives noise from the spectrogram (specm) and its gradient (gradm); confirm.
    noise_vectors = musicality_noise_vectors(specm, gradm)

    # make audio dataloader
    # shuffle=False keeps frames in temporal order for video assembly.
    data = music_data(audio_feats, noise_vectors, frame_batch=frame_batch)
    data_loader = DataLoader(data,
                             shuffle=False,
                             batch_size=batch_size,
                             num_workers=4)

    # load generator model
    # out_channel = frame_batch * 3: presumably 3 RGB channels per frame in the
    # batch — confirm against the Generator definition.
    g_running = Generator(in_channel=128,
                          out_channel=frame_batch * 3,
                          input_code_dim=256,
                          pixel_norm=False,
                          tanh=False).to(device)
    g_running.load_state_dict(torch.load(checkpoint_file))
    # Inference only — disable training-mode behavior (dropout/BN updates).
    g_running.train(False)

    #pass audio feats through generator
    print("Generating Video...")
    all_frames = []
    alpha = 1  # full blend: no progressive-growing fade-in at inference time
    step = 6  #step 6 does full resolution of 256x256 images
    #detransform = transforms.Denormalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    # Fixed noise shared across all batches so variation comes from the audio.
    noise_const = torch.randn(batch_size, 128).to(device)
    for feats, noise in tqdm(data_loader):
        #print(label.size())
        #label = next(data_loader) # label is the audio feat
Code example #4
0
    # Optional model components, toggled from the CLI (both default to off).
    parser.add_argument('--pixel_norm', default=False, action="store_true",
                        help='a normalization method inside the model, you can try use it or not depends on the dataset')
    parser.add_argument('--tanh', default=False, action="store_true",
                        help='an output non-linearity on the output of Generator, you can try use it or not depends on the dataset')

    args = parser.parse_args()

    # Echo the full parsed configuration so the run is reproducible from logs.
    print(str(args))

    trial_name = args.trial_name
    # Single-GPU training; the GPU index comes from the CLI.
    device = torch.device("cuda:%d" % (args.gpu_id))
    input_z_channels = args.z_dim    # Thickness (channel depth) of the z vector
    batch_size = args.batch_size
    # NOTE(review): presumably the number of discriminator updates per
    # generator update (WGAN-style) — confirm against the training loop.
    n_critic = args.n_critic

    # Two generator instances: `generator` is trained; `g_running` holds a
    # second copy (see accumulate() below) used for evaluation/sampling, so
    # both must be built with identical hyperparameters.
    generator = Generator(in_channel=args.channel, input_z_channels=input_z_channels, pixel_norm=args.pixel_norm,
                          tanh=args.tanh).to(device)
    discriminator = Discriminator(feat_dim=args.channel).to(device)
    g_running = Generator(in_channel=args.channel, input_z_channels=input_z_channels, pixel_norm=args.pixel_norm,
                          tanh=args.tanh).to(device)

    # # you can directly load a pretrained model here
    # generator.load_state_dict(torch.load('./tr checkpoint/150000_g.model'))
    # g_running.load_state_dict(torch.load('checkpoint/150000_g.model'))
    # discriminator.load_state_dict(torch.load('checkpoint/150000_d.model'))

    # The running copy is never trained directly; keep it in eval mode.
    g_running.train(False)

    # Adam with beta1=0 is the usual choice for progressive-GAN training.
    g_optimizer = optim.Adam(generator.parameters(), lr=args.lr, betas=(0.0, 0.99))
    # NOTE(review): discriminator LR is 8% of the generator LR — confirm this
    # scaling factor is intentional.
    d_optimizer = optim.Adam(discriminator.parameters(), lr=(args.lr * 0.08), betas=(0.0, 0.99))

    # Presumably initializes g_running's weights from generator (decay factor 0)
    # — confirm against the accumulate() definition, which is not visible here.
    accumulate(g_running, generator, 0)