Example 1
if args.model_id == 'default':
    netG.apply(weights_init)
else:
    try:
        pretrained_model = torch.load('./Generators/' + args.model_id + '.pt',
                                      map_location=device)
        try:
            # The checkpoint may be a full saved model or a bare state dict.
            netG.load_state_dict(pretrained_model.state_dict())
        except AttributeError:
            netG.load_state_dict(pretrained_model)
    except Exception:
        print('G weights do not match, falling back to random init')
        netG.apply(weights_init)
# Print the model
print(netG)

# Create the encoder
netE = Encoder(args).to(device)

# Handle multi-gpu if desired
if (device.type == 'cuda') and (args.ngpu > 1):
    netE = nn.DataParallel(netE, list(range(args.ngpu)))

# Apply the weights_init function to randomly initialize all weights
#  to mean=0, stdev=0.02 (unless a pretrained model id is given).
if args.model_id == 'default':
    netE.apply(weights_init)
else:
    try:
        pretrained_model = torch.load('./Encoders/' + args.model_id + '.pt',
                                      map_location=device)
        try:
            netE.load_state_dict(pretrained_model.state_dict())
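
Both examples call a `weights_init` function that is not defined in the snippets shown. A minimal sketch of the standard DCGAN-style initializer that the comments describe (normal weights with mean 0, stdev 0.02); this is an assumption, since the actual definition is not included:

import torch.nn as nn

def weights_init(m):
    # Standard DCGAN initialization: N(0, 0.02) for conv weights,
    # N(1, 0.02) for batch-norm scale, zeros for batch-norm bias.
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)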
Example 2
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]))

    # Create the dataloader
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=bs,
                                             shuffle=True,
                                             num_workers=args.num_workers)

# Decide which device we want to run on
device = torch.device("cuda:0" if (
    torch.cuda.is_available() and args.ngpu > 0) else "cpu")

# Create the encoder
netE = Encoder(args).to(device)

# Handle multi-gpu if desired
if (device.type == 'cuda') and (args.ngpu > 1):
    netE = nn.DataParallel(netE, list(range(args.ngpu)))

# Apply the weights_init function to randomly initialize all weights
#  to mean=0, stdev=0.02 (unless a pretrained model id is given).
if args.model_id == 'default':
    netE.apply(weights_init)
else:
    try:
        pretrained_model = torch.load('./Encoders/' + args.model_id + '.pt',
                                      map_location=device)
        try:
            netE.load_state_dict(pretrained_model.state_dict())
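
Both examples repeat the same load-or-fall-back logic for the generator and the encoder. A hypothetical helper capturing that pattern (the `load_pretrained` name and `folder` parameter are illustrative, not part of the original code):

import torch

def load_pretrained(net, folder, model_id, device, init_fn):
    # Mirror of the pattern above: use random init for the default id,
    # otherwise try to load a full saved model or a bare state dict,
    # falling back to random init if anything goes wrong.
    if model_id == 'default':
        net.apply(init_fn)
        return net
    try:
        checkpoint = torch.load(folder + model_id + '.pt', map_location=device)
        try:
            net.load_state_dict(checkpoint.state_dict())
        except AttributeError:
            # The file held a plain state dict rather than a full model.
            net.load_state_dict(checkpoint)
    except Exception:
        print('weights do not match, falling back to random init')
        net.apply(init_fn)
    return net

Usage would then be, e.g., netE = load_pretrained(netE, './Encoders/', args.model_id, device, weights_init).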