Example #1
# ===================================================================================================== #
import torch
import torch.nn as nn

# SrDataset, Module, dire, width, height, pretrained, and gpu are assumed
# to be defined elsewhere in the project.
batch_size = {'train': 32, 'valid': 32}
dataloader = {
    phase:
    torch.utils.data.DataLoader(dataset=SrDataset(phase, dire, width, height),
                                batch_size=batch_size[phase],
                                shuffle=False)
    for phase in ['valid']
}

use_gpu = torch.cuda.is_available()
module = Module()
module.load_state_dict(torch.load(pretrained))

# Dump every parameter tensor to a single binary file as raw bytes
# (no shape or dtype metadata is stored).
with open('parameters', 'wb') as fid:
    for param in module.parameters():
        param.data.numpy().tofile(fid)

if use_gpu:
    module.cuda()
    module = nn.DataParallel(module, gpu)  # gpu: list of device ids, defined elsewhere

for epoch in range(1):
    for phase in ["valid"]:
        print("Testing...")
        module.eval()  # equivalent to module.train(False)
        for param in module.parameters():
            param.requires_grad_(False)
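
The dump above stores raw bytes only, with no shape or dtype metadata. A minimal sketch of reading the file back, assuming float32 parameters and that the same Module architecture is available to supply the shapes (both are assumptions, not shown in the snippet):

import numpy as np
import torch

# Hypothetical reader for the raw dump: float32 and the parameter order are
# assumptions carried over from how the file was written above.
module = Module()
with open('parameters', 'rb') as fid:
    flat = np.fromfile(fid, dtype=np.float32)
offset = 0
for param in module.parameters():
    n = param.numel()
    chunk = flat[offset:offset + n].reshape(tuple(param.shape))
    param.data.copy_(torch.from_numpy(chunk.copy()))
    offset += n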
Example #2
import torch
import torch.nn as nn

# SrDataset, Module, width, height, img_dir, and pretrained are assumed
# to be defined elsewhere in the project.
batch_sz = {'train': 12, 'valid': 10}
dataloader = {
    phase:
    torch.utils.data.DataLoader(dataset=SrDataset(phase, width, height,
                                                  img_dir),
                                batch_size=batch_sz[phase],
                                shuffle=True)
    for phase in ['train', 'valid']
}

use_gpu = torch.cuda.is_available()

module = Module()

if pretrained is None:
    # No checkpoint given: initialize every parameter from N(0.001, 0.05).
    for param in module.parameters():
        print(param.size())
        param.data.normal_(0.001, 0.05)
else:
    module.load_state_dict(torch.load(pretrained))

if use_gpu:
    module.cuda()
    #module = nn.DataParallel(module, gpu)

# print(module)
loss = nn.MSELoss()
optimizer = torch.optim.Adam(module.parameters(), lr=1)
#optimizer = nn.DataParallel(optimizer,gpu).module

# Regularization weight; move it to the GPU only when one is available,
# consistent with the use_gpu guard above.
lam = torch.tensor(0.025)
if use_gpu:
    lam = lam.cuda()
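
The loss, optimizer, and lam above are only constructed, never used, in this excerpt. A minimal sketch of a single training step, assuming the loaders yield (input, target) pairs and that lam weights an extra penalty term (the actual loss composition is not part of the snippet):

module.train()
for inputs, targets in dataloader['train']:
    if use_gpu:
        inputs, targets = inputs.cuda(), targets.cuda()
    optimizer.zero_grad()
    outputs = module(inputs)
    # The lam-weighted penalty is an assumption; the snippet never shows
    # how lam enters the objective.
    total = loss(outputs, targets) + lam * outputs.abs().mean()
    total.backward()
    optimizer.step()
    break  # one illustrative step only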