Example no. 1
0
                         batch_size=batch_size,
                         shuffle=True,
                         drop_last=True)
# Report how many mini-batches one training epoch will contain.
print("Training batches: {}".format(len(trainLoader)))
# Validation split built from the same dataset object; ValiDataset is
# project-local — presumably it wraps the held-out samples of `data`
# (TODO confirm against its definition).
dataValidation = dataset.ValiDataset(data)
valiLoader = DataLoader(dataValidation,
                        batch_size=batch_size,
                        shuffle=False,  # keep validation order deterministic
                        drop_last=True)
print("Validation batches: {}".format(len(valiLoader)))

# setup training
# Convert the target iteration budget into a whole number of epochs;
# int(x + 0.5) rounds half-up for the positive values expected here.
epochs = int(iterations / len(trainLoader) + 0.5)
# Generator network; TurbNetG is project-local (channel width presumably
# scales with 2**channelExponent — verify against the model definition).
netG = TurbNetG(channelExponent=expo, dropout=dropout)
print(netG)  # print full net
# Count trainable parameters: product of each trainable tensor's shape,
# summed over all parameters.
model_parameters = filter(lambda p: p.requires_grad, netG.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Initialized TurbNet with {} trainable params ".format(params))

# Apply custom weight initialization, then optionally resume from a
# checkpoint (doLoad is a path string; empty string means train from scratch).
netG.apply(weights_init)
if len(doLoad) > 0:
    netG.load_state_dict(torch.load(doLoad))
    print("Loaded model " + doLoad)
netG.cuda()  # move the model to GPU (assumes CUDA is available)

# L1 training loss, evaluated on the GPU.
criterionL1 = nn.L1Loss()
criterionL1.cuda()

optimizerG = optim.Adam(netG.parameters(),
                        lr=lrG,
                        betas=(0.5, 0.999),
Example no. 2
0
# create pytorch data object with dfp dataset
# TurbDataset is project-local; shuffle=1 presumably shuffles the training
# split, and `res` fixes the sample resolution — TODO confirm.
data = dataset.TurbDataset(prop, shuffle=1, dataDir="../data/make-datas/256/train/", dataDirTest="../data/make-datas/256/test/", res=res)
trainLoader = DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True)
# Batch counts are logged both to stdout and to the open log file `fout`.
print("Training batches: {}".format(len(trainLoader)))
print("Training batches: {}".format(len(trainLoader)), file=fout)
# Validation split built from the same dataset object (project-local class).
dataValidation = dataset.ValiDataset(data)
valiLoader = DataLoader(dataValidation, batch_size=batch_size, shuffle=False, drop_last=True) 
print("Validation batches: {}".format(len(valiLoader)))
print("Validation batches: {}".format(len(valiLoader)), file=fout)

# setup training
# Epoch count derived from the iteration budget; the *2 doubles the training
# length relative to a plain iterations/len(trainLoader) schedule, and the
# +0.5 rounds half-up to the nearest whole epoch.
epochs = int((iterations/len(trainLoader))*2 + 0.5)
# Generator network; TurbNetG is project-local (channel width presumably
# scales with 2**channelExponent — verify against the model definition).
netG = TurbNetG(channelExponent=expo, dropout=dropout)
print(netG) # print full net
print(netG, file=fout) # print train.txt
# Count trainable parameters: product of each trainable tensor's shape,
# summed over all parameters.
model_parameters = filter(lambda p: p.requires_grad, netG.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Initialized TurbNet with {} trainable params ".format(params))
print("Initialized TurbNet with {} trainable params ".format(params), file=fout)

# Apply custom weight initialization, then optionally resume from a
# checkpoint (doLoad is a path string; empty string means train from scratch).
netG.apply(weights_init)
if len(doLoad)>0:
    netG.load_state_dict(torch.load(doLoad))
    print("Loaded model "+doLoad)
netG.cuda()  # move the model to GPU (assumes CUDA is available)

#cost function(L1, MSE, SmoothL1)
# MSE is the active choice; the commented alternatives were tried variants —
# kept here by the author for easy switching.
#criterionLoss = nn.L1Loss()
criterionLoss = nn.MSELoss()
#criterionLoss = nn.SmoothL1Loss()
criterionLoss.cuda()