data = dataset.TurbDataset(prop, shuffle=1)
trainLoader = DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True)
print("Training batches: {}".format(len(trainLoader)))
dataValidation = dataset.ValiDataset(data)
valiLoader = DataLoader(dataValidation, batch_size=batch_size, shuffle=False, drop_last=True)
print("Validation batches: {}".format(len(valiLoader)))

# setup training
epochs = int(iterations / len(trainLoader) + 0.5)
netG = TurbNetG(channelExponent=expo, dropout=dropout)
print(netG)  # print full net
model_parameters = filter(lambda p: p.requires_grad, netG.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Initialized TurbNet with {} trainable params".format(params))

netG.apply(weights_init)
if len(doLoad) > 0:
    netG.load_state_dict(torch.load(doLoad))
    print("Loaded model " + doLoad)
netG.cuda()

criterionL1 = nn.L1Loss()
criterionL1.cuda()
# Adam settings below are assumed (lrG taken to be defined with the other
# hyper-parameters above); adjust to the actual training configuration
optimizerG = optim.Adam(netG.parameters(), lr=lrG, betas=(0.5, 0.999), weight_decay=0.0)
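# --- sketch: a minimal epoch loop wiring the setup above together ---
# (illustrative only, assuming trainLoader yields (input, target) pairs;
#  the actual training script may add learning-rate decay, logging, and a
#  validation pass over valiLoader)
for epoch in range(epochs):
    netG.train()
    for i, traindata in enumerate(trainLoader, 0):
        inputs_cpu, targets_cpu = traindata
        inputs_gpu = inputs_cpu.float().cuda()
        targets_gpu = targets_cpu.float().cuda()

        netG.zero_grad()
        gen_out = netG(inputs_gpu)                   # forward pass
        lossL1 = criterionL1(gen_out, targets_gpu)   # L1 error against the target fields
        lossL1.backward()
        optimizerG.step()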
# pre-allocate buffers for inputs, targets and their de-normalized copies;
# Variable() is a no-op wrapper in PyTorch >= 0.4, kept only for compatibility
targets = torch.FloatTensor(1, 3, 128, 128)
targets = Variable(targets)
targets = targets.to(device)
inputs = torch.FloatTensor(1, 3, 128, 128)
inputs = Variable(inputs)
inputs = inputs.to(device)
targets_dn = torch.FloatTensor(1, 3, 128, 128)
targets_dn = Variable(targets_dn)
targets_dn = targets_dn.to(device)
outputs_dn = torch.FloatTensor(1, 3, 128, 128)
outputs_dn = Variable(outputs_dn)
outputs_dn = outputs_dn.to(device)

netG = TurbNetG(channelExponent=expo)
lf = "./" + prefix + "testout{}.txt".format(suffix)
utils.makeDirs(["results_test"])

# loop over different trained models
avgLoss = 0.
losses = []
models = []

# for si in range(25):
for si in range(1):
    s = chr(96 + si)
    if si == 0:
        s = ""
    # check modelG, and modelG + char
    # modelFn = "./" + prefix + "modelG{}{}".format(suffix, s)
    modelFn = path
targets = torch.FloatTensor(1, 3, res, res)
targets = Variable(targets)
targets = targets.cuda()
inputs = torch.FloatTensor(1, 3, res, res)
inputs = Variable(inputs)
inputs = inputs.cuda()
targets_dn = torch.FloatTensor(1, 3, res, res)
targets_dn = Variable(targets_dn)
targets_dn = targets_dn.cuda()
outputs_dn = torch.FloatTensor(1, 3, res, res)
outputs_dn = Variable(outputs_dn)
outputs_dn = outputs_dn.cuda()

netG = TurbNetG(channelExponent=expo)
lf = "./" + prefix + "testout{}.txt".format(suffix)
utils.makeDirs(["results_test"])

# loop over different trained models
avgLoss = 0.
losses = []
models = []
loss_p_list = []
loss_v_list = []
accum_list = []
for si in range(25):
    s = chr(96 + si)
    if si == 0:
        s = ""
    # check modelG, and modelG + char
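    # --- sketch: per-model evaluation body (illustrative only) ---
    # Assumes: "os" is imported, "testLoader" yields (input, target) batches of
    # size 1 matching the buffers above, and "criterionL1" is an nn.L1Loss();
    # the actual test script presumably also splits the error into pressure
    # (loss_p_list) and velocity (loss_v_list) components.
    modelFn = "./" + prefix + "modelG{}{}".format(suffix, s)
    if not os.path.isfile(modelFn):
        continue  # skip model variants that were not trained
    models.append(modelFn)
    netG.load_state_dict(torch.load(modelFn))
    netG.cuda().eval()

    L1_accum = 0.0
    with torch.no_grad():
        for i, validata in enumerate(testLoader, 0):
            inputs_cpu, targets_cpu = validata
            inputs.data.copy_(inputs_cpu.float())
            targets.data.copy_(targets_cpu.float())
            outputs = netG(inputs)
            L1_accum += criterionL1(outputs, targets).item()
    losses.append(L1_accum / len(testLoader))
    avgLoss += losses[-1]  # averaged over the evaluated models afterwards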
def getModel(expo):
    netG = TurbNetG(channelExponent=expo).to(device)
    netG.load_state_dict(torch.load(f'models/model_w_{expo}', map_location=device))
    netG.eval()
    return netG
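# --- example usage (illustrative): load a checkpoint and run inference ---
# "expo" must match the channel exponent the checkpoint was trained with;
# the dummy input mirrors the 1x3x128x128 buffers allocated earlier (adjust
# the resolution if the model was trained on a different size)
netG = getModel(expo)
sample = torch.zeros(1, 3, 128, 128, device=device)
with torch.no_grad():
    prediction = netG(sample)
print(prediction.shape)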