Example #1
# (reconstructed) validation loader -- the assignment target follows from the
# len(valiLoader) call below; the "valiData" dataset name is an assumption
valiLoader = DataLoader(valiData,
                        batch_size=batch_size,
                        shuffle=False,
                        drop_last=True)
print("Validation batches: {}".format(len(valiLoader)))

# setup training
# derive the (rounded) number of epochs from the requested iteration budget
epochs = int(iterations / len(trainLoader) + 0.5)
netG = TurbNetG(channelExponent=expo, dropout=dropout)
print(netG)  # print full net
model_parameters = filter(lambda p: p.requires_grad, netG.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Initialized TurbNet with {} trainable params ".format(params))

netG.apply(weights_init)
if len(doLoad) > 0:
    netG.load_state_dict(torch.load(doLoad))
    print("Loaded model " + doLoad)
netG.cuda()

criterionL1 = nn.L1Loss()
criterionL1.cuda()

optimizerG = optim.Adam(netG.parameters(),
                        lr=lrG,
                        betas=(0.5, 0.999),
                        weight_decay=0.0)

# pre-allocate GPU tensors for one batch; Variable is a legacy autograd wrapper
# (a no-op since PyTorch 0.4), kept here as in the original code
targets = Variable(torch.FloatTensor(batch_size, 3, 128, 128))
inputs = Variable(torch.FloatTensor(batch_size, 3, 128, 128))
targets = targets.cuda()
inputs = inputs.cuda()
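Not part of the original snippet, but a minimal sketch of how a single training step could use the objects set up above, assuming trainLoader yields (input, target) CPU tensor pairs in full batches:

# hypothetical training loop (sketch only, not from the original source)
for epoch in range(epochs):
    netG.train()
    for i, traindata in enumerate(trainLoader, 0):
        inputs_cpu, targets_cpu = traindata
        inputs.data.copy_(inputs_cpu.float())
        targets.data.copy_(targets_cpu.float())

        optimizerG.zero_grad()
        gen_out = netG(inputs)
        lossL1 = criterionL1(gen_out, targets)
        lossL1.backward()
        optimizerG.step()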
Example #2
losses = []
models = []

# for si in range(25):
for si in range(1):
    s = chr(96 + si)
    if (si == 0):
        s = ""  # check modelG, and modelG + char
    # modelFn = "./" + prefix + "modelG{}{}".format(suffix,s)
    modelFn = path
    if not os.path.isfile(modelFn):
        continue

    models.append(modelFn)
    log(lf, "Loading " + modelFn)
    netG.load_state_dict(torch.load(modelFn, map_location=device))
    log(lf, "Loaded " + modelFn)
    netG.to(device)

    criterionL1 = nn.L1Loss()
    criterionL1.to(device)
    L1val_accum = 0.0
    L1val_dn_accum = 0.0
    lossPer_p_accum = 0
    lossPer_v_accum = 0
    lossPer_accum = 0

    netG.eval()

    for i, data in enumerate(testLoader, 0):
        inputs_cpu, targets_cpu = data
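        # --- hypothetical continuation (the original example is cut off here) ---
        # sketch only: move the batch to the device, run the net without gradients,
        # and accumulate the L1 error in the accumulator defined above
        inputs_gpu = inputs_cpu.float().to(device)
        targets_gpu = targets_cpu.float().to(device)

        with torch.no_grad():
            outputs = netG(inputs_gpu)
        L1val_accum += criterionL1(outputs, targets_gpu).item()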
Example #3
models = []
loss_p_list = []
loss_v_list = []
accum_list = []

for si in range(25):
    s = chr(96 + si)
    if (si == 0):
        s = ""  # check modelG, and modelG + char
    modelFn = "./" + prefix + "modelG{}{}".format(suffix, s)
    if not os.path.isfile(modelFn):
        continue

    models.append(modelFn)
    log(lf, "Loading " + modelFn)
    netG.load_state_dict(torch.load(modelFn))
    log(lf, "Loaded " + modelFn)
    netG.cuda()

    criterionLoss = nn.L1Loss()
    #criterionLoss = nn.MSELoss()
    #criterionLoss = nn.SmoothL1Loss()
    criterionLoss.cuda()
    Lossval_accum = 0.0
    Lossval_dn_accum = 0.0
    lossPer_p_accum = 0
    lossPer_v_accum = 0
    lossPer_accum = 0

    netG.eval()
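    # --- hypothetical continuation (the original example is cut off here) ---
    # sketch only: run a test loader (as in Example #2) and collect per-model
    # averages in the lists defined above; assumes channel 0 holds pressure and
    # channels 1-2 the velocity components
    for i, data in enumerate(testLoader, 0):
        inputs_cpu, targets_cpu = data
        inputs_gpu = inputs_cpu.float().cuda()
        targets_gpu = targets_cpu.float().cuda()
        with torch.no_grad():
            outputs = netG(inputs_gpu)
        Lossval_accum += criterionLoss(outputs, targets_gpu).item()
        lossPer_p_accum += criterionLoss(outputs[:, 0:1], targets_gpu[:, 0:1]).item()
        lossPer_v_accum += criterionLoss(outputs[:, 1:3], targets_gpu[:, 1:3]).item()

    loss_p_list.append(lossPer_p_accum / len(testLoader))
    loss_v_list.append(lossPer_v_accum / len(testLoader))
    accum_list.append(Lossval_accum / len(testLoader))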
Example #4
def getModel(expo):
    # load a pretrained TurbNetG for the given channel exponent and switch it to eval mode
    netG = TurbNetG(channelExponent=expo).to(device)
    netG.load_state_dict(torch.load(f'models/model_w_{expo}', map_location=device))
    netG.eval()
    return netG
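A possible usage sketch (not from the original source; the exponent value and the 3 x 128 x 128 input shape are assumptions carried over from Example #1):

# hypothetical usage: expects a checkpoint at models/model_w_5
netG = getModel(5)
sample = torch.randn(1, 3, 128, 128, device=device)  # (batch, channels, H, W)
with torch.no_grad():
    prediction = netG(sample)
print(prediction.shape)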