Example #1
def train(model):
    model.train()
    if cuda:
        model = model.cuda()

    epoch_loss = []
    all_loss = []
    optimizer_name = torch.optim.Adam
    lr = 0.001
    w_decay = 0  #1.0e-4
    optimizer = optimizer_name(model.parameters(), lr=lr, weight_decay=w_decay)
    gamma = 0.99
    #scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma, last_epoch=-1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                5,
                                                gamma,
                                                last_epoch=-1)
    # Build an info string for the figure suptitle
    info = str(layer)[27:-2] + ": " + str(
        optimizer_name)[25:-2] + " with " + str(scheduler)[26:-26]
    info += ", lr_init = " + str(lr) + ", w_decay = " + str(
        w_decay) + ", gamma = " + str(gamma)

    psnrfunc = lf.PSNRLoss()
    epoch_psnr = []

    num_epochs = 500
    start = time.time()
    epochs = tqdm.trange(num_epochs, desc="Start training", leave=True)
    try:
        for epoch in epochs:
            batch_loss = []
            batch_psnr = []
            for HiResIm, LoResIm in zip(HR_loader, LR_loader):

                HiResIm = HiResIm.unsqueeze_(1).float()
                b, c, h, w = HiResIm.size()
                LoResIm = LoResIm.unsqueeze_(1).float()
                # Variable is a no-op since PyTorch 0.4; .to(device) suffices
                HiResIm = HiResIm.to(device)
                LoResIm = LoResIm.to(device)

                output = model(LoResIm).float()
                # normalize_me = output.clone().unsqueeze(1)
                # for normindex in range(b):
                #     lmin = torch.min(normalize_me[normindex]).float()
                #     lmax = torch.max(normalize_me[normindex]).float()
                #     output[normindex] = (normalize_me[normindex] - lmin)/(lmax-lmin)

                # psnr: presumably skimage.metrics.peak_signal_noise_ratio,
                # imported elsewhere in the original script
                current_psnr = psnr(HiResIm.cpu().detach().numpy(),
                                    output.cpu().detach().numpy())
                batch_psnr.append(current_psnr)

                loss = lossfunc(
                    output,
                    HiResIm).float() + lf.TVLoss(TV_weight)(output).float()
                # loss /= (b*c*w*h)
                # loss /= w*h

                # Snapshot one parameter tensor to verify the optimizer updates
                # it (renamed from a/b: `b` would shadow the batch size
                # unpacked above)
                param_before = list(model.parameters())[1].clone()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                param_after = list(model.parameters())[1].clone()
                #print(torch.equal(param_before.data, param_after.data))

                lossvalue = loss.item()
                all_loss.append(lossvalue)
                batch_loss.append(lossvalue)
                # Bail out on a NaN loss (caught and reported below)
                if torch.isnan(loss):
                    print("nans in the loss function")
                    raise ValueError

                epochs.set_description(
                    "lr = {:.1e}, loss = {:.5e}, psnr = {:.2f}".format(
                        scheduler.get_last_lr()[0], np.mean(batch_loss),
                        current_psnr))
                epochs.refresh()
                # Stepped once per batch: with step_size=5 the LR decays
                # every 5 batches, not every 5 epochs
                scheduler.step()

            epoch_loss.append(np.mean(batch_loss))
            epoch_psnr.append(sum(batch_psnr) / len(batch_psnr))
        print("Training finished, took ", round(time.time() - start, 2),
              "s to complete")
    except (KeyboardInterrupt, SystemExit):
        print("\nscript execution halted ..")
        #print("loss = ", all_loss)
        sys.exit()
    except ValueError:
        print("\nnan found ..")
        #print("loss = ", all_loss)
        sys.exit()
    return epoch_loss, epoch_psnr, info
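
This train() leans on globals defined elsewhere in the script: HR_loader, LR_loader, lossfunc, TV_weight, layer, device, cuda, and a psnr function (presumably skimage.metrics.peak_signal_noise_ratio, given the NumPy-array call signature). A minimal driver sketch with those assumptions made explicit; the model class is a hypothetical stand-in, not part of the original:

import torch
from skimage.metrics import peak_signal_noise_ratio as psnr  # assumed source of psnr

cuda = torch.cuda.is_available()
device = torch.device("cuda" if cuda else "cpu")

# HR_loader / LR_loader, lf, lossfunc and TV_weight are assumed to be
# set up as in Examples #2-#4 before train() is called
lossfunc = torch.nn.SmoothL1Loss()
TV_weight = 0


class MySRModel(torch.nn.Module):  # hypothetical stand-in network
    def __init__(self):
        super().__init__()
        self.net = torch.nn.Conv2d(1, 1, 3, padding=1)

    def forward(self, x):
        return self.net(x)


model = MySRModel()
layer = type(model)  # guess: train() only slices str(layer) for display
epoch_loss, epoch_psnr, info = train(model)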
Example #2
test_LRimages = LRimages[Ntrain:Ntrain + Ntest]
print('{} training images'.format(len(train_HRimages)))
print('{} testing images'.format(len(test_HRimages)))

HR_loader = torch.utils.data.DataLoader(
    train_HRimages, shuffle=False, batch_size=batch_size)  #, pin_memory=cuda)
LR_loader = torch.utils.data.DataLoader(
    train_LRimages, shuffle=False, batch_size=batch_size)  #, pin_memory=cuda)

lossfunc = torch.nn.SmoothL1Loss()
#lossfunc = torch.nn.MSELoss()
TV_weight = 0  #1.e-4
SL_weight = 0  #1.e-4

num_epochs = 50
tvloss = lf.TVLoss(TV_weight)
styleloss = lf.StyleLoss(SL_weight)


def train(model):
    model.train()
    if cuda:
        model = model.cuda()

    epoch_loss = []
    epoch_psnr = []
    all_loss = []
    optimizer_name = torch.optim.Adam
    #lr = 0.001
    w_decay = 0  #1.0e-4
    optimizer = optimizer_name(model.parameters(), lr=lr, weight_decay=w_decay)
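
lf here is a project-local module, so lf.StyleLoss is not a stock PyTorch loss. A rough sketch of the Gram-matrix style penalty such a class typically computes; the class body, weighting, and normalisation are assumptions, not the project's actual code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class StyleLoss(nn.Module):
    # Guess at what lf.StyleLoss(SL_weight) might compute: an MSE between
    # Gram matrices, scaled by a fixed weight
    def __init__(self, weight):
        super().__init__()
        self.weight = weight

    @staticmethod
    def gram(x):
        b, c, h, w = x.size()
        feats = x.view(b, c, h * w)
        # Batched Gram matrix, normalised by the number of entries
        return feats.bmm(feats.transpose(1, 2)) / (c * h * w)

    def forward(self, output, target):
        return self.weight * F.mse_loss(self.gram(output), self.gram(target))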
Example #3
test_LRimages = LRimages[Ntrain:Ntrain + Ntest]
print('{} training images'.format(len(train_HRimages)))
print('{} testing images'.format(len(test_HRimages)))

HR_loader = torch.utils.data.DataLoader(
    train_HRimages, shuffle=False, batch_size=batch_size)  #, pin_memory=cuda)
LR_loader = torch.utils.data.DataLoader(
    train_LRimages, shuffle=False, batch_size=batch_size)  #, pin_memory=cuda)

lossfunc = torch.nn.SmoothL1Loss()
#lossfunc = torch.nn.MSELoss()
TV_weight = 0  #1.e-4
SL_weight = 0  #1.e-10

num_epochs = 2
tvloss = lf.TVLoss(TV_weight)
styleloss = lf.StyleLoss(SL_weight)


def train(model):
    model.train()
    if cuda:
        model = model.cuda()

    epoch_loss = []
    epoch_psnr = []
    all_loss = []
    optimizer_name = torch.optim.Adam
    optimizer = optimizer_name(model.parameters(), lr=lr, weight_decay=w_decay)
    gamma = 0.97
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,
                                                       gamma,
                                                       last_epoch=-1)
Example #4
train_HRimages = HRimages[0:Ntrain]
test_HRimages = HRimages[Ntrain:Ntrain + Ntest]
train_LRimages = LRimages[0:Ntrain]
test_LRimages = LRimages[Ntrain:Ntrain + Ntest]
print('{} training images'.format(len(train_HRimages)))
print('{} testing images'.format(len(test_HRimages)))

HR_loader = torch.utils.data.DataLoader(
    train_HRimages, shuffle=False, batch_size=batch_size)  #, pin_memory=cuda)
LR_loader = torch.utils.data.DataLoader(
    train_LRimages, shuffle=False, batch_size=batch_size)  #, pin_memory=cuda)

lossfunc = torch.nn.SmoothL1Loss()
#lossfunc = torch.nn.MSELoss()
TV_weight = 5.e-5
styleloss = lf.StyleLoss(1.0e-14)

num_epochs = 100


def train(model):
    model.train()
    if cuda:
        model = model.cuda()

    epoch_loss = []
    all_loss = []
    optimizer_name = torch.optim.Adam
    lr = 0.001
    w_decay = 1.0e-5
    optimizer = optimizer_name(model.parameters(), lr=lr, weight_decay=w_decay)
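
lf.TVLoss is likewise project-specific; the call pattern lf.TVLoss(TV_weight)(output) in Examples #1 and #5 suggests a module holding a fixed weight. A minimal anisotropic total-variation penalty of that shape, as an assumption rather than the project's actual implementation:

import torch
import torch.nn as nn


class TVLoss(nn.Module):
    # Sketch of what lf.TVLoss(TV_weight) might compute: the weighted sum
    # of absolute differences between neighbouring pixels
    def __init__(self, weight):
        super().__init__()
        self.weight = weight

    def forward(self, x):
        dh = (x[:, :, 1:, :] - x[:, :, :-1, :]).abs().sum()
        dw = (x[:, :, :, 1:] - x[:, :, :, :-1]).abs().sum()
        return self.weight * (dh + dw) / x.size(0)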
Example #5
def train(model):
    model.train()
    if cuda:
        model = model.cuda()

    epoch_loss = []
    all_loss = []
    optimizer_name = torch.optim.Adam
    lr = 0.01
    w_decay = 0
    optimizer = optimizer_name(model.parameters(), lr=lr, weight_decay=w_decay)
    gamma = 0.9
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,
                                                       gamma,
                                                       last_epoch=-1)
    # Build an info string for the figure suptitle
    info = str(layer)[27:-2] + ": " + str(
        optimizer_name)[25:-2] + " with " + str(scheduler)[26:-26]
    info += ", lr_init = " + str(lr) + ", w_decay = " + str(
        w_decay) + ", gamma = " + str(gamma)

    num_epochs = 100
    start = time.time()
    epochs = tqdm.trange(num_epochs, desc="Start training", leave=True)
    try:
        for epoch in epochs:
            batch_loss = []
            for HiResIm, LoResIm in zip(HR_loader, LR_loader):
                HiResIm = HiResIm.unsqueeze_(1).float()
                b, c, h, w = HiResIm.size()
                LoResIm = LoResIm.unsqueeze_(1).float()
                # Variable is a no-op since PyTorch 0.4; .to(device) suffices
                HiResIm = HiResIm.to(device)
                LoResIm = LoResIm.to(device)

                output = model(LoResIm).float()
                loss = lossfunc(
                    output,
                    HiResIm).float() + lf.TVLoss(TV_weight)(output).float()
                loss /= (b * c * w * h)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                lossvalue = loss.item()
                all_loss.append(lossvalue)
                batch_loss.append(lossvalue)
                # Bail out on a NaN loss (caught and reported below)
                if torch.isnan(loss):
                    raise ValueError

                epochs.set_description("lr = {:.2e}, loss = {:.6e}".format(
                    scheduler.get_lr()[0], lossvalue))
                epochs.refresh()
                scheduler.step()

            epoch_loss.append(np.mean(batch_loss))
        print("Training finished, took ", round(time.time() - start, 2),
              "s to complete")
    except (KeyboardInterrupt, SystemExit):
        print("\nscript execution halted ..")
        print("loss = ", all_loss)
        sys.exit()
    except ValueError:
        print("\nnan found ..")
        print("loss = ", all_loss)
        sys.exit()
    return epoch_loss, info
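
The info string assembled above is formatted for a figure suptitle, so the returned values are presumably plotted by the caller. A plausible sketch of that step; the plotting code is not part of the original snippet:

import matplotlib.pyplot as plt

epoch_loss, info = train(model)   # model as constructed elsewhere

fig, ax = plt.subplots()
ax.plot(epoch_loss)
ax.set_xlabel("epoch")
ax.set_ylabel("mean batch loss")
fig.suptitle(info)                # the string built inside train()
plt.show()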
Example #6
test_LRimages = LRimages[Ntrain:Ntrain + Ntest]
print('{} training images'.format(len(train_HRimages)))
print('{} testing images'.format(len(test_HRimages)))

HR_loader = torch.utils.data.DataLoader(
    train_HRimages, shuffle=False, batch_size=batch_size)  #, pin_memory=cuda)
LR_loader = torch.utils.data.DataLoader(
    train_LRimages, shuffle=False, batch_size=batch_size)  #, pin_memory=cuda)

lossfunc = torch.nn.SmoothL1Loss()
#lossfunc = torch.nn.MSELoss()
TV_weight = 0  #1.e-4
SL_weight = 0  #1.e-10

num_epochs = 1
tvloss = lf.TVLoss(TV_weight)
styleloss = lf.StyleLoss(SL_weight)


def train(model):
    model.train()
    if cuda:
        model = model.cuda()

    epoch_loss = []
    all_loss = []
    optimizer_name = torch.optim.Adam
    lr = 0.001
    w_decay = 0  #1.0e-4
    optimizer = optimizer_name(model.parameters(), lr=lr, weight_decay=w_decay)
    gamma = 0.99