Example #1
import argparse
import os

import torch

# reconstructed header: the snippet was truncated above; the imports, the
# parser object and the "--cuda" flag are inferred from opt.cuda below
parser = argparse.ArgumentParser(description="PyTorch SRDense demo")
parser.add_argument("--cuda",
                    action="store_true",
                    help="use cuda?")
parser.add_argument("--model",
                    default="model_adam/model_epoch_60.pth",
                    type=str,
                    help="model path")
#parser.add_argument("--image", default="butterfly_GT", type=str, help="image name")
parser.add_argument("--scale",
                    default=4,
                    type=int,
                    help="scale factor, Default: 4")
parser.add_argument("--input", default="", help="input file path")
parser.add_argument("--output", default="", help="save path for output")

from srdense.thinnet import tinynet as srnet
model = srnet()
model_name = 'tiny_model'
model_folder = os.path.join('model_adam', model_name)
load_epoch = 200  #133

weights_name = 'model'

#img_root = os.path.join(proj_root, 'data' ,test_folder)
#save_folder = os.path.join(proj_root, 'data','hd_results', test_folder, model_name)
#save_folder = os.path.join('hd_results', model_name)
#if not os.path.exists(save_folder):
#    os.makedirs(save_folder)

opt = parser.parse_args()
cuda = opt.cuda
use_cuda = cuda and torch.cuda.is_available()
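
For context, a minimal sketch of how the parsed options might be consumed for inference follows; the device handling and the assumption that the checkpoint is a plain state_dict (as saved in Example #2) are inferences, not code from the original project:

# hypothetical usage of the options parsed above (assumed, not from the source)
state = torch.load(opt.model, map_location='cuda' if use_cuda else 'cpu')
model.load_state_dict(state)
if use_cuda:
    model = model.cuda()
model.eval()  # switch to inference mode before super-resolving opt.input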
Example #2
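# Note: main() relies on module-level names from Example #1 (parser, srnet,
# model_folder, model_name) plus project-local helpers not shown here
# (L1_Charbonnier_loss, SSIM, DatasetFromfolder, save_images, plot_img,
# l1_plot, ssim_plot), as well as imports of os, random, torch,
# torch.optim as optim and torch.autograd.Variable.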
def main():

    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    # draw and record a random seed so the run can be reproduced later
    opt.seed = random.randint(1, 10000)
    print("Random Seed: {}".format(opt.seed))
    torch.manual_seed(opt.seed)
    if cuda:
        import torch.backends.cudnn as cudnn
        torch.cuda.manual_seed(opt.seed)
        # let cuDNN auto-tune convolution algorithms for the fixed input size
        cudnn.benchmark = True

    print("===> Building model")
    model = srnet()
    criterion = L1_Charbonnier_loss()

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()
        ssim_sim = SSIM().cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        model_path = os.path.join(
            model_folder, 'model_epoch_{}.pth'.format(opt.reload_epoch))

        if os.path.isfile(model_path):
            print("=> loading checkpoint '{}'".format(model_path))
            model_state = torch.load(model_path)
            model.load_state_dict(model_state)
            opt.start_epoch = opt.reload_epoch + 1

        else:
            print("=> no checkpoint found at '{}'".format(model_path))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    #optimizer    = optim.SGD(model.parameters(), lr=opt.lr, momentum = 0.9, nesterov=True)

    print("===> Training")
    model.train()

    print("===> Loading datasets")
    #train_set = DatasetFromHdf5("/path/to/your/dataset/like/imagenet_50K.h5")
    home = os.path.expanduser('~')
    hd_folder = os.path.join(home, 'DataSet', 'SR_DATA', 'HD')
    #hd_folder = os.path.join('data', 'HD')

    training_data_loader = DatasetFromfolder(hd_folder,
                                             batch_size=opt.batch_size,
                                             img_size=256)

    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        #train(training_data_loader, optimizer, model, criterion, epoch)

        # lr = adjust_learning_rate(optimizer, epoch-1)
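        # step decay: halve the base learning rate every opt.step epochs,
        # e.g. with step=30, epochs 1-30 use opt.lr, 31-60 use opt.lr/2, ...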
        lr = opt.lr * (0.5**((epoch - 1) // opt.step))

        for param_group in optimizer.param_groups:
            param_group["lr"] = lr
        print("epoch =", epoch, "lr =", optimizer.param_groups[0]["lr"])

        for iteration in range(training_data_loader.epoch_iteration):

            batch_data = training_data_loader.get_next()

            # wrap the numpy batches as Variables (legacy, pre-0.4 PyTorch);
            # requires_grad=False since inputs and targets need no gradients
            inputs = Variable(torch.from_numpy(batch_data[0]),
                              requires_grad=False)
            label = Variable(torch.from_numpy(batch_data[1]),
                             requires_grad=False)

            if opt.cuda:
                inputs = inputs.cuda()
                label = label.cuda()
            #print(inputs.size())
            out = model(inputs)

            # Charbonnier (smooth L1) reconstruction loss plus negative SSIM:
            # minimizing -SSIM maximizes structural similarity to the label
            l1_loss = criterion(out, label)
            ssim_loss = -ssim_sim(out, label)
            loss = l1_loss + ssim_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log per-iteration losses; l1_plot and ssim_plot are module-level
            # plotters not shown here, and .data.numpy()[0] is the legacy
            # pre-0.4 way of extracting a scalar (now .item())
            l1_plot.plot(l1_loss.cpu().data.numpy()[0])
            ssim_plot.plot(ssim_loss.cpu().data.numpy()[0])

            if iteration % 100 == 0:
                print(("===> Epoch[{}]({}/{}): Loss: {:.10f}".format(
                    epoch, iteration, training_data_loader.epoch_iteration,
                    loss.data[0])))
                #print("total gradient", total_gradient(model.parameters()))

                # show the first sample of the batch: low-res input, HD label
                # and the recovered output side by side
                reg_img_np = batch_data[0][0:1]
                hd_img_np = batch_data[1][0:1]
                recovered_img_np = out.data.cpu().numpy()[0:1]
                img_display = [reg_img_np, hd_img_np, recovered_img_np]

                returned_img = save_images(img_display,
                                           save_path=None,
                                           save=False,
                                           dim_ordering='th')
                plot_img(X=returned_img,
                         win='reg_hd_recovered',
                         env=model_name)

        # save a checkpoint every opt.save_freq epochs
        if epoch > 0 and epoch % opt.save_freq == 0:
            torch.save(
                model.state_dict(),
                os.path.join(model_folder, 'model_epoch_{}.pth'.format(epoch)))
            print('saved weights to {}'.format(model_folder))
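
As listed, nothing ever calls main(); a script like this would normally end with the standard entry-point guard:

if __name__ == '__main__':
    main()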