def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    save_path = os.path.join('.', "model", "{}_{}".format(opt.model, opt.ID))
    log_dir = './records/{}_{}/'.format(opt.model, opt.ID)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    # opt.seed = 4222
    coeff_mse = opt.coeff_totalloss
    coeff_J = opt.coeff_J
    print("Random Seed: ", opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5(opt.traindata, opt.patchSize, opt.aug)
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    if opt.model == 'dense':
        model = Dense()
Example #2
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda:
        print("=> use gpu id: '{}'".format(opt.gpus))
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
        if not torch.cuda.is_available():
            raise Exception("No GPU found or Wrong gpu id, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("data/BSD300.h5")
    training_data_loader = DataLoader(dataset=train_set, batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))  

    print("===> Setting Optimizer")
    optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        if(epoch % 100 == 0):
            save_checkpoint(model, epoch)
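
save_checkpoint is called here but not defined in the excerpt; a minimal sketch consistent with the resume logic above (the checkpoint directory and dict layout are assumptions) would be:

import os
import torch

def save_checkpoint(model, epoch):
    # Assumed helper: stores the whole model object under "model", which is
    # why the resume branch above reads checkpoint["model"].state_dict().
    model_out_path = "checkpoint/model_epoch_{}.pth".format(epoch)
    if not os.path.exists("checkpoint"):
        os.makedirs("checkpoint")
    torch.save({"epoch": epoch, "model": model}, model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))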
Example #3
def main():

    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("../lapsrn/data/data.h5")
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)
    print("===> Building model")
    model = Net()
    criterion = L1_Charbonnier_loss()
    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()
    else:
        model = model.cpu()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
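
L1_Charbonnier_loss is imported from elsewhere in this LapSRN code; it is essentially a differentiable L1 penalty, sketched below (the eps value is an assumption based on common LapSRN implementations):

import torch
import torch.nn as nn

class L1_Charbonnier_loss(nn.Module):
    """Charbonnier penalty sum(sqrt(diff^2 + eps)): a smooth variant of L1."""
    def __init__(self, eps=1e-6):
        super(L1_Charbonnier_loss, self).__init__()
        self.eps = eps

    def forward(self, X, Y):
        diff = X - Y
        return torch.sum(torch.sqrt(diff * diff + self.eps))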
Example #4
def main():

    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5('data/train.h5')
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    print("===> Setting Optimizer")
    optimizer = optim.SGD([{
        "params": model.module.features.parameters()
    }, {
        "params": model.module.scale.parameters(),
        "weight_decay": 0.0
    }],
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
Example #5
def main():
    print("args:")
    print(args)
    best_psnr, best_epoch = 0, 0
    #args.seed = random.randint(1, 10000)
    args.seed = 1
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    cudnn.benchmark = True

    tmp_train_dataPath = os.path.join(args.tmp_data_dir, 'data/train.h5')
    tmp_val_dataPath = os.path.join(args.tmp_data_dir, 'Set5_mat')

    train_set = DatasetFromHdf5(tmp_train_dataPath)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=args.threads,
                                      batch_size=args.batchSize,
                                      shuffle=True)

    val_imglist = glob.glob(tmp_val_dataPath + "/*.*")

    model = Net()
    model = torch.nn.DataParallel(model).cuda()
    criterion = nn.MSELoss(reduction='sum')
    criterion = criterion.cuda()

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                             step_size=10,
                                             gamma=0.1)

    for epoch in range(args.start_epoch, args.nEpochs + 1):

        logging.info('current epoch {}, lr {:.5e}'.format(
            epoch, optimizer.param_groups[0]['lr']))
        train(
            training_data_loader,
            optimizer,
            model,
            criterion,
            epoch,
        )
        plot(epoch, lossList, 'mseLoss')
        lr_scheduler.step()
        psnr = validate(model, epoch, val_imglist)
        plot(epoch, psnrList[1:], 'psnr')
        is_best = psnr > best_psnr
        if is_best:
            best_epoch = epoch
            best_psnr = psnr
            torch.save(model.state_dict(),
                       os.path.join(args.tmp_save_dir, 'model_vdsr_best.pth'))
        logging.info('best psnr: {}, best epoch: {}'.format(
            best_psnr, best_epoch))
Example #6
def main():
    warnings.filterwarnings("ignore")
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5(opt.dataset)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    print("===> Building model")
    # load the pre-trained teacher model and the lightweight model
    model = Net()

    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        criterion = criterion.cuda()

    print("===> Setting Optimizer")
    optimizer = optim.SGD([{
        'params': model.module.parameters()
    }],
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    print("===> Training")
    num = 0
    lossArray = np.zeros(opt.nEpochs)
    pbar = tqdm(range(opt.start_epoch, opt.nEpochs + 1))
    for epoch in pbar:
        lossArray[num] = train(training_data_loader, optimizer, model,
                               criterion, epoch)
        pbar.set_description("loss: %.8f" % (lossArray[num]))
        num = num + 1  # iterating over pbar already advances it; an extra update() would double-count
    pbar.close()
    save_checkpoint(model)
Example #7
def main():

    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("path_to_dataset.h5")
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = nn.L1Loss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           lr=opt.lr,
                           weight_decay=opt.weight_decay,
                           betas=(0.9, 0.999),
                           eps=1e-08)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
Example #8
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    # Sets the seed for generating random numbers to a non-deterministic random number.
    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)

    # This flag allows you to enable the inbuilt cudnn auto-tuner
    # to find the best algorithm to use for your hardware.
    cudnn.benchmark = True

    # Loading datasets
    print("===> Loading datasets")
    train_set = DatasetFromHdf5(opt.train_path)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    # Building model
    print("===> Building model")
    model = Net(2)

    criterion = nn.SmoothL1Loss()

    print("===> Setting Optimizer")
    #TODO
    # for i in model.parameters():
    #     print(i.grad)
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    print("===> Training")
    model_saved_prefix = get_time_stamp(time) + opt.memo + "_model"
    saved_model_path = os.path.join("model/", model_saved_prefix)
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch, saved_model_path)
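
get_time_stamp is not shown and is called with the time module itself as its argument; a plausible reconstruction (purely an assumption) is:

def get_time_stamp(time_module):
    # Hypothetical helper: formats the current time as a filename prefix.
    return time_module.strftime("%Y%m%d-%H%M%S")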
Example #9
def main():

    cudnn.benchmark = True
    base_path = '/NSL/data/images/HyperspectralImages/ICVL/'
    # Dataset
    val_data = DatasetFromHdf5(base_path + '/testclean_si50_st80.h5')
    print(len(val_data))

    # Data Loader (Input Pipeline)
    val_loader = DataLoader(dataset=val_data,
                            num_workers=1,
                            batch_size=1,
                            shuffle=False,
                            pin_memory=True)

    # Model
    model_path = base_path + 'hscnn_5layer_dim10_93.pkl'
    result_path = base_path + '/test_results/'
    var_name = 'rad'

    save_point = torch.load(model_path)
    model_param = save_point['state_dict']
    model = resblock(conv_relu_res_relu_block, 16, 3, 31)
    model = nn.DataParallel(model)
    model.load_state_dict(model_param)

    model = model.cuda()
    model.eval()

    model_path = base_path
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss.csv'), 'w+')

    log_dir = os.path.join(model_path, 'train.log')
    logger = initialize_logger(log_dir)

    test_loss = validate(val_loader, model, rrmse_loss)

    print("Test Loss: %.9f " % (test_loss))
    # save loss
    record_loss(loss_csv, test_loss)
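
rrmse_loss comes from the surrounding ICVL/HSCNN code; it is commonly implemented as a mean relative error between reconstruction and ground truth, roughly as sketched here (the exact definition in this repo may differ):

import torch

def rrmse_loss(outputs, label):
    # Mean relative absolute error over all pixels and bands; assumes the
    # ground-truth radiance `label` is strictly positive.
    error = torch.abs(outputs - label) / label
    return torch.mean(error.view(-1))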
Example #10
def main():
    print("in loop")
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda:
        print("=> use gpu id: '{}'".format(opt.gpus))
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
        if not torch.cuda.is_available():
            raise Exception(
                "No GPU found or Wrong gpu id, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    #train_set = DatasetFromHdf5("data/train.h5")   # change the line below to your own packaged H5 file
    #train_set = DatasetFromHdf5("D:/mytestfile_41x41_all_small_x2.h5") # my own packaged file
    train_set = DatasetFromHdf5("D:/train.h5")  # the author's

    training_data_loader = DataLoader(
        dataset=train_set,
        num_workers=0,
        batch_size=opt.batchSize,
        shuffle=True)  # num_workers=opt.threads changed to 0

    print("===> Building model")
    model = Net()  # train from scratch; to continue training, load a .pth pretrained file via the next line

    #model = torch.load("checkpoint/model_epoch_lr01_1.pth", map_location=lambda storage, loc: storage)["model"]  # pretrained file; this raises an error, use the approach below instead

    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        #print("============train()前=========================")
        train(training_data_loader, optimizer, model, criterion, epoch)
        #print("============train()後=========================")
        save_checkpoint(model, epoch)
Example #11
def main():

    cudnn.benchmark = True
    # Dataset
    train_data = DatasetFromHdf5('./Data/train_Material_.h5')
    print(len(train_data))
    val_data = DatasetFromHdf5('./Data/valid_Material_.h5')
    print(len(val_data))

    # Data Loader (Input Pipeline)
    train_data_loader = DataLoader(dataset=train_data,
                                   num_workers=1,
                                   batch_size=64,
                                   shuffle=True,
                                   pin_memory=True)
    val_loader = DataLoader(dataset=val_data,
                            num_workers=1,
                            batch_size=1,
                            shuffle=False,
                            pin_memory=True)

    # Model

    model = resblock(conv_bn_relu_res_block, 10, 25, 25)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()

    # Parameters, Loss and Optimizer
    start_epoch = 0
    end_epoch = 100
    init_lr = 0.0001
    iteration = 0
    record_test_loss = 1000
    # criterion_RRMSE = torch.nn.L1Loss()
    criterion_RRMSE = rrmse_loss
    criterion_Angle = Angle_Loss
    criterion_MSE = torch.nn.MSELoss()
    criterion_SSIM = pytorch_msssim.SSIM()
    # criterion_Div = Divergence_Loss
    criterion_Div = torch.nn.KLDivLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=init_lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=0.01)

    model_path = './models/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss_material.csv'), 'w+')

    log_dir = os.path.join(model_path, 'train_material.log')
    logger = initialize_logger(log_dir)

    # Resume
    resume_file = ''
    if resume_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            iteration = checkpoint['iter']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])

    for epoch in range(start_epoch + 1, end_epoch):

        start_time = time.time()
        train_loss, iteration, lr = train(train_data_loader, model,
                                          criterion_MSE, criterion_RRMSE,
                                          criterion_Angle, criterion_SSIM,
                                          criterion_Div, optimizer, iteration,
                                          init_lr, end_epoch, epoch)
        test_loss, loss_angle, loss_reconstruct, loss_SSIM, loss_Div = validate(
            val_loader, model, criterion_MSE, criterion_RRMSE, criterion_Angle,
            criterion_SSIM, criterion_Div)

        # xxx_loss = validate_save(val_loader, model, criterion_MSE, criterion_RRMSE, epoch)

        save_checkpoint_material(model_path, epoch, iteration, model,
                                 optimizer)

        # print loss
        end_time = time.time()
        epoch_time = end_time - start_time
        print(
            "Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f , Angle Loss: %.9f, Recon Loss: %.9f, SSIM Loss: %.9f ,  Div Loss: %.9f"
            % (epoch, iteration, epoch_time, lr, train_loss, test_loss,
               loss_angle, loss_reconstruct, loss_SSIM, loss_Div))

        # save loss
        record_loss(loss_csv, epoch, iteration, epoch_time, lr, train_loss,
                    test_loss)
        logger.info(
            "Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f, Angle Loss: %.9f, Recon Loss: %.9f, SSIM Loss: %.9f,  Div Loss: %.9f "
            % (epoch, iteration, epoch_time, lr, train_loss, test_loss,
               loss_angle, loss_reconstruct, loss_SSIM, loss_Div))
Example #12
    model_index = 0
    for model in [ae.e1, ae.e2]:
        model_index += 1
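        # note: model_index is incremented but never used in the filename below,
        # so e1 and e2 save to the same path and the second write overwrites the first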
        if not model.fixed:
            torch.save(
                model.state_dict(),
                pjoin(weights_path,
                      "%s_%s_E%s.pth" % (TIME_ID, opt.mode, epoch)))


SHOW_INTERVAL = 100
SAVE_INTERVAL = 1000
t1 = 0
if __name__ == "__main__":
    # Set up data
    train_set = DatasetFromHdf5(opt.train_data)
    training_data_loader = DataLoader(
        dataset=train_set,
        num_workers=1,
        batch_size=opt.batch_size,
        shuffle=True
    )  # 'num_workers' need to be 1, otherwise will cause read error.

    # Set up directories and logs etc
    if opt.debug:
        opt.project = "test"  # debug means it's just a test demo
    project_path = pjoin("../Experiments", opt.project)
    rec_img_path = pjoin(project_path, "reconstructed_images")
    weights_path = pjoin(project_path, "weights")  # to save torch model
    if not opt.resume:
        if os.path.exists(project_path):
Example #13
cuda = opt.cuda
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

if opt.cuda and torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

opt.seed = random.randint(1, 10000)
print("Random Seed: ", opt.seed)
torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)

cudnn.benchmark = True
print("===> Loading datasets")
train_set = DatasetFromHdf5("./train_DIV2K_96_4.h5")
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=True)
print("===> Building model")

model = Net(16, 16, 8, 8)
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)

if torch.cuda.is_available():
    model.cuda()

criterion = nn.MSELoss()
Example #14
    optimizer = tf.train.AdamOptimizer(learning_rate)
    opt = optimizer.minimize(loss, global_step=global_step)

    # saver = tf.train.Saver(weights, max_to_keep=0)

    # shuffle(train_list)
    config = tf.ConfigProto(
        # device_count={'GPU': 1}
    )

    saver = tf.train.Saver()

    with tf.Session(config=config) as sess:
        tf.initialize_all_variables().run()

        train_set = DatasetFromHdf5("data/train.h5")
        x = train_set.target.value.reshape((1000, IMG_SIZE[0], IMG_SIZE[0], 1))  # (1000, 41, 41, 1) ndarray
        y = train_set.data.value.reshape((1000, IMG_SIZE[0], IMG_SIZE[0], 1))  # (1000, 41, 41, 1) ndarray
        index = [i for i in range(1000)]

        print("\n===============START===============\n")
        for epoch in range(MAX_EPOCH):
            shuffle(index)
            train_set_x = x[index, :, :, :]
            train_set_y = y[index, :, :, :]
            for step in range(1000//BATCH_SIZE):
                offset = step*BATCH_SIZE
                input_data = train_set_x[offset:(offset+BATCH_SIZE), :, :, :]
                gt_data = train_set_y[offset:(offset+BATCH_SIZE), :, :, :]
                feed_dict = {train_input: input_data, train_gt: gt_data}
                _, curr_loss, output, g_step = sess.run([opt, loss, train_output, global_step], feed_dict=feed_dict)
Example #15
cuda = opt.cuda
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

if opt.cuda and torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

opt.seed = random.randint(1, 10000)
print("Random Seed: ", opt.seed)
torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)

cudnn.benchmark = True      
print("===> Loading datasets")
train_set = DatasetFromHdf5("./train_DIV2K_96_4.h5")
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
test_set = DatasetFromHdf5("./val_DIV2K_96_4.h5")
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testbatchSize, shuffle=True)
print("===> Building model")

model = Net(16, 16, 8, 8)
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)

if torch.cuda.is_available():
    model.cuda()

criterion = nn.MSELoss()
Example #16
def main():

    global opt, model, netContent
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda:
        print("=> use gpu id: '{}'".format(opt.gpus))
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
        if not torch.cuda.is_available():
            raise Exception(
                "No GPU found or Wrong gpu id, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    training_data_loaders = []
    random_order = [i for i in range(4)]
    for i in range(4):
        filename = "../train/DIV2K_train_320_HDF5/DIV2K_x4_Part" + str(
            i + 1) + ".h5"
        train_set = DatasetFromHdf5(filename)
        training_data_loader = DataLoader(dataset=train_set,
                                          num_workers=opt.threads,
                                          batch_size=opt.batchSize,
                                          shuffle=True)
        training_data_loaders.append(training_data_loader)

    if opt.vgg_loss:
        print('===> Loading VGG model')
        netVGG = models.vgg19()
        netVGG.load_state_dict(
            model_zoo.load_url(
                'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'))

        class _content_model(nn.Module):
            def __init__(self):
                super(_content_model, self).__init__()
                self.feature = nn.Sequential(
                    *list(netVGG.features.children())[:-1])

            def forward(self, x):
                out = self.feature(x)
                return out

        netContent = _content_model()

    print("===> Building model")
    model = _NetG()
    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()
        if opt.vgg_loss:
            netContent = netContent.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        shuffle(random_order)
        for idx in random_order:
            train(training_data_loaders[idx], optimizer, model, criterion,
                  epoch)
        save_checkpoint(model, epoch)
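
The content network is consumed inside train(), which this excerpt does not show; the usual pattern in these SRResNet-style scripts adds an MSE over VGG features to the pixel loss, roughly as below (variable names are assumptions):

# Sketch of the per-batch loss inside train() when opt.vgg_loss is set:
sr = model(lr_input)
loss = criterion(sr, hr_target)
if opt.vgg_loss:
    content_sr = netContent(sr)
    content_hr = netContent(hr_target).detach()  # no gradient into the target branch
    loss = loss + criterion(content_sr, content_hr)
optimizer.zero_grad()
loss.backward()
optimizer.step()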
Example #17
def main():
    # Training settings
    parser = argparse.ArgumentParser(description="PyTorch VDSR")
    parser.add_argument("--batchSize",
                        type=int,
                        default=128,
                        help="Training batch size")
    parser.add_argument("--nEpochs",
                        type=int,
                        default=50,
                        help="Number of epochs to train for")
    parser.add_argument("--lr",
                        type=float,
                        default=0.1,
                        help="Learning Rate. Default=0.1")
    parser.add_argument(
        "--step",
        type=int,
        default=10,
        help=
        "Sets the learning rate to the initial LR decayed by momentum every n epochs, Default: n=10"
    )
    parser.add_argument("--cuda", action="store_true", help="Use cuda?")
    parser.add_argument("--resume",
                        default="",
                        type=str,
                        help="Path to checkpoint (default: none)")
    parser.add_argument("--start-epoch",
                        default=1,
                        type=int,
                        help="Manual epoch number (useful on restarts)")
    parser.add_argument("--clip",
                        type=float,
                        default=0.4,
                        help="Clipping Gradients. Default=0.4")
    parser.add_argument(
        "--threads",
        type=int,
        default=1,
        help="Number of threads for data loader to use, Default: 1")
    parser.add_argument("--momentum",
                        default=0.9,
                        type=float,
                        help="Momentum, Default: 0.9")
    parser.add_argument("--weight-decay",
                        "--wd",
                        default=1e-4,
                        type=float,
                        help="Weight decay, Default: 1e-4")
    parser.add_argument('--pretrained',
                        default='',
                        type=str,
                        help='path to pretrained model (default: none)')
    parser.add_argument("--gpu",
                        default="0",
                        type=str,
                        help="gpu ids (default: 0)")
    parser.add_argument("--num_filter", default=64, type=int)
    parser.add_argument("--num_block", default=18, type=int)
    parser.add_argument("--train_data",
                        type=str,
                        default="../Data/train_data/train.h5")
    parser.add_argument("--test_data",
                        type=str,
                        default="../Data/test_data/Set5_mat")
    parser.add_argument("-p", "--project_name", type=str)
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--sharpen", action="store_true")
    parser.add_argument("--drop_ratio", type=float, default=0)
    opt = parser.parse_args()

    # Set up directories and logs etc
    if opt.debug:
        opt.project_name = "test"
    project_path = pjoin("../Experiments", opt.project_name)
    rec_img_path = pjoin(project_path, "reconstructed_images")
    weights_path = pjoin(project_path, "weights")  # to save torch model
    if not opt.resume:
        if os.path.exists(project_path):
            respond = "Y"  # input("The appointed project name has existed. Do you want to overwrite it (everything inside will be removed)? (y/n) ")
            if str.upper(respond) in ["Y", "YES"]:
                shutil.rmtree(project_path)
            else:
                exit(1)
        if not os.path.exists(rec_img_path):
            os.makedirs(rec_img_path)
        if not os.path.exists(weights_path):
            os.makedirs(weights_path)
    TIME_ID = os.environ["SERVER"] + time.strftime("-%Y%m%d-%H%M")
    log_path = pjoin(weights_path, "log_" + TIME_ID + ".txt")
    log = sys.stdout if opt.debug else open(log_path, "w+")
    logprint(str(opt._get_kwargs()), log)

    cuda = opt.cuda
    if cuda:
        logprint("=> use gpu id: '{}'".format(opt.gpu), log)
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu
        if not torch.cuda.is_available():
            raise Exception(
                "No GPU found or Wrong gpu id, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    logprint("Random Seed: %s" % opt.seed, log)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    logprint("===> Loading datasets", log)
    train_set = DatasetFromHdf5(opt.train_data)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    logprint("===> Building model", log)
    model = Net(opt.num_filter, opt.num_block, opt.sharpen,
                opt.drop_ratio)  # create model
    criterion = nn.MSELoss(size_average=False)

    logprint("===> Setting GPU", log)
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            logprint("=> loading checkpoint '{}'".format(opt.resume), log)
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            logprint("=> no checkpoint found at '{}'".format(opt.resume), log)

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            logprint("=> loading model '{}'".format(opt.pretrained), log)
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            logprint("=> no model found at '{}'".format(opt.pretrained), log)

    logprint("===> Setting Optimizer", log)
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    logprint("===> Training", log)
    test(model, opt, log)
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch, opt,
              log)
        save_checkpoint(model, epoch, log, weights_path, TIME_ID)
        test(model, opt, log)
Example #18
def main():

    global opt, model 
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("data/lap_pry_x4_small.h5")
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)

    print('===> Building generator model')
    netG = _netG()

    print('===> Building discriminator model')    
    netD = _netD()

    print('===> Loading VGG model') 
    model_urls = {
        "vgg19": "https://download.pytorch.org/models/vgg19-dcbb9e9d.pth"
    }

    netVGG = models.vgg19()
    netVGG.load_state_dict(model_zoo.load_url(model_urls['vgg19']))

    weight = torch.FloatTensor(64,1,3,3)
    parameters = list(netVGG.parameters())
    for i in range(64):
        weight[i,:,:,:] = parameters[0].data[i].mean(0)
    bias = parameters[1].data

    class _content_model(nn.Module):
        def __init__(self):
            super(_content_model, self).__init__()
            self.conv = nn.Conv2d(1, 64, kernel_size=3, padding=1)
            self.feature = nn.Sequential(*list(netVGG.features.children())[1:-1])
            self._initialize_weights()

        def forward(self, x):
            out = self.conv(x)
            out = self.feature(out)
            return out

        def _initialize_weights(self):
            self.conv.weight.data.copy_(weight)
            self.conv.bias.data.copy_(bias)

    netContent = _content_model()

    print('===> Building Loss')
    criterion = L1_Charbonnier_loss()

    print("===> Setting GPU")
    if cuda:
        netG = netG.cuda()
        netD = netD.cuda()
        netContent = netContent.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            netG.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            netG.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizerD = optim.RMSprop(netD.parameters(), lr=opt.lrD)
    optimizerG = optim.RMSprop(netG.parameters(), lr=opt.lrG)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1): 
        train(training_data_loader, optimizerG, optimizerD, netG, netD, netContent, criterion, epoch)
        save_checkpoint(netG, epoch)
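
train() here alternates discriminator and generator updates; RMSprop with separate learning rates for netD and netG is the WGAN recipe, whose core step looks roughly like this sketch (tensor names are assumptions, not this repo's exact code):

# One WGAN-style alternation with weight clipping:
for p in netD.parameters():
    p.data.clamp_(-0.01, 0.01)  # enforce the Lipschitz constraint

fake = netG(lr_input)
errD = netD(fake.detach()).mean() - netD(hr_target).mean()
optimizerD.zero_grad()
errD.backward()
optimizerD.step()

errG = -netD(fake).mean() + criterion(fake, hr_target)
optimizerG.zero_grad()
errG.backward()
optimizerG.step()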
Example #19
def main():
    print("args:")
    print(args)
    best_psnr, best_epoch = 0, 0
    #args.seed = random.randint(1, 10000)
    args.seed = 1
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    cudnn.benchmark = True

    tmp_train_dataPath = os.path.join(args.tmp_data_dir, 'data/train.h5')
    tmp_val_dataPath = os.path.join(args.tmp_data_dir, 'Set5_mat')

    train_set = DatasetFromHdf5(tmp_train_dataPath)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=args.threads,
                                      batch_size=args.batchSize,
                                      shuffle=True)

    val_imglist = glob.glob(tmp_val_dataPath + "/*.*")

    assert args.pretrained != ""
    teacher = Net()
    teacher = torch.nn.DataParallel(teacher).cuda()

    teacher.load_state_dict(torch.load(args.pretrained))
    print("The architecture of Teacher:")
    print(teacher)
    psnr = validate(teacher, -1, val_imglist)
    print("The  PSNR of Teacher is ", psnr)

    ## Generator
    generator1 = GeneratorINE1(img_size=args.input_size // 2,
                               channels=1,
                               latent=args.latent).cuda()
    generator2 = GeneratorINE1(img_size=args.input_size // 3,
                               channels=1,
                               latent=args.latent).cuda()
    generator3 = GeneratorINE1(img_size=args.input_size // 4,
                               channels=1,
                               latent=args.latent).cuda()

    print("The architecture of generator: ")
    print(generator1)
    optimizer_G1 = torch.optim.Adam(generator1.parameters(), lr=args.lr_G)
    optimizer_G2 = torch.optim.Adam(generator2.parameters(), lr=args.lr_G)
    optimizer_G3 = torch.optim.Adam(generator3.parameters(), lr=args.lr_G)

    lr_schedulerG1 = optim.lr_scheduler.StepLR(optimizer_G1,
                                               step_size=10,
                                               gamma=0.1)
    lr_schedulerG2 = optim.lr_scheduler.StepLR(optimizer_G2,
                                               step_size=10,
                                               gamma=0.1)
    lr_schedulerG3 = optim.lr_scheduler.StepLR(optimizer_G3,
                                               step_size=10,
                                               gamma=0.1)

    if args.loss_type == "MSE":
        criterion = nn.MSELoss(reduction='sum')
    else:
        criterion = nn.L1Loss(reduction='sum')

    model_epoch_list = [i * args.inc_step for i in range(args.inc_num)]
    epoch_index = 0
    print("model_epoch_list: ", )
    for epoch in range(args.start_epoch, args.nEpochs + 1):

        if epoch_index < len(model_epoch_list
                             ) and epoch == 1 + model_epoch_list[epoch_index]:
            epoch_index += 1
            best_psnr = -1
            if args.model_type == "origin":
                model = Net(block_num=min(18, epoch * 18 // args.inc_num))
            else:
                model = NetHalf(block_num=min(18, epoch * 18 // args.inc_num))

            model = torch.nn.DataParallel(model)
            model = model.cuda()
            for name, param in model.named_parameters():
                print(name)
            if epoch_index > 1:
                save_data = torch.load(
                    os.path.join(args.tmp_save_dir,
                                 'model_vdsr_{}.pth'.format(epoch - 1)))
                model.load_state_dict(save_data, strict=False)
                print("Load model from {}".format(
                    os.path.join(args.tmp_save_dir,
                                 'model_vdsr_{}.pth'.format(epoch - 1))))
            criterion = criterion.cuda()

            optimizer = optim.SGD(model.parameters(),
                                  lr=args.lr,
                                  momentum=args.momentum,
                                  weight_decay=args.weight_decay)
            lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                                     step_size=10,
                                                     gamma=0.1)

        logging.info('current epoch {}, lr {:.5e}'.format(
            epoch, optimizer.param_groups[0]['lr']))
        train(training_data_loader, optimizer, model, criterion, epoch,
              teacher, generator1, generator2, generator3, optimizer_G1,
              optimizer_G2, optimizer_G3)
        plot(epoch, lossList, 'mseLoss')
        lr_scheduler.step()
        lr_schedulerG1.step()
        lr_schedulerG2.step()
        lr_schedulerG3.step()
        psnr = validate(model, epoch, val_imglist)
        plot(epoch, psnrList[1:], 'psnr')
        is_best = psnr > best_psnr
        if is_best:
            best_epoch = epoch
            best_psnr = psnr
            torch.save(model.state_dict(),
                       os.path.join(args.tmp_save_dir, 'model_vdsr_best.pth'))
        torch.save(
            model.state_dict(),
            os.path.join(args.tmp_save_dir, 'model_vdsr_{}.pth'.format(epoch)))
        torch.save(
            generator1.state_dict(),
            os.path.join(args.tmp_save_dir,
                         'generator1_vdsr_{}.pth'.format(epoch)))
        torch.save(
            generator2.state_dict(),
            os.path.join(args.tmp_save_dir,
                         'generator2_vdsr_{}.pth'.format(epoch)))
        torch.save(
            generator3.state_dict(),
            os.path.join(args.tmp_save_dir,
                         'generator3_vdsr_{}.pth'.format(epoch)))
        logging.info('best psnr: {}, best epoch: {}'.format(
            best_psnr, best_epoch))
        source = os.path.join(args.tmp_save_dir)
        target = os.path.join(args.train_url)
Example #20
torch.manual_seed(seed1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

np.random.seed(seed2)
#-----------------------------------------------------------------------------
#          -Load Data-
#-----------------------------------------------------------------------------
#data path should include the h5files for the dataset with 3 dictionares:'INPUT','TARGET','CLEAN'(optional for single mode)
train_files = './SDL_Dir/Train_data'
bs = 3  # batch size
# train = torch.utils.data.TensorDataset(X_Train, Y_Train,X_Clean)
# trainloader = torch.utils.data.DataLoader(train, batch_size=bs, shuffle=False)

# if you don't use clean samples in the network, set yes=0
train_set = DatasetFromHdf5(os.walk(train_files), yes=1)
trainloader = torch.utils.data.DataLoader(dataset=train_set, num_workers=1, batch_size=bs, shuffle=True)
# The architecture:
mode = 'SDL'
if mode == 'SDL':
    e, W, N = utils.generate_filters()
    model = final(e, W[5:10, :, :, :], N[10:20, :, :, :])
else:
    model = CoordRegressionNetwork(n_locations=4)

optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=10)
# If you want to load a pre-trained model
load = True
# Path to pretrained model
model_path= './SDL_Dir/checkpoints/SDL_mixed.pth'
Example #21
np.random.seed(SEED)

opt.num_source = opt.angular_in * opt.angular_in
model_dir = 'model_{}_S{}_epi{}_lr{}_step{}x{}'.format(opt.dataset,
                                                       opt.num_source, opt.epi,
                                                       opt.lr, opt.step,
                                                       opt.reduce)

if not os.path.exists(model_dir):
    os.makedirs(model_dir)

#--------------------------------------------------------------------------#
# Data loader
print('===> Loading datasets')
# dataset_path = join('LFData', 'train_{}.h5'.format(opt.dataset))
train_set = DatasetFromHdf5(opt)
train_loader = DataLoader(dataset=train_set,
                          batch_size=opt.batch_size,
                          shuffle=True)
print('loaded {} LFIs from {}'.format(len(train_loader), opt.dataset_path))
#--------------------------------------------------------------------------#
# Build model
print("building net")
model = Net(opt).to(device)
#-------------------------------------------------------------------------#
# optimizer and loss logger
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=opt.lr)
scheduler = optim.lr_scheduler.StepLR(optimizer,
                                      step_size=opt.step,
                                      gamma=opt.reduce)
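
The scheduler is built here but stepped in a training loop that this excerpt cuts off before; the usual per-epoch pattern is:

# Typical epoch loop for this setup (train_one_epoch and opt.num_epochs are
# hypothetical names):
for epoch in range(1, opt.num_epochs + 1):
    train_one_epoch(model, train_loader, optimizer)
    scheduler.step()  # decays lr by a factor of opt.reduce every opt.step epochs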
Example #22
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    save_path = os.path.join('.', "model", "{}_{}".format(opt.model, opt.ID))
    log_dir = './records/{}_{}/'.format(opt.model, opt.ID)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    # opt.seed = 4222
    coeff_mse = opt.coeff_totalloss
    coeff_J = opt.coeff_J
    print("Random Seed: ", opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5(opt.traindata, opt.patchSize, opt.aug)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    print("===> Building model")
    if opt.model == 'dense':
        model = Dense()
    else:
        raise ValueError("no known model of {}".format(opt.model))
    criterion = nn.MSELoss()
    Absloss = nn.L1Loss()
    ssim_loss = pytorch_msssim.MSSSIM()

    #loss_var = torch.std()
    if opt.freeze:
        model.freeze_pretrained()

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        criterion = criterion.cuda()
        Absloss = Absloss.cuda()
        ssim_loss = ssim_loss.cuda()
        #loss_var = loss_var.cuda()
        vgg = Vgg16(requires_grad=False).cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("===> loading checkpoint: {}".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("===> no checkpoint found at {}".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            pretrained_dict = torch.load(opt.pretrained)['model'].state_dict()
            print("===> load model {}".format(opt.pretrained))
            model_dict = model.state_dict()
            # filter out unnecessary keys
            pretrained_dict = {
                k: v
                for k, v in pretrained_dict.items() if k in model_dict
            }
            print("\t...loaded parameters:")
            for k, v in pretrained_dict.items():
                print("\t\t+{}".format(k))
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)
            # weights = torch.load(opt.pretrained)
            # model.load_state_dict(weights['model'].state_dict())
        else:
            print("===> no model found at {}".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(
        model.parameters(), lr=opt.lr,
        weight_decay=opt.weight_decay)  #weight_decay=opt.weight_decay

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):

        # Evaluate validation dataset and save images (epoch % 1 == 0 always
        # holds, so this runs every epoch; raise the modulus to validate less often)
        if epoch % 1 == 0:
            save_val_path = os.path.join('test', opt.model + '_' + opt.ID)
            checkdirctexist(save_val_path)
            image_list = glob.glob(os.path.join(opt.valdataset, '*.png'))
            for image_name in image_list:
                print("Processing ", image_name)
                img = cv2.imread(image_name)
                img = img.astype(np.float32)
                H, W, C = img.shape
                P = 512
                print("\t\tBreak image into patches of {}x{}".format(P, P))

                Wk = W
                Hk = H
                if W % 32:
                    Wk = W + (32 - W % 32)
                if H % 32:
                    Hk = H + (32 - H % 32)
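                    # note: everything from the padding here down to the imwrite
                    # calls is nested under `if H % 32:`, so images whose height
                    # is already a multiple of 32 are silently skipped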
                    img = np.pad(img, ((0, Hk - H), (0, Wk - W), (0, 0)),
                                 'reflect')
                    im_input = img / 255.0
                    im_input = np.expand_dims(np.rollaxis(im_input, 2), axis=0)
                    im_input_rollback = np.rollaxis(im_input[0], 0, 3)
                    with torch.no_grad():
                        im_input = Variable(torch.from_numpy(im_input).float())
                        im_input = im_input.cuda()
                        model.eval()

                        J, J1, J2, J3, w1, w2, w3 = model(im_input, opt)
                        im_output = J

                    im_output = im_output.cpu()
                    im_output_forsave = get_image_for_save(im_output)
                    J1_output = J1.cpu()
                    J1_output_forsave = get_image_for_save(J1_output)
                    J2_output = J2.cpu()
                    J2_output_forsave = get_image_for_save(J2_output)
                    J3_output = J3.cpu()
                    J3_output_forsave = get_image_for_save(J3_output)
                    W1_output = w1.cpu()
                    W1_output_forsave = get_image_for_save(W1_output)
                    W2_output = w2.cpu()
                    W2_output_forsave = get_image_for_save(W2_output)
                    W3_output = w3.cpu()
                    W3_output_forsave = get_image_for_save(W3_output)

                    path, filename = os.path.split(image_name)

                    im_output_forsave = im_output_forsave[0:H, 0:W, :]
                    J1_output_forsave = J1_output_forsave[0:H, 0:W, :]
                    J2_output_forsave = J2_output_forsave[0:H, 0:W, :]
                    J3_output_forsave = J3_output_forsave[0:H, 0:W, :]
                    W1_output_forsave = W1_output_forsave[0:H, 0:W, :]
                    W2_output_forsave = W2_output_forsave[0:H, 0:W, :]
                    W3_output_forsave = W3_output_forsave[0:H, 0:W, :]

                    cv2.imwrite(
                        os.path.join(save_val_path,
                                     "{}_IM_{}".format(epoch - 1, filename)),
                        im_output_forsave)
                    cv2.imwrite(
                        os.path.join(save_val_path,
                                     "{}_J1_{}".format(epoch - 1, filename)),
                        J1_output_forsave)
                    cv2.imwrite(
                        os.path.join(save_val_path,
                                     "{}_J2_{}".format(epoch - 1, filename)),
                        J2_output_forsave)
                    cv2.imwrite(
                        os.path.join(save_val_path,
                                     "{}_J3_{}".format(epoch - 1, filename)),
                        J3_output_forsave)
                    cv2.imwrite(
                        os.path.join(save_val_path,
                                     "{}_W1_{}".format(epoch - 1, filename)),
                        W1_output_forsave)
                    cv2.imwrite(
                        os.path.join(save_val_path,
                                     "{}_W2_{}".format(epoch - 1, filename)),
                        W2_output_forsave)
                    cv2.imwrite(
                        os.path.join(save_val_path,
                                     "{}_W3_{}".format(epoch - 1, filename)),
                        W3_output_forsave)
        train(training_data_loader, optimizer, model, criterion, Absloss,
              ssim_loss, epoch, vgg)
        save_checkpoint(model, epoch, save_path)
Example #23
def main():

    global opt, model, netContent
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5(
        "/path/to/your/hdf5/data/like/rgb_srresnet_x4.h5")
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    if opt.vgg_loss:
        print('===> Loading VGG model')
        netVGG = models.vgg19()
        netVGG.load_state_dict(
            model_zoo.load_url(
                'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'))

        class _content_model(nn.Module):
            def __init__(self):
                super(_content_model, self).__init__()
                self.feature = nn.Sequential(
                    *list(netVGG.features.children())[:-1])

            def forward(self, x):
                out = self.feature(x)
                return out

        netContent = _content_model()

    print("===> Building model")
    model = Net()
    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()
        if opt.vgg_loss:
            netContent = netContent.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
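# When opt.vgg_loss is set, train() (not shown here) typically compares VGG
# features of the output and the target. A hedged sketch of that step, with
# `output` and `target` as stand-in names for the network output and the
# ground truth:
def content_loss(netContent, criterion, output, target):
    content_input = netContent(output)
    content_target = netContent(target).detach()  # no gradient through the target branch
    return criterion(content_input, content_target)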
Example #24
def main():

    cudnn.benchmark = True
    base_path = '/NSL/data/images/HyperspectralImages/ICVL/'
    # Dataset
    train_data = DatasetFromHdf5(base_path + '/train.h5')
    print(len(train_data))
    val_data = DatasetFromHdf5(base_path + '/valid.h5')
    print(len(val_data))

    # Data Loader (Input Pipeline)
    train_data_loader = DataLoader(dataset=train_data,
                                   num_workers=1,
                                   batch_size=64,
                                   shuffle=True,
                                   pin_memory=True)
    val_loader = DataLoader(dataset=val_data,
                            num_workers=1,
                            batch_size=1,
                            shuffle=False,
                            pin_memory=True)

    # Model
    model = resblock(conv_batch_relu_res_block, 16, 3, 31)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()

    # Parameters, Loss and Optimizer
    start_epoch = 0
    end_epoch = 1000
    init_lr = 0.0002
    iteration = 0
    record_test_loss = 1000
    criterion = rrmse_loss
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=init_lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=0)

    model_path = base_path + '/models/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss.csv'), 'w+')

    log_dir = os.path.join(model_path, 'train.log')
    logger = initialize_logger(log_dir)

    # Resume
    resume_file = ''
    if resume_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            iteration = checkpoint['iter']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])

    for epoch in range(start_epoch + 1, end_epoch):

        print("epoch [%d]" % (epoch))
        start_time = time.time()
        train_loss, iteration, lr = train(train_data_loader, model, criterion,
                                          optimizer, iteration, init_lr,
                                          end_epoch)
        print("train done! epoch [%d]" % (epoch))
        test_loss = validate(val_loader, model, criterion)
        print("test done! epoch [%d]" % (epoch))

        # Save model
        if test_loss < record_test_loss:
            record_test_loss = test_loss
            save_checkpoint(model_path, epoch, iteration, model, optimizer)

        # print loss
        end_time = time.time()
        epoch_time = end_time - start_time
        print(
            "Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f "
            % (epoch, iteration, epoch_time, lr, train_loss, test_loss))
        # save loss
        record_loss(loss_csv, epoch, iteration, epoch_time, lr, train_loss,
                    test_loss)
        logger.info(
            "Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f "
            % (epoch, iteration, epoch_time, lr, train_loss, test_loss))
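# rrmse_loss is imported from elsewhere; in spectral-reconstruction code it
# is usually the mean relative absolute error over all pixels. A minimal
# sketch under that assumption:
def rrmse_loss(outputs, label):
    """Mean relative error between reconstructed and ground-truth spectra."""
    error = torch.abs(outputs - label) / label
    return torch.mean(error.view(-1))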
Example #25
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("data/train_291_32_x234.h5")
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    print("===> Building model")
    model = DRRN(opt)
    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        # print ("====> doesn't parallel!!!!")
        # model = model.cuda() # ont thread for debug
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("===> loading checkpoint: {}".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("===> no checkpoint found at {}".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("===> load model {}".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("===> no model found at {}".format(opt.pretrained))

    print("===> Setting Optimizer")
    if opt.optimizer == "SGD":
        optimizer = optim.SGD(model.parameters(),
                              lr=opt.lr,
                              momentum=opt.momentum,
                              weight_decay=opt.weight_decay)
    elif opt.optimizer == "Adam":
        optimizer = optim.Adam(model.parameters(),
                               lr=opt.lr,
                               weight_decay=opt.weight_decay)
    else:
        raise ValueError("===> unknown optimizer: {}".format(opt.optimizer))

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
    os.system(
        "python eval.py --cuda --model=model/model_epoch_{}.pth".format(epoch))
Example #26
def main():
    #https://drive.google.com/file/d/1QxQxf2dzfSbvCgWlI9VuxyBgfmQyCmfE/view?usp=sharing - train data
    #https://drive.google.com/file/d/11INkjd_ajT-RSCSFqfB7reLI6_m1jCAC/view?usp=sharing - val data
    #https://drive.google.com/file/d/1m0EZaRjla2o_eL3hOd7UMkSwoME5mF4A/view?usp=sharing - extra val data
    cudnn.benchmark = True
  #  train_data = DatasetFromHdf5('C:/Users/alawy/Desktop/Training/Training-shadesofgrey/train_tbands.h5')
    train_data = DatasetFromHdf5('/storage/train_cropped14.h5')

    print(len(train_data))
    val_data_extra = DatasetFromHdf5('/storage/valid_extra99.h5')
    val_data = DatasetFromHdf5('/storage/valid_cropped89.h5')
    val_new = data.ConcatDataset([val_data, val_data_extra])
    print(len(val_new))

    # Data Loader (Input Pipeline)
    train_data_loader = DataLoader(dataset=train_data,
                                   num_workers=4,
                                   batch_size=512,
                                   shuffle=True,
                                   pin_memory=True)
    val_loader = DataLoader(dataset=val_new,
                            num_workers=1,
                            batch_size=1,
                            shuffle=False,
                            pin_memory=True)
    # torch.set_num_threads(12)
    # Model
    model = resblock(conv_relu_res_relu_block, 16, 3, 25)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    if torch.cuda.is_available():
        model = model.to('cuda')
    # Parameters, Loss and Optimizer
    start_epoch = 0
    end_epoch = 1000
    init_lr = 0.0002
    iteration = 0
    record_test_loss = 1000
    criterion = rrmse_loss
    # optimizer = torch.optim.AdamW(model.parameters(), lr=init_lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=init_lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=0.01)

    # model_path = '/storage/models-crop/'
    model_path = './models-crop/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss.csv'), 'w+')

    log_dir = os.path.join(model_path, 'train.log')
    logger = initialize_logger(log_dir)

    # Resume
    resume_file = ''
    #resume_file = '/storage/notebooks/r9h1kyhq8oth90j/models/hscnn_5layer_dim10_69.pkl' 
    #resume_file = '/storage/notebooks/r9h1kyhq8oth90j/models-crop/hscnn_5layer_dim10_95.pkl'
    if resume_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            iteration = checkpoint['iter']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
       
    for epoch in range(start_epoch + 1, end_epoch):

        start_time = time.time()
        train_loss, iteration, lr = train(train_data_loader, model, criterion,
                                          optimizer, iteration, init_lr,
                                          end_epoch)
        test_loss = validate(val_loader, model, criterion)

        # Save model (a checkpoint is written every epoch; the best test loss is tracked)
        if test_loss < record_test_loss:
            record_test_loss = test_loss
        save_checkpoint(model_path, epoch, iteration, model, optimizer)
        # print loss 
        end_time = time.time()
        epoch_time = end_time - start_time
        print ("Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f " %(epoch, iteration, epoch_time, lr, train_loss, test_loss))
        # save loss
        record_loss(loss_csv,epoch, iteration, epoch_time, lr, train_loss, test_loss)     
        logger.info("Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f " %(epoch, iteration, epoch_time, lr, train_loss, test_loss))
        gc.collect()
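# record_loss() simply appends one CSV row per epoch; a plausible sketch
# matching the call above:
def record_loss(loss_csv, epoch, iteration, epoch_time, lr, train_loss, test_loss):
    loss_csv.write('{},{},{},{},{},{}\n'.format(
        epoch, iteration, epoch_time, lr, train_loss, test_loss))
    loss_csv.flush()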
Example #27
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True
        
    print("===> Loading datasets")
    train_set = DatasetFromHdf5(opt.train_path)
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = Net()
    # Loss function: SmoothL1 is used instead of MSE
    # criterion = nn.MSELoss(size_average=True)
    criterion = nn.SmoothL1Loss()

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        criterion = criterion.cuda()
        print("=======> Using {} GPU(s)".format(torch.cuda.device_count()))
    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))  

    print("===> Setting Optimizer")
    #TODO
    # for i in model.parameters():
    #     print(i.grad)
    optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay)
    # optimizer = optim.Adam(model.parameters(), lr=0.01)
    # optimizer = optim.Adam(model.parameters(), lr=0.001)

    print("===> Training")
    model_saved_prefix = get_time_stamp(time) + opt.memo + "_model"
    saved_model_path = os.path.join("model/", model_saved_prefix)
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):        
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch, saved_model_path)
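# get_time_stamp() is passed the time module itself; a hedged sketch of a
# helper consistent with that call:
def get_time_stamp(time_module):
    return time_module.strftime("%Y%m%d-%H%M%S", time_module.localtime())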
Example #28
def main():
    warnings.filterwarnings("ignore")
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5(opt.dataset)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    print("===> Building model")
    # load the pre-trained teacher model and the lightweight model
    tea_model = torch.load(opt.tea)["model"]
    pre_model = torch.load(opt.premodel)["model"]

    Cn = opt.width
    model = acSRNet(Cn)

    # with the pre-trained model, the training of AIL will be more stable
    model.input.weight.data = copy.deepcopy(pre_model.module.input.weight.data)
    model.output.weight.data = copy.deepcopy(
        pre_model.module.output.weight.data)
    model.residual_layer.load_state_dict(
        pre_model.module.residual_layer.state_dict())

    criterion_init = initLoss(size_average=False)
    criterion_ail = ailLoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        criterion_init = criterion_init.cuda()
        criterion_ail = criterion_ail.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        model_resume = "model/" + "ours_ail_r{}_f{}_s{}.pth".format(
            opt.resume - 1, opt.width, opt.scale)
        if os.path.isfile(model_resume):
            print("=> loading checkpoint '{}'".format(model_resume))
            checkpoint = torch.load(model_resume)
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(model_resume))
            exit(0)

    print("===> Setting Optimizer")
    optimizer = optim.SGD([{
        'params': model.module.parameters()
    }],
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    print("===> Training")
    lossArray = np.zeros(opt.nEpochs * opt.round)

    num = 0
    if not opt.resume:
        for curr in range(opt.round):
            reset_learning_rate(optimizer)
            if curr < 1:
                # for epoch in range(opt.start_epoch, opt.nEpochs + 1):
                pbar = tqdm(range(opt.start_epoch, opt.nEpochs + 1))
                for epoch in pbar:
                    lossArray[num] = train_init(training_data_loader,
                                                optimizer, model, tea_model,
                                                criterion_init, epoch)
                    pbar.set_description("loss: %.8f" % (lossArray[num]))
                    pbar.update()
                    num = num + 1
                pbar.close()

            else:
                pre_learned = copy.deepcopy(model)
                #for epoch in range(opt.start_epoch, opt.nEpochs + 1):
                pbar = tqdm(range(opt.start_epoch, opt.nEpochs + 1))
                for epoch in pbar:
                    lossArray[num] = train_ail(training_data_loader, optimizer,
                                               model, tea_model, criterion_ail,
                                               epoch, pre_learned, curr)
                    pbar.set_description("loss: %.8f" % (lossArray[num]))
                    pbar.update()
                    num = num + 1

                pbar.close()

            save_checkpoint(model, curr)
    else:
        for curr in range(opt.round - opt.resume):
            reset_learning_rate(optimizer)
            pre_learned = copy.deepcopy(model)
            # for epoch in range(opt.start_epoch, opt.nEpochs + 1):
            pbar = tqdm(range(opt.start_epoch, opt.nEpochs + 1))
            for epoch in pbar:
                lossArray[num] = train_ail(training_data_loader, optimizer,
                                           model, tea_model, criterion_ail,
                                           epoch, pre_learned,
                                           curr + opt.resume)
                pbar.set_description("loss: %.8f" % (lossArray[num]))
                pbar.update()
                num = num + 1

            pbar.close()

            save_checkpoint(model, curr + opt.resume)
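# reset_learning_rate() restarts the schedule at the beginning of each round;
# a minimal sketch, assuming it restores the initial opt.lr:
def reset_learning_rate(optimizer):
    for param_group in optimizer.param_groups:
        param_group['lr'] = opt.lr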
Example #29
def main():

    global opt, model, netContent
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda:
        print("=> use gpu id: '{}'".format(opt.gpus))
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
        if not torch.cuda.is_available():
            raise Exception(
                "No GPU found or Wrong gpu id, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("srresnet_x4.h5")
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, \
        batch_size=opt.batchSize, shuffle=True)

    # if opt.vgg_loss:
    #     print('===> Loading VGG model')
    #     netVGG = models.vgg19()
    #     netVGG.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'))
    #     class _content_model(nn.Module):
    #         def __init__(self):
    #             super(_content_model, self).__init__()
    #             self.feature = nn.Sequential(*list(netVGG.features.children())[:-1])

    #         def forward(self, x):
    #             out = self.feature(x)
    #             return out

    #     netContent = _content_model()

    print("===> Building model")
    model_G = _NetG()  #changed
    model_D = _NetD()  #changed
    criterion = nn.MSELoss()
    criterion_D = nn.CrossEntropyLoss()  #changed

    print("===> Setting GPU")
    if cuda:
        model_G = model_G.cuda()
        model_D = model_D.cuda()
        criterion = criterion.cuda()
        criterion_D = criterion_D.cuda()
        if opt.vgg_loss:
            netContent = netContent.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model_G.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model_G.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer_g = optim.Adam(model_G.parameters(), lr=opt.lr)  #changed
    optimizer_d = optim.Adadelta(model_D.parameters())  #changed

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        # NOTE: train() must be adapted to the two-network setup; the
        # discriminator pieces are model_D / optimizer_d / criterion_D.
        train(training_data_loader, optimizer_g, model_G, criterion, epoch)
        save_checkpoint(model_G, epoch)
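# train() for this generator/discriminator pair is not shown. A hedged sketch
# of one alternating update, assuming model_D returns two logits per image so
# that nn.CrossEntropyLoss applies (all names here are illustrative):
def train_gan_step(lr_img, hr_img, model_G, model_D, criterion, criterion_D,
                   optimizer_g, optimizer_d):
    # discriminator: real -> class 1, generated -> class 0
    optimizer_d.zero_grad()
    fake = model_G(lr_img)
    ones = torch.ones(hr_img.size(0), dtype=torch.long, device=hr_img.device)
    zeros = torch.zeros_like(ones)
    d_loss = criterion_D(model_D(hr_img), ones) + \
             criterion_D(model_D(fake.detach()), zeros)
    d_loss.backward()
    optimizer_d.step()

    # generator: pixel loss plus adversarial term
    optimizer_g.zero_grad()
    g_loss = criterion(fake, hr_img) + criterion_D(model_D(fake), ones)
    g_loss.backward()
    optimizer_g.step()
    return d_loss.item(), g_loss.item()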
Example #30
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#--------------------------------------------------------------------------#
torch.manual_seed(1)
model_dir = 'model_x{}_{}{}x{}_lr{}_step{}x{}_l{}'.format(
    opt.scale, opt.dataset, opt.angular_num, opt.angular_num, opt.lr, opt.step,
    opt.reduce, opt.layer_num)
if not os.path.exists(model_dir):
    os.makedirs(model_dir)

an = opt.angular_num
#--------------------------------------------------------------------------#
# Data loader
print('===> Loading datasets')
dataset_path = os.path.join('LFData', 'train_{}.h5'.format(opt.dataset))
train_set = DatasetFromHdf5(dataset_path, opt.scale, opt.patch_size)
train_loader = DataLoader(dataset=train_set,
                          batch_size=opt.batch_size,
                          shuffle=True)
print('loaded {} LFIs from {}'.format(len(train_loader), dataset_path))
#--------------------------------------------------------------------------#
# Build model
print("===> building network")
srnet_name = 'net{}x'.format(opt.scale)
# resolve the network class (e.g. net2x) from its name
model = eval(srnet_name)(an, opt.layer_num).to(device)
#-------------------------------------------------------------------------#
# optimizer and loss logger
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=opt.lr)
scheduler = optim.lr_scheduler.StepLR(optimizer,
                                      step_size=opt.step,