Example #1
def main():

    parser = argparse.ArgumentParser()

    parser.add_argument("--batchSize", type=int, default=128, help="Training batch size. Default 128")
    parser.add_argument("--Epochs", type=int, default=50, help="Number of epochs to train for")
    parser.add_argument("--lr", type=float, default=0.1, help="Learning Rate. Default=0.1")
    parser.add_argument("--step", type=int, default=10,
                        help="Sets the learning rate to the initial LR decayed by momentum every n epochs, Default: n=10")
    parser.add_argument("--start-epoch", default=1, type=int, help="Manual epoch number")
    parser.add_argument("--cuda", action="store_true", help="Use cuda?")
    parser.add_argument("--clip", type=float, default=0.4, help="Clipping Gradients. Default=0.4")
    parser.add_argument("--threads", type=int, default=1, help="Number of threads for data loader to use, Default: 1")
    parser.add_argument("--momentum", default=0.9, type=float, help="Momentum, Default: 0.9")
    parser.add_argument("--weight-decay", default=1e-4, type=float, help="Weight decay, Default: 1e-4")
    parser.add_argument("--pretrained", default='', type=str, help="Path to pretrained model")
    parser.add_argument("--train_data", required=True, type=str, help="Path to preprocessed train dataset")
    parser.add_argument("--test_data", default="./assets/", type=str, help="Path to file containing test images")
    args = parser.parse_args()

    cuda = args.cuda
    if cuda:
        print("=> use gpu id: '{}'".format(0))
        os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        if not torch.cuda.is_available():
            raise Exception("No GPU found or Wrong gpu id, please run without --cuda")

    # cudnn.benchmark lets cuDNN auto-tune convolution algorithms for the fixed training patch size
    cudnn.benchmark = True

    train_set = prepareDataset(args.train_data)
    train_data = DataLoader(dataset=train_set, num_workers=args.threads, batch_size=args.batchSize, shuffle=True)

    # VDSR: the 20-layer residual super-resolution network of Kim et al. (CVPR 2016)
    model = VDSR()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated; reduction='sum' keeps the same behaviour
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading model '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained)
            args.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint['model'].state_dict())
        else:
            print("No model found at '{}'".format(opt.pretrained))

    train(args.start_epoch, train_data, optimizer, model, criterion, args.Epochs, args)
    eval(model, args)
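
The helpers prepareDataset, train, and eval above come from the repository this example was taken from. For reference only, here is a minimal, hypothetical sketch of the kind of HDF5-backed Dataset that prepareDataset typically wraps, assuming the preprocessed .h5 file stores a 'data' array of input patches and a 'label' array of target patches (the class name and both array names are assumptions):

import h5py
import torch
from torch.utils.data import Dataset

class H5PatchDataset(Dataset):
    """Hypothetical HDF5-backed patch dataset; not the repository's own prepareDataset."""

    def __init__(self, file_path):
        h5f = h5py.File(file_path, "r")
        self.data = h5f["data"]    # assumed name of the input (low-resolution) patches
        self.label = h5f["label"]  # assumed name of the target (high-resolution) patches

    def __getitem__(self, index):
        return (torch.from_numpy(self.data[index]).float(),
                torch.from_numpy(self.label[index]).float())

    def __len__(self):
        return self.data.shape[0]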
Example #2
def main():
    global opt
    opt = parser.parse_args()
    opt.gpuids = list(map(int, opt.gpuids))

    print(opt)

    if opt.cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    cudnn.benchmark = True

    if not opt.test:
        train_set = get_training_set(opt.dataset, opt.crop_size,
                                     opt.upscale_factor, opt.add_noise,
                                     opt.noise_std)
        validation_set = get_validation_set(opt.dataset, opt.crop_size,
                                            opt.upscale_factor)

    # test_set = get_test_set(
    #     opt.dataset, opt.crop_size, opt.upscale_factor)

    if not opt.test:
        training_data_loader = DataLoader(dataset=train_set,
                                          num_workers=opt.threads,
                                          batch_size=opt.batch_size,
                                          shuffle=True)
        validating_data_loader = DataLoader(dataset=validation_set,
                                            num_workers=opt.threads,
                                            batch_size=opt.test_batch_size,
                                            shuffle=False)

    # testing_data_loader = DataLoader(
    #     dataset=test_set, num_workers=opt.threads, batch_size=opt.test_batch_size, shuffle=False)

    model = VDSR()
    criterion = nn.MSELoss()

    if opt.cuda:
        torch.cuda.set_device(opt.gpuids[0])
        with torch.cuda.device(opt.gpuids[0]):
            model = model.cuda()
            criterion = criterion.cuda()

    optimizer = optim.Adam(model.parameters(),
                           lr=opt.lr,
                           weight_decay=opt.weight_decay)
    #     optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay)

    # if opt.test:
    #     model_name = join("model", opt.model)
    #     model = torch.load(model_name)
    #     start_time = time.time()
    #     test(model, criterion, testing_data_loader)
    #     elapsed_time = time.time() - start_time
    #     print("===> average {:.2f} image/sec for test".format(
    #         100.0/elapsed_time))
    #     return

    train_time = 0.0
    validate_time = 0.0
    for epoch in range(1, opt.epochs + 1):
        start_time = time.time()
        train(model, criterion, epoch, optimizer, training_data_loader)
        elapsed_time = time.time() - start_time
        train_time += elapsed_time
        #         print("===> {:.2f} seconds to train this epoch".format(
        #             elapsed_time))
        start_time = time.time()
        validate(model, criterion, validating_data_loader)
        elapsed_time = time.time() - start_time
        validate_time += elapsed_time
        #         print("===> {:.2f} seconds to validate this epoch".format(
        #             elapsed_time))
        if epoch % 10 == 0:
            checkpoint(model, epoch)

    print("===> average training time per epoch: {:.2f} seconds".format(
        train_time / opt.epochs))
    print("===> average validation time per epoch: {:.2f} seconds".format(
        validate_time / opt.epochs))
    print("===> training time: {:.2f} seconds".format(train_time))
    print("===> validation time: {:.2f} seconds".format(validate_time))
    print("===> total training time: {:.2f} seconds".format(train_time +
                                                            validate_time))
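
The checkpoint helper called every tenth epoch above is defined elsewhere in that repository. A minimal sketch of what such a helper usually does, assuming it serializes the whole model to model_epoch_<N>.pth (the function body, directory, and file name are assumptions):

import os
import torch

def checkpoint(model, epoch, out_dir="model"):
    # Hypothetical helper: persist the full model object so training can be resumed or evaluated later.
    os.makedirs(out_dir, exist_ok=True)
    model_out_path = os.path.join(out_dir, "model_epoch_{}.pth".format(epoch))
    torch.save(model, model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))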
Example #3
    args.outputs_dir = os.path.join(args.outputs_dir, 'x{}'.format(args.scale))

    if not os.path.exists(args.outputs_dir):
        os.makedirs(args.outputs_dir)

    cudnn.benchmark = True
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    torch.manual_seed(args.seed)

    model = VDSR().to(device)

    criterion = nn.MSELoss()

    # NOTE: clip_grad_norm_ only has an effect once gradients exist; it normally belongs
    # after loss.backward() inside the training loop rather than here, before training starts.
    torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)

    train_dataset = TrainDataset(args.train_file)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True,   # page-locked host memory speeds up CPU-to-GPU copies
                                  drop_last=True)    # drop the final partial batch so every step uses a full batch
    eval_dataset = EvalDataset(args.eval_file)
    eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=1)  # evaluation images differ in size, so they are fed one at a time

    for epoch in range(args.num_epochs):
        model.train()
        loss_sum = 0
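        # Sketch only: the excerpt is cut off at this point. A typical continuation of the
        # epoch body, assuming an optimizer (not shown in this excerpt) was created earlier;
        # note that gradient clipping is applied after backward(), once gradients exist.
        for inputs, labels in train_dataloader:
            inputs, labels = inputs.to(device), labels.to(device)

            preds = model(inputs)
            loss = criterion(preds, labels)

            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()

            loss_sum += loss.item()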
Example #4
#model_level1 = torch.load(os.path.join('./SR_v2', "model_epoch_200.pth"))
model = VDSR(curL_in=curL_in, filter_num=opt.f_size)  # ConvertNet(curL_in=50, receptive_size=4)
#model = model.load_state_dict(os.path.join(path, "model_epoch_200.pth"))
Hmodel.init_weight_h()
Lmodel.init_weight_l()
criterion2 = nn.MSELoss()
criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated; reduction='sum' keeps the same behaviour
    
if cuda:
    Hmodel = Hmodel.cuda()
    Lmodel = Lmodel.cuda()
    model = model.cuda()
    #model_level1 = model_level1.cuda()
    criterion = criterion.cuda()
    criterion2 = criterion2.cuda()
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
#Hoptimizer = optim.Adam([{'params': Hmodel.conv1_b.parameters(),'lr':0},{'params': Hmodel.conv2_b.parameters(),'lr': 0},
#                          {'params': Hmodel.conv3_b.parameters(),'lr': 0}], lr=opt.lr)
#Loptimizer = optim.Adam([{'params': Lmodel.conv1_b.parameters(),'lr':0},{'params': Lmodel.conv2_b.parameters(),'lr': 0},
#                          {'params': Lmodel.conv3_b.parameters(),'lr': 0}], lr=opt.lr)


def to_img(x, depth):
    #x = 0.5 * (x + 1)
    x = x.clamp(0, 1)
    x = x.view(x.size(0), depth, 32, 32)
    return x
    
def train(epoch):
    epoch_loss = 0
    for iteration, batch in enumerate(train_dataloader, 1):
Example #5
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as T
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from model import VDSR
from dataset import DatasetFromFolder  # project-specific loader; module name assumed

device = torch.device('cuda:0')
writer = SummaryWriter('D:/VDSR')

transform = T.ToTensor()

trainset = DatasetFromFolder('D:/train_data/291', transform=transform)
trainLoader = DataLoader(trainset, batch_size=128, shuffle=True)


net = VDSR()
net = net.to(device)

optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
criterion = nn.MSELoss()
criterion = criterion.to(device)

net.train()
for epoch in range(20):

    running_cost = 0.0
    for i, data in enumerate(trainLoader, 0):
        input, target = data
        input, target = input.to(device), target.to(device)
        optimizer.zero_grad()
        output = net(input)
        loss = criterion(output, target)
        loss.backward()