Example #1
def main():

    dataloaders = myDataloader()
    train_loader = dataloaders.getTrainLoader(batch_size)

    model = SRCNN().cuda()
    model.train()

    optimizer = optim.Adam(model.parameters(), lr=lr)
    mse_loss = nn.MSELoss()

    for ep in range(epoch):
        running_loss = 0.0
        for i, (pic, blurPic, _) in enumerate(train_loader):
            pic = pic.cuda()
            blurPic = blurPic.cuda()
            optimizer.zero_grad()
            out = model(blurPic)
            loss = mse_loss(out, pic)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()  # .item() avoids holding the autograd graph
            if i % 10 == 9:
                print('[%d %d] loss: %.4f' %
                      (ep + 1, i + 1, running_loss / 10))
                running_loss = 0.0
        if ep % 10 == 9:
            torch.save(model.state_dict(),
                       f="./result/train/" + str(ep + 1) + "srcnnParms.pth")
    print("finish training")
Example #2
def main():
    global opt
    opt = parser.parse_args()
    opt.gpuids = list(map(int, opt.gpuids))

    print(opt)

    if opt.cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")
    cudnn.benchmark = True

    train_set = get_training_set(opt.upscale_factor, opt.add_noise,
                                 opt.noise_std)
    validation_set = get_validation_set(opt.upscale_factor)
    test_set = get_test_set(opt.upscale_factor)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batch_size,
                                      shuffle=True)
    validating_data_loader = DataLoader(dataset=validation_set,
                                        num_workers=opt.threads,
                                        batch_size=opt.test_batch_size,
                                        shuffle=False)
    testing_data_loader = DataLoader(dataset=test_set,
                                     num_workers=opt.threads,
                                     batch_size=opt.test_batch_size,
                                     shuffle=False)

    model = SRCNN()
    criterion = nn.MSELoss()

    if opt.cuda:
        torch.cuda.set_device(opt.gpuids[0])
        with torch.cuda.device(opt.gpuids[0]):
            model = model.cuda()
            criterion = criterion.cuda()
        model = nn.DataParallel(model,
                                device_ids=opt.gpuids,
                                output_device=opt.gpuids[0])

    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    if opt.test:
        model_name = join("model", opt.model)
        model = torch.load(model_name)
        model = nn.DataParallel(model,
                                device_ids=opt.gpuids,
                                output_device=opt.gpuids[0])
        start_time = time.time()
        test(model, criterion, testing_data_loader)
        elapsed_time = time.time() - start_time
        print("===> average {:.2f} image/sec for processing".format(
            100.0 / elapsed_time))
        return

    for epoch in range(1, opt.epochs + 1):
        train(model, criterion, epoch, optimizer, training_data_loader)
        validate(model, criterion, validating_data_loader)
        if epoch % 10 == 0:
            checkpoint(model, epoch)
Example #3
def train(training_data, dev_data, args):
    training_gen = data.DataLoader(training_data, batch_size=2)
    dev_gen = data.DataLoader(dev_data, batch_size=2)
    device = torch.device('cuda' if cuda.is_available() else 'cpu')
    print('Initializing model')
    model = SRCNN()
    loss = RMSE()
    if cuda.device_count() > 1:
        print('Using %d CUDA devices' % cuda.device_count())
        model = nn.DataParallel(
            model, device_ids=[i for i in range(cuda.device_count())])
    model.to(device)
    loss.to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    def _train(data, opt=True):
        total = 0
        # skip building autograd graphs when running the dev pass (opt=False)
        with torch.set_grad_enabled(opt):
            for y, x in data:
                y, x = y.to(device), x.to(device)
                pred_y = model(x)
                l = loss(pred_y, y)
                total += l.item()
                if opt:
                    optimizer.zero_grad()
                    l.backward()
                    optimizer.step()
        cuda.synchronize()
        return total

    print('Training')
    for ep in range(args.ep):
        train_loss = _train(training_gen)
        dev_loss = _train(dev_gen, opt=False)
        print_flush('Epoch %d: Train %.4f Dev %.4f' %
                    (ep, train_loss, dev_loss))
        if ep % 50 == 0:
            save_model(model, args.o)
    return model
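RMSE, save_model and print_flush are project helpers that are not shown. A minimal RMSE sketch, assuming it is simply the square root of the mean-squared error:

import torch
import torch.nn as nn


class RMSE(nn.Module):
    def __init__(self, eps=1e-8):
        super().__init__()
        self.mse = nn.MSELoss()
        self.eps = eps  # avoids an infinite gradient at exactly zero error

    def forward(self, pred, target):
        return torch.sqrt(self.mse(pred, target) + self.eps)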
Example #4
try:
    from tensorboardX import SummaryWriter
except ImportError as reason:
    print('tensorboardX is not installed; install it with "pip install tensorboardX":', reason)
else:
    writer = SummaryWriter(log_dir='writer_log', comment='--')
    print('================ Using TensorBoard ================')

#=================================================

if Evaluation:
    validate(test_path, model)
else:
    train_dataset = SRCNN_dataset(train_config)
    criterion = nn.MSELoss().cuda()
    optimizer_adam = optim.Adam(model.parameters(), lr=train_config['lr'])
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=train_config['batch_size'])
    #==========================================================================================
    for _epoch in range(Start_epoch, End_epoch):
        loss_avg = train(train_loader,
                         model,
                         criterion,
                         optimizer_adam,
                         _epoch,
                         Writer=writer)  # the training pass is wrapped in a function to keep the overall structure clear
        save_state = {
            'epoch': _epoch,  # when saving the network, store key parameters alongside the state_dict()
            'lr': train_config['lr'],
            'state': model.state_dict()
        }
        torch.save(save_state, 'srcnn_epoch_{}.pth'.format(_epoch))  # checkpoint path is a placeholder
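A matching load step for the save_state dictionary above could look like the following sketch; the file name is the placeholder used in the save call, and the resume logic is an assumption.

checkpoint = torch.load('srcnn_epoch_10.pth', map_location='cpu')  # placeholder path
model.load_state_dict(checkpoint['state'])
Start_epoch = checkpoint['epoch'] + 1
train_config['lr'] = checkpoint['lr']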
Example #5
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

from data_loader import SRCNN_dataset
from model import SRCNN
from solver import train

train_config = {
    'dir_path': 'Train',
    'scale': 3,
    'is_gray': True,
    'input_size': 33,
    'label_size': 21,
    'stride': 21
}

test_config = train_config.copy()
test_config['dir_path'] = 'Test/Set5'

train_dataset = SRCNN_dataset(train_config)
model = SRCNN()
loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-4)
#train(train_dataset, model, loss_fn, 
#      optimizer, num_epochs=4, batch_size=128)

test_dataset = SRCNN_dataset(test_config)
test_loader = DataLoader(test_dataset, batch_size=125, shuffle=True, num_workers=4)

for _, (input_sample, label_sample) in enumerate(test_loader):
    print(input_sample.size())
    break

Example #6
trainloader = DataLoader(trainset,
                         batch_size=BATCH_SIZE,
                         sampler=RandomSampler(trainset,
                                               replacement=True,
                                               num_samples=32 * BATCH_SIZE),
                         num_workers=NUM_WORKERS)
testloader = DataLoader(testset,
                        batch_size=BATCH_SIZE,
                        sampler=RandomSampler(testset,
                                              replacement=True,
                                              num_samples=1024),
                        num_workers=NUM_WORKERS)

model = SRCNN().to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters())


def get_psnr(mse):
    # PSNR in dB, assuming the MSE is computed on images scaled to [0, 1]
    return -10 * log10(mse)


def test():
    avg_loss = 0
    model.eval()
    with torch.no_grad():
        for input, target in tqdm(testloader, desc='test', leave=False):
            input, target = input.to(device), target.to(device)
            out = model(input)
            loss = criterion(out, target)
            avg_loss += loss.item()
    return avg_loss / len(testloader)
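The training half of this example is not shown. A minimal sketch reusing the objects defined above; the epoch count and log format are assumptions.

def train_one_epoch():
    model.train()
    total_loss = 0
    for input, target in tqdm(trainloader, desc='train', leave=False):
        input, target = input.to(device), target.to(device)
        optimizer.zero_grad()
        loss = criterion(model(input), target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(trainloader)


for epoch in range(10):  # epoch count is arbitrary
    train_loss = train_one_epoch()
    test_mse = test()
    print('epoch %d: train %.4f, test PSNR %.2f dB' %
          (epoch, train_loss, get_psnr(test_mse)))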
Example #7
# device
if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")
device = torch.device(
    "cuda" if opt.cuda and torch.cuda.is_available() else "cpu")
map_location = "cuda:0" if opt.cuda else device
# model
model = SRCNN()
if checkpoint:
    checkpoint_ = load_checkpoint(opt.checkpoint, map_location)
    model.load_state_dict(checkpoint_["model_state_dict"])

model.to(device)

# optimizer
optimizer = optim.AdamW(model.parameters(), lr=opt.lr)
if checkpoint:
    optimizer.load_state_dict(checkpoint_["optimizer_state_dict"])

# data
train_set = SRDataset(collect_data(data_path["train"], progress=True))
test_set = SRDataset(collect_data(data_path["test"]))

train_data_loader = torch.utils.data.DataLoader(dataset=train_set,
                                                batch_size=opt.batch_size,
                                                shuffle=True,
                                                num_workers=num_workers)
test_data_loader = torch.utils.data.DataLoader(dataset=test_set,
                                               batch_size=opt.batch_size,
                                               shuffle=False,
                                               num_workers=num_workers)
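load_checkpoint, collect_data, SRDataset and data_path are project-specific helpers not shown here. A minimal sketch of load_checkpoint and a matching saver, assuming the checkpoint is an ordinary dictionary written with torch.save:

def load_checkpoint(path, map_location):
    # expected keys: "model_state_dict", "optimizer_state_dict"
    return torch.load(path, map_location=map_location)


def save_checkpoint(path, model, optimizer):
    torch.save({"model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict()}, path)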
Example #8
train_dataset = SR_dataset(
    lr_path=lr_path,
    hr_path=hr_path,
    transform=transform,
    interpolation_mode=Config.interpolation_mode,
    interpolation_scale=Config.interpolation_scale
)

train_loader = DataLoader(
    train_dataset,
    batch_size=Config.batch_size,
    shuffle=True
)

model = SRCNN().to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=Config.lr)

epochs = Config.epochs
model.train()
for epoch in range(epochs):
    print("{}/{} EPOCHS".format(epoch + 1, epochs))
    for x, y in tqdm(train_loader):
        x = x.to(DEVICE)
        y = y.to(DEVICE)

        pred = model(x)
        loss = torch.nn.functional.mse_loss(pred, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
Example #9
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batch_size,
                                  shuffle=True)
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=opt.threads,
                                 batch_size=opt.test_batch_size,
                                 shuffle=False)

srcnn = SRCNN()
criterion = nn.MSELoss()

if use_cuda:
    torch.cuda.set_device(opt.gpuid)
    srcnn.cuda()
    criterion = criterion.cuda()

optimizer = optim.SGD(srcnn.parameters(), lr=opt.lr)
#optimizer = optim.Adam(srcnn.parameters(),lr=opt.lr)


def train(epoch):
    epoch_loss = 0
    for iteration, batch in enumerate(training_data_loader, 1):
        input, target = Variable(batch[0]), Variable(batch[1])
        if use_cuda:
            input = input.cuda()
            target = target.cuda()

        optimizer.zero_grad()
        model_out = srcnn(input)
        loss = criterion(model_out, target)
        epoch_loss += loss.item()
        loss.backward()
        optimizer.step()