Ejemplo n.º 1
0
            'params':
            models['upscale'][scale].parameters(),
            'lr':
            LEARNING_RATE * 0.1
        })
    # Register the shared extra layer's weights with the generator optimizer
    # at the same reduced (0.1x) learning rate as the upscale branches above.
    optimizer['generator'].add_param_group({
        'params':
        models['extra'].parameters(),
        'lr':
        LEARNING_RATE * 0.1
    })
    # The discriminator gets its own Adam optimizer at the full learning rate.
    optimizer['discriminator'] = optim.Adam(
        models['discriminator'].parameters(), lr=LEARNING_RATE)

    # Meter tracking the running average of the training loss across batches.
    train_loss = meter.AverageMeter()

    # Main training loop over EPOCH epochs, resuming from EPOCH_START.
    for epoch in range(EPOCH_START, EPOCH_START + EPOCH):
        # Cycle through the configured upscale factors, one per epoch
        # (epoch index modulo the number of factors).
        scale = UPSCALE_FACTOR_LIST[epoch % len(UPSCALE_FACTOR_LIST)]
        datasets.scale_factor = scale

        for iteration in range(ITER_PER_EPOCH):
            train(models, scale, train_data_loader, criterion, optimizer,
                  train_loss, False)
        print('{:0>3d}: train_loss: {:.8f}, scale: {}'.format(
            epoch + 1, train_loss.avg, scale))

        #save
        # NOTE(review): prefer `if PRETRAINED:` — comparing with `== True`
        # is non-idiomatic and assumes PRETRAINED is a plain bool; confirm
        # before changing, since the branch body is outside this view.
        if PRETRAINED == True:
Ejemplo n.º 2
0
                               batch_size=BSIZE,
                               # Draw the validation split via an index
                               # sampler; num_workers=0 keeps loading
                               # in-process (no worker subprocesses).
                               sampler=SubsetRandomSampler(valid_idx),
                               num_workers=0)

# NOTE(review): this assignment shadows the imported `model` module with the
# network instance, making the module unreachable below this line. Consider
# renaming the instance (e.g. `net`) if the rest of the script allows it.
# Arguments presumably mean (input features, hidden units, classes) for the
# Iris dataset — TODO confirm against IrisNetwork's definition.
model = model.IrisNetwork(4, 32, 3)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LRATE)

# Initial loss threshold; presumably used further down to keep the best
# model checkpoint — verify against the rest of the script.
best_loss = 1.5
# Per-epoch training history, filled in during the training loop.
history = {
    'epoch': [],
    'train_loss': [],
    'valid_loss': [],
}
for epoch in range(NUM_EPOCH):
    # Fresh meters each epoch: batch wall-time, data-loading time, and the
    # running average of the training loss.
    batch_time = meter.AverageMeter()
    data_time = meter.AverageMeter()
    losses = meter.AverageMeter()

    end_time = time.time()
    for x_train, y_train in train_loader:
        # Time spent waiting on the data loader for this batch.
        data_time.update(time.time() - end_time)

        out = model(x_train)
        loss = criterion(out, y_train)
        # BUG FIX: gradients must be cleared every step; without zero_grad()
        # PyTorch accumulates them across batches, corrupting every update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Track average loss weighted by batch size, and per-batch timing.
        losses.update(loss.item(), x_train.size(0))
        batch_time.update(time.time() - end_time)
        end_time = time.time()
Ejemplo n.º 3
0
    # Run on the first GPU when available, otherwise fall back to CPU.
    device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    #load data
    # Test sets for the chosen upscale factor, keyed by dataset name.
    test_datas=datasets.TestData[UPSCALE_FACTOR]
    #add model
    # Assemble the model pieces: FSRCNN feature generator, a sub-pixel
    # upscale head sized to the generator's output channels, and the extra
    # layer, all moved to the selected device.
    models={}
    models['generative']=model.FSRCNN().to(device)
    models['upscale']={}
    models['upscale'][UPSCALE_FACTOR]=model.Subpixel_Layer(models['generative'].output_channel,UPSCALE_FACTOR).to(device)
    models['extra']=model.Extra_Layer().to(device)
    # NOTE(review): presumably toggles an interpolation baseline inside
    # test() — confirm against its definition.
    interpolate=False

    #set criterion
    criterion=nn.MSELoss()

    #set meter
    # Running averages of the two image-quality metrics over test images.
    PSNR=meter.AverageMeter()
    SSIM=meter.AverageMeter()
    
    # Evaluate every checkpoint (epochs 1..EPOCH) on each test dataset.
    for key in test_datas.keys():
        print(key)
        test_data=test_datas[key]
        # batch_size=1, no shuffle: evaluate images one at a time, in order.
        test_data_loader=torch.utils.data.DataLoader(dataset=test_data,
                                                     batch_size=1,
                                                     shuffle=False)
        PSNR_array=[]
        SSIM_array=[]
        # NOTE(review): `best` is presumably updated below this view to track
        # the best-scoring checkpoint — confirm against the full function.
        best=0
        for epoch in range(EPOCH):
            # Load the weights saved after epoch+1 and evaluate without
            # building gradients.
            utils.load_model(models,UPSCALE_FACTOR,LOAD_PATH,epoch+1)
            with torch.no_grad():
                test(models,UPSCALE_FACTOR,test_data_loader,criterion,PSNR,SSIM,False)