Example #1
0
def validate(dataloader, net, criterion_MSE, args):
    """Run one validation pass and return the average MSE loss.

    Args:
        dataloader: iterable yielding dicts with 'input_spectrum' and
            'output_spectrum' tensors.
        net: model under evaluation.
        criterion_MSE: MSE loss module used for reporting.
        args: namespace providing `gpu` (CUDA device index).

    Returns:
        float: sample-weighted average MSE over the whole loader.
    """
    batch_time = utilities.AverageMeter('Time', ':6.3f')
    losses = utilities.AverageMeter('Loss', ':.4e')
    progress = utilities.ProgressMeter(len(dataloader), [batch_time, losses], prefix='Validation: ')

    # FIX: switch to eval mode so dropout/batch-norm behave deterministically.
    # The sibling evaluate() functions in this file do this; it was missing here.
    net.eval()

    with torch.no_grad():
        end = time.time()
        for i, data in enumerate(dataloader):
            inputs = data['input_spectrum'].float().cuda(args.gpu)
            target = data['output_spectrum'].float().cuda(args.gpu)

            output = net(inputs)

            loss_MSE = criterion_MSE(output, target)
            losses.update(loss_MSE.item(), inputs.size(0))

            # Track per-batch wall time for the progress display.
            batch_time.update(time.time() - end)
            end = time.time()

            if i % 400 == 0:
                progress.display(i)

    return losses.avg
Example #2
0
def train(dataloader, net, optimizer, scheduler, criterion, criterion_MSE, epoch, args):
    """Train `net` for one epoch and return the epoch's average MSE.

    Args:
        dataloader: iterable yielding dicts with 'input_spectrum' and
            'output_spectrum' tensors.
        net: model being trained.
        optimizer: optimizer stepping `net`'s parameters.
        scheduler: LR scheduler; stepped per batch only for the
            'cyclic-lr' / 'one-cycle-lr' policies (others are assumed
            to be stepped per epoch by the caller).
        criterion: loss used for backpropagation.
        criterion_MSE: MSE loss used only for reporting (so runs with
            different training criteria remain comparable).
        epoch: current epoch number, shown in the progress prefix.
        args: namespace providing `gpu` and `scheduler`.

    Returns:
        float: sample-weighted average MSE over the epoch.
    """
    batch_time = utilities.AverageMeter('Time', ':6.3f')
    losses = utilities.AverageMeter('Loss', ':.4e')
    progress = utilities.ProgressMeter(len(dataloader), [batch_time, losses], prefix="Epoch: [{}]".format(epoch))

    # FIX: ensure dropout/batch-norm are in training mode. This was missing,
    # which matters if validate()/evaluate() left the model in eval mode.
    net.train()

    end = time.time()
    for i, data in enumerate(dataloader):
        inputs = data['input_spectrum'].float().cuda(args.gpu)
        target = data['output_spectrum'].float().cuda(args.gpu)

        output = net(inputs)

        optimizer.zero_grad()
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

        # Per-batch schedules step here; others step once per epoch elsewhere.
        if args.scheduler in ("cyclic-lr", "one-cycle-lr"):
            scheduler.step()

        loss_MSE = criterion_MSE(output, target)
        losses.update(loss_MSE.item(), inputs.size(0))

        batch_time.update(time.time() - end)
        end = time.time()

        if i % 400 == 0:
            progress.display(i)
    return losses.avg
Example #3
0
def train(args, train_loader, model, criterion, optimizer, epoch, progress,
          train_time):
    """Train `model` for one epoch, log progress, and record timings.

    Appends an epoch summary tuple to progress['train'] and accumulates
    the epoch's batch time into `train_time`.

    Args:
        args: namespace providing `cuda` and `log_interval`.
        train_loader: classification loader yielding (data, label) pairs.
        model: model being trained.
        criterion: classification loss.
        optimizer: optimizer stepping `model`'s parameters.
        epoch: current epoch number (for logging).
        progress: dict with a 'train' list to append the epoch summary to.
        train_time: meter accumulating total training time across epochs.
    """
    batch_time = utilities.AverageMeter()
    data_time = utilities.AverageMeter()

    model.train()
    correct = 0

    end = time.time()

    for i, (data, label) in enumerate(train_loader):
        data_time.update(time.time() - end)
        if args.cuda:
            data, label = data.cuda(), label.cuda()
        # NOTE: Variable is a no-op wrapper on PyTorch >= 0.4; kept for
        # compatibility with the rest of this codebase.
        data, label = Variable(data), Variable(label)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1)[1]
        correct += pred.eq(label.data).cpu().sum()

        batch_time.update(time.time() - end)
        end = time.time()

        # BUG FIX: this logging block was indented OUTSIDE the loop, so it
        # printed at most once per epoch with a stale `i`. It belongs inside.
        # Also, loss.data[0] raises IndexError on 0-dim tensors (PyTorch >=
        # 0.4); loss.item() is the supported scalar accessor.
        if i % args.log_interval == 0:
            print('#Training Epoch:{} [{}/{} ({:.0f}%)]\tLoss:{:.6f}'.format(
                epoch, i * len(data), len(train_loader.dataset),
                100. * i / len(train_loader), loss.item()))

    train_time.update(batch_time.get_sum())
    train_acc = 100. * correct / len(train_loader.dataset)
    # Summary uses the LAST batch's loss, matching the original behavior.
    progress['train'].append(
        (epoch, loss.item(), train_acc, batch_time.get_sum(),
         batch_time.get_avg(), data_time.get_sum(), data_time.get_avg()))
Example #4
0
def evaluate(dataloader, net, args):
    """Evaluate `net` and compare it to a Savitzky-Golay denoising baseline.

    For every batch, the network MSE is accumulated, and a window-9,
    order-1 Savitzky-Golay filter is applied to the raw input as a
    classical baseline (min-subtracted per spectrum before comparing
    to the target). Prints a summary and returns both metrics.

    Args:
        dataloader: yields dicts with 'input_spectrum'/'output_spectrum'.
        net: trained model (switched to eval mode here).
        args: namespace providing `gpu`.

    Returns:
        tuple: (average network MSE, list of per-batch Savitzky-Golay MSEs).
    """
    losses = utilities.AverageMeter('Loss', ':.4e')

    net.eval()

    # Build the criterion once instead of once per batch (was rebuilt
    # inside the loop). Also removed dead code: an `x_out` numpy copy of
    # the output and an `SG_loss` meter that were never used.
    criterion = nn.MSELoss()
    MSE_SG = []

    with torch.no_grad():
        for data in dataloader:
            x = data['input_spectrum']
            inputs = x.float().cuda(args.gpu)
            y = data['output_spectrum']
            target = y.float().cuda(args.gpu)

            # CPU numpy views of the raw batch for the classical filter.
            # assumes batches squeeze to (batch, spectrum_length) -- TODO confirm
            x = np.squeeze(x.numpy())
            y = np.squeeze(y.numpy())

            output = net(inputs)
            loss = criterion(output, target)

            # Savitzky-Golay baseline: window 9, polynomial order 1,
            # shifted so each spectrum's minimum sits at zero.
            SGF_1_9 = scipy.signal.savgol_filter(x, 9, 1)
            shifted = SGF_1_9 - np.reshape(np.amin(SGF_1_9, axis=1),
                                           (len(SGF_1_9), 1))
            # (The original wrapped this in a redundant second np.mean.)
            MSE_SG.append(np.mean(np.square(np.absolute(y - shifted))))

            losses.update(loss.item(), inputs.size(0))

        print("Neural Network MSE: {}".format(losses.avg))
        print("Savitzky-Golay MSE: {}".format(np.mean(np.asarray(MSE_SG))))
        print("Neural Network performed {0:.2f}x better than Savitzky-Golay".
              format(np.mean(np.asarray(MSE_SG)) / losses.avg))

    return losses.avg, MSE_SG
Example #5
0
def validate(dataloader, net, criterion_MSE, args):
    """Validate an image model, reporting MSE, PSNR, and SSIM averages.

    Args:
        dataloader: iterable yielding dicts with 'input_image' and
            'output_image' tensors.
        net: model under evaluation.
        criterion_MSE: MSE loss module used for reporting.
        args: namespace providing `gpu` (CUDA device index).

    Returns:
        tuple: (average MSE, average PSNR, average SSIM), all
        sample-weighted over the loader.
    """
    batch_time = utilities.AverageMeter('Time', ':6.3f')
    losses = utilities.AverageMeter('Loss', ':.4e')
    psnr = utilities.AverageMeter('PSNR', ':.4f')
    ssim = utilities.AverageMeter('SSIM', ':.4f')
    progress = utilities.ProgressMeter(len(dataloader), [batch_time, psnr, ssim], prefix='Validation: ')

    # FIX: switch to eval mode so dropout/batch-norm behave deterministically
    # (was missing; the evaluate() functions in this file do this).
    net.eval()

    with torch.no_grad():
        end = time.time()
        for i, data in enumerate(dataloader):
            inputs = data['input_image'].float().cuda(args.gpu)
            target = data['output_image'].float().cuda(args.gpu)

            output = net(inputs)

            loss_MSE = criterion_MSE(output, target)
            losses.update(loss_MSE.item(), inputs.size(0))

            psnr_batch = utilities.calc_psnr(output, target)
            psnr.update(psnr_batch, inputs.size(0))

            ssim_batch = utilities.calc_ssim(output, target)
            ssim.update(ssim_batch, inputs.size(0))

            batch_time.update(time.time() - end)
            end = time.time()

            if i % 20 == 0:
                progress.display(i)

    return losses.avg, psnr.avg, ssim.avg
Example #6
0
        # NOTE(review): this snippet is a fragment — the enclosing function's
        # `def` line (and the `if` guarding this seed call, presumably an
        # args.cuda check) were cut off above. Seeds CUDA RNG for reproducibility.
        torch.cuda.manual_seed(args.seed)

    # Load training and testing data
    train_loader, test_loader = get_data_loader(args)
    # Load model (get_model also restores best_acc/start_epoch, presumably
    # from a checkpoint — verify against its definition)
    model, best_acc, start_epoch = get_model(args)
    criterion = get_criterion(args)
    optimizer = get_optimizer(args, model)
    lr = args.lr
    # Milestones are only needed when step-wise LR clipping is enabled.
    if args.lr_clipping:
        milestones = utilities.parse_milestones(args.milestones)

    # Per-epoch history: train/test tuples appended by train() and test().
    progress = {}
    progress['train'] = []
    progress['test'] = []
    train_time = utilities.AverageMeter()
    test_time = utilities.AverageMeter()

    for epoch in range(start_epoch, start_epoch + args.epochs):
        # Adjust LR before each epoch when milestone-based clipping is on.
        if args.lr_clipping:
            utilities.adjust_learning_rate(optimizer, lr, epoch, milestones)
    # train function
        train(args, train_loader, model, criterion, optimizer, epoch, progress,
              train_time)
        test(args, test_loader, model, criterion, epoch, progress, best_acc,
             test_time)
    # Aggregate timings: total/average train time, and test time normalized
    # per sample (note: numerator is get_avg(), not get_sum() — TODO confirm
    # that is intended and not a sum/avg mix-up).
    progress['train_time'] = (train_time.get_avg(), train_time.get_sum())
    progress['test_time'] = (test_time.get_avg() / len(test_loader.dataset),
                             test_time.get_avg())

    current_time = utilities.get_current_time()
Example #7
0
def evaluate(dataloader, net, scale, args):
    """Evaluate a super-resolution network against classical baselines.

    Computes PSNR, SSIM, and MSE for the network output, for bicubic
    interpolation, and for nearest-neighbour interpolation, all against
    the same high-resolution targets, then prints a comparison table.

    Args:
        dataloader: yields dicts with 'input_image' (low-res) and
            'output_image' (high-res target) tensors.
        net: trained super-resolution model (set to eval mode here).
        scale: upscaling factor applied by the interpolation baselines.
        args: namespace providing `gpu` (CUDA device index).

    Returns:
        tuple: (psnr, psnr_bicubic, psnr_nn, ssim, ssim_bicubic, ssim_nn,
        mse, mse_bicubic, mse_nn) averages, in that order.
    """
    psnr = utilities.AverageMeter('PSNR', ':.4f')
    ssim = utilities.AverageMeter('SSIM', ':.4f')
    mse_NN = utilities.AverageMeter('MSE', ':.4f')
    psnr_bicubic = utilities.AverageMeter('PSNR_Bicubic', ':.4f')
    ssim_bicubic = utilities.AverageMeter('SSIM_Bicubic', ':.4f')
    mse_bicubic = utilities.AverageMeter('MSE_Bicubic', ':.4f')
    psnr_nearest_neighbours = utilities.AverageMeter('PSNR_Nearest_Neighbours', ':.4f')
    ssim_nearest_neighbours = utilities.AverageMeter('SSIM_Nearest_Neighbours', ':.4f')
    mse_nearest_neighbours = utilities.AverageMeter('MSE_Nearest_Neighbours', ':.4f')

    net.eval()

    # Build the MSE criterion once (the original constructed a fresh
    # nn.MSELoss() three times per batch).
    criterion = nn.MSELoss()

    def _accumulate(prediction, target, n, psnr_m, ssim_m, mse_m):
        # Shared PSNR/SSIM/MSE bookkeeping for the network and both baselines.
        psnr_m.update(utilities.calc_psnr(prediction, target), n)
        ssim_m.update(utilities.calc_ssim(prediction, target), n)
        # FIX: .item() so the meter stores a plain float; the original fed
        # the raw loss tensor into the MSE meters, keeping GPU tensors alive.
        mse_m.update(criterion(prediction, target).item(), n)

    with torch.no_grad():
        for data in dataloader:
            x = data['input_image']
            inputs = x.float().cuda(args.gpu)
            target = data['output_image'].float().cuda(args.gpu)

            # Network prediction.
            output = net(inputs)

            # Interpolation baselines computed on the raw CPU batch.
            # assumes the batch squeezes to (batch, H, W) -- TODO confirm
            # (a removed, unused `y2` copy of the target suggested the same).
            x2 = np.squeeze(x.numpy())
            nearest_neighbours = scipy.ndimage.zoom(x2, (1, scale, scale), order=0)
            bicubic = scipy.ndimage.zoom(x2, (1, scale, scale), order=3)

            bicubic = torch.from_numpy(bicubic).cuda(args.gpu)
            nearest_neighbours = torch.from_numpy(nearest_neighbours).cuda(args.gpu)

            n = inputs.size(0)
            _accumulate(nearest_neighbours, target, n,
                        psnr_nearest_neighbours, ssim_nearest_neighbours,
                        mse_nearest_neighbours)
            _accumulate(bicubic, target, n,
                        psnr_bicubic, ssim_bicubic, mse_bicubic)
            _accumulate(output, target, n, psnr, ssim, mse_NN)

    print("RCAN PSNR: {}    Bicubic PSNR: {}    Nearest Neighbours PSNR: {}".format(psnr.avg, psnr_bicubic.avg, psnr_nearest_neighbours.avg))
    print("RCAN SSIM: {}    Bicubic SSIM: {}    Nearest Neighbours SSIM: {}".format(ssim.avg, ssim_bicubic.avg, ssim_nearest_neighbours.avg))
    print("RCAN MSE:  {}    Bicubic MSE:  {}    Nearest Neighbours MSE:  {}".format(mse_NN.avg, mse_bicubic.avg, mse_nearest_neighbours.avg))
    return psnr.avg, psnr_bicubic.avg, psnr_nearest_neighbours.avg, ssim.avg, ssim_bicubic.avg, ssim_nearest_neighbours.avg, mse_NN.avg, mse_bicubic.avg, mse_nearest_neighbours.avg