Example #1
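This training example assumes the usual imports at the top of the script; Net, ndarrayLoader, train, save_checkpoint, parser, and the eval module come from the project's own files (not shown here). The standard pieces would be imported roughly as follows (a sketch, not part of the original example):

import os
import argparse

import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn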
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cudnn.benchmark = True

    print("===> Building model")
    model = Net()
    criterion = nn.MSELoss(reduction='sum')

    print("===> Using GPU %d" % opt.gpu)
    torch.cuda.set_device(opt.gpu)
    model = model.cuda(opt.gpu)
    criterion = criterion.cuda(opt.gpu)

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            model.load_state_dict(checkpoint["model"].state_dict(),
                                  strict=False)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    if opt.quant:
        if os.path.isfile(opt.quant_param):
            model.quantize_from(opt.quant_param)
            print('model quantized from ' + opt.quant_param)
        else:
            print("=> no quantize checkpoint found at '{}'".format(
                opt.quant_param))
            exit(1)

    if opt.blu:
        model.load_blu('blu_train.data')
        print('loaded blu from ' + 'blu_train.data')

    print("===> Setting Optimizer")
    #optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay)
    optimizer = optim.Adam(model.parameters(),
                           lr=opt.lr,
                           weight_decay=opt.weight_decay)

    print("===> Loading datasets")
    #train_set = DatasetFromHdf5("data/train.h5")
    #training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
    training_data_loader = ndarrayLoader('data\\input.data',
                                         'data\\target.data',
                                         shuffle=True,
                                         batch_size=opt.batchSize)

    print("===> Training")
    # Train one epoch at a time, then evaluate on Set5, log the result, and checkpoint
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch, opt)
        result = eval.main(model, opt.blu, 'Set5')
        with open('result.txt', 'a') as f:
            f.write('epoch:%d\n' % epoch)
            f.write(result)
        save_checkpoint(model, epoch)
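save_checkpoint is called after every epoch but is not part of this example. A minimal sketch consistent with the resume logic above, which reads checkpoint["model"].state_dict(), might look like this; the checkpoint/ output directory is an assumption:

def save_checkpoint(model, epoch):
    # Store the whole model under the "model" key so that the resume branch can
    # later call checkpoint["model"].state_dict(); the directory name is assumed.
    model_out_path = "checkpoint/model_epoch_{}.pth".format(epoch)
    if not os.path.exists("checkpoint/"):
        os.makedirs("checkpoint/")
    torch.save({"epoch": epoch, "model": model}, model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))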
Example #2
                avg_psnr_predicted += psnr_predicted
                #print(image_name,':Bicubic ',psnr_bicubic,'predicted:',psnr_predicted)
    result += "Scale=%d, PSNR_bicubic=%.3f PSNR_predicted=%.3f\n" % (scale, avg_psnr_bicubic / count, avg_psnr_predicted / count)
    print(result)
    return result

if __name__ == '__main__':
    # Evaluation entry point: load a trained checkpoint, optionally quantize it and
    # load BLU parameters, then run main() on Set14.
    model_path = 'model\\model_bias_blu_epoch_54.pth'
    gpu = 0
    quant = True
    quant_param = 'quant.data'
    blu = True
    torch.cuda.set_device(gpu)
    model = Net()
    checkpoint = torch.load(model_path, map_location='cpu')
    model.load_state_dict(checkpoint["model"].state_dict(), strict=False)
    print('loaded ' + model_path)

    #model.dump('model.data')
    #exit(0)
    model.cuda()
    if quant:
        if os.path.isfile(quant_param):
            model.quantize_from(quant_param)
            print('model quantized from ' + quant_param)
        else:
            model.quantize('quant.data')
    if blu:
        model.load_blu('blu_train.data')
        print('Loaded BLU from ' + 'blu_train.data')
    main(model,blu,"Set14")