Example #1
def main(args):
    # Build one submodel per configured depth and move it to the GPU when enabled
    submodels = []
    for model_index in range(args.submodel_num):
        submodel = dalong_model.Submodel(args, args.init_depth[model_index])
        if cfg.CUDA_USE:
            submodel = submodel.cuda()
        submodels.append(submodel)

    # Build the router network
    router = dalong_model.Encoder(args)
    if cfg.CUDA_USE:
        router = router.cuda()

    test_dataset = datasets.dataSet(args)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=int(args.workers),
                                              collate_fn=datasets.collate_fn)

    # Restore every submodel from its own checkpoint
    for model_index in range(args.submodel_num):
        init_model = os.path.join('./models',
                                  'SubModel_{}'.format(model_index),
                                  args.init_folder[model_index],
                                  args.init_submodel[model_index])
        print('dalong log : for model {} , init with {}'.format(model_index, init_model))
        model_dict = torch.load(init_model)
        submodels[model_index].load_state_dict(model_dict)

    # Restore the router
    init_model = os.path.join('./models/Encoder', args.init_router)
    model_dict = torch.load(init_model)
    router.load_state_dict(model_dict)

    Test(test_loader, submodels, router)
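The Test routine and the cfg, datasets, and dalong_model modules belong to the surrounding repository and are not shown here. One practical detail: torch.load as used above assumes the device the checkpoint was saved on is available. A minimal sketch, using only standard PyTorch and assuming nothing about this repository, of loading the same checkpoints on a machine without CUDA:

    import torch

    def load_weights(path, module, use_cuda):
        # map_location='cpu' lets a checkpoint saved on GPU be restored without CUDA
        state = torch.load(path, map_location=None if use_cuda else 'cpu')
        module.load_state_dict(state)
        return module

With a helper like this, the loop above would call load_weights(init_model, submodels[model_index], cfg.CUDA_USE) instead of the separate torch.load / load_state_dict pair.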
Example #2
def main(args):

    models = {
        'DemosaicNet':
        dalong_model.DemosaicNet(args.depth,
                                 args.width,
                                 args.kernel_size,
                                 pad=args.pad,
                                 batchnorm=args.batchnorm,
                                 bayer_type=args.bayer_type),
        'DeepISP':
        dalong_model.DeepISP(args),
        'SIDNet':
        dalong_model.SIDNet(args),
        'BayerNet':
        dalong_model.BayerNetwork(args),
        'UNet':
        dalong_model.UNet(args),
        'DeNet':
        dalong_model.DeNet(args),
        'UNet2':
        dalong_model.UNet2(args),
        'FastDenoisaicking':
        dalong_model.FastDenoisaicking(args),
        'FilterModel':
        dalong_model.FilterModel(args),
        'Submodel':
        dalong_model.Submodel(args, args.depth),
    }
    test_dataset = datasets.dataSet(args)
    # Look up the requested model; the string 'dalong' acts as a "not found" sentinel
    model = models.get(args.model, 'dalong')
    # release_memory is expected to drop the instances that were built but not selected
    release_memory(models, args)
    if model == 'dalong':
        print('Not A model {}'.format(args.model))
        exit()
    collate_fn = datasets.collate_fn
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              args.TRAIN_BATCH,
                                              shuffle=False,
                                              num_workers=int(args.workers),
                                              collate_fn=collate_fn)

    print('dalong log : begin to load data')

    init_model = os.path.join(args.checkpoint_folder, args.init_model)
    if cfg.CUDA_USE:
        model = model.cuda()

    if args.init_model != '':
        print('dalong log : init model with {}'.format(args.init_model))
        model_dict = torch.load(init_model)
        model.load_state_dict(model_dict)

    test(test_loader, model)
    print('dalong log : test finished ')
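Example #2 builds every candidate model up front and then relies on release_memory, a repository helper that is not shown, to discard the ones that were not picked. A hypothetical alternative, sketched below under the assumption that only the selected model is ever needed, keeps constructors instead of instances so nothing has to be released; the dalong_model names are the ones used above, the rest is illustrative:

    # Hypothetical lazy lookup: values are constructors, so only the chosen model is built.
    # Assumes dalong_model is imported as in the examples above; only the constructors
    # that take a single args object are listed here.
    MODEL_BUILDERS = {
        'DeepISP': dalong_model.DeepISP,
        'SIDNet': dalong_model.SIDNet,
        'BayerNet': dalong_model.BayerNetwork,
        'UNet': dalong_model.UNet,
    }

    def build_model(name, args):
        try:
            return MODEL_BUILDERS[name](args)
        except KeyError:
            raise ValueError('Not a model: {}'.format(name))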
Example #3
def main(args):

    # Candidate models, keyed by the value expected in args.model
    models = {
        'DemosaicNet': dalong_model.DemosaicNet(args.depth,
                                                args.width,
                                                args.kernel_size,
                                                pad=args.pad,
                                                batchnorm=args.batchnorm,
                                                bayer_type=args.bayer_type),
        'DeepISP': dalong_model.DeepISP(args),
        'SIDNet': dalong_model.SIDNet(args),
        'BayerNet': dalong_model.BayerNetwork(args),
        'UNet': dalong_model.UNet(args),
        'DeNet': dalong_model.DeNet(args),
        'UNet2': dalong_model.UNet2(args),
        'FastDenoisaick': dalong_model.FastDenoisaicking(args),
        'FilterModel': dalong_model.FilterModel(args),
        'Submodel': dalong_model.Submodel(args, args.depth),
    }

    # Candidate loss functions, keyed by the value expected in args.loss
    Losses = {
        'L1Loss': dalong_loss.L1Loss(),
        'L2Loss': dalong_loss.L2Loss(),
        'SSIM': dalong_loss.SSIM(),
        'MSSIM': dalong_loss.MSSSIM(),
        'pixel_perceptural': dalong_loss.pixel_perceptural_loss(),
        'VGGLoss': dalong_loss.VGGLoss(),
        'BCELoss': dalong_loss.BCELoss(),
        'UNet2Loss': dalong_loss.UNet2Loss(args.size * 2),
    }
    print('dalong log : all models init successfully')

    # release_memory is expected to drop the model and loss instances not used for this run
    release_memory(models, Losses, args)
    train_dataset = datasets.dataSet(args)
    collate_fn = datasets.collate_fn
    # 'dalong' again acts as a "not found" sentinel
    model = models.get(args.model, 'dalong')
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               args.TRAIN_BATCH,
                                               shuffle=True,
                                               collate_fn=collate_fn,
                                               num_workers=int(args.workers))
    criterion = Losses.get(args.loss, 'dalong')
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=1e-08)

    # Optional adversarial training setup
    discriminator = None
    adversarial_criterion = None
    optim_discriminator = None
    if args.TRAIN_GAN:
        discriminator = dalong_model.Discriminator()
        discriminator = torch.nn.DataParallel(discriminator)
        adversarial_criterion = dalong_loss.BCELoss()
        optim_discriminator = torch.optim.Adam(discriminator.parameters(),
                                               lr=1e-4,
                                               betas=(0.9, 0.999),
                                               eps=1e-08,
                                               weight_decay=1e-08)
    if cfg.CUDA_USE:
        model = model.cuda()
        if args.TRAIN_GAN:
            discriminator = discriminator.cuda()

    for epoch in range(args.max_epoch):
        if args.TRAIN_GAN:
            trainGAN(train_loader, model, discriminator, criterion,
                     adversarial_criterion, optimizer, optim_discriminator,
                     epoch, args)
        else:
            train(train_loader, model, criterion, optimizer, epoch, args)
        # Periodically save a checkpoint
        if (epoch + 1) % args.save_freq == 0:
            path_checkpoint = '{0}/{1}_state_epoch{2}.pth'.format(
                args.checkpoint_folder, args.model_name, epoch + 1)
            utils.save_checkpoint(model, path_checkpoint)
            print('save model at epoch = {}'.format(epoch + 1))
    print('dalong log : train finished ')
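utils.save_checkpoint is another repository helper that these examples rely on but do not define. A minimal sketch of what such a helper typically looks like, assuming only the weights are persisted (the real implementation may differ):

    import torch

    def save_checkpoint(model, path):
        # Persist only the weights; the examples rebuild the architecture from args
        # and call load_state_dict when restoring.
        torch.save(model.state_dict(), path)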
Example #4
def main(args):
    train_dataset = dataset.dataSet(args)
    print('dalong log : begin to load data')
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               args.batchsize,
                                               shuffle=True,
                                               num_workers=int(args.workers))
    model = models.SIDNet(args)
    print('dalong log : model build finished ')
    criterion = models.SIDNetLoss()
    print('dalong log : Loss build finished ')
    # Spread the model across the GPUs listed in args.gpu_use
    model = torch.nn.DataParallel(model, device_ids=list(args.gpu_use))
    model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=1e-08)
    for epoch in range(args.max_epoch):
        train(train_loader, model, criterion, optimizer, epoch, args)
        if (epoch + 1) % args.save_freq == 0:
            path_checkpoint = '{0}/{1}_state_epoch{2}.pth'.format(
                args.checkpoint_folder, args.model_name, epoch + 1)
            utils.save_checkpoint(model, path_checkpoint)
    print('dalong log : train finished ')
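torch.nn.DataParallel expects integer device ids. If args.gpu_use arrives as a comma-separated string such as '0,1' (an assumption; the argument parser is not shown in these examples), then list(args.gpu_use) would yield single characters, so a small parser like the following is safer:

    def parse_gpu_ids(gpu_use):
        # '0,1' -> [0, 1]; an iterable of ints passes through unchanged
        if isinstance(gpu_use, str):
            return [int(g) for g in gpu_use.split(',') if g.strip() != '']
        return [int(g) for g in gpu_use]

The DataParallel call above would then read torch.nn.DataParallel(model, device_ids=parse_gpu_ids(args.gpu_use)).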
Example #5
def main(args):

    print('dalong log : begin to load data')
    init_model = os.path.join(args.checkpoint_folder, args.init_model)
    test_dataset = datasets.dataSet(args)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batchsize,
                                              shuffle=False,
                                              num_workers=int(args.workers))
    # Alternative: model = models.DemosaicNet(args.depth, args.width, args.kernel_size,
    #                                         pad=args.pad, batchnorm=args.batchnorm,
    #                                         bayer_type=args.bayer_type)
    model = models.BayerNetwork(args)
    # Wrapping in DataParallel before loading means the checkpoint is expected to use
    # the 'module.'-prefixed parameter names that DataParallel produces
    model = torch.nn.DataParallel(model, device_ids=args.gpu_use)
    if args.init_model != '':
        print('dalong log : init model with {}'.format(args.init_model))
        model_dict = torch.load(init_model)
        model.load_state_dict(model_dict)
    model = model.cuda()
    test(test_loader, model)
    print('dalong log : test finished ')
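This last example wraps the network in DataParallel before calling load_state_dict, so the checkpoint must carry the 'module.' prefix that DataParallel adds to parameter names. When a checkpoint saved from a DataParallel model instead has to be loaded into a plain, unwrapped module, the prefix can be stripped; the snippet below is standard PyTorch handling, not code from this repository:

    import torch

    def load_into_plain_module(path, model):
        state = torch.load(path, map_location='cpu')
        # Drop the 'module.' prefix that DataParallel adds to every parameter name
        cleaned = {k[len('module.'):] if k.startswith('module.') else k: v
                   for k, v in state.items()}
        model.load_state_dict(cleaned)
        return model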