Code example #1
p.add_argument('--checkpoint_path',
               default=None,
               help='Path to a trained model checkpoint.')

p.add_argument('--mask_path',
               type=str,
               default=None,
               help='Path to mask image')
p.add_argument('--custom_image',
               type=str,
               default=None,
               help='Path to single training image')
opt = p.parse_args()

if opt.dataset == 'camera':
    img_dataset = dataio.Camera()
    coord_dataset = dataio.Implicit2DWrapper(img_dataset,
                                             sidelength=512,
                                             compute_diff='all')
    image_resolution = (512, 512)
elif opt.dataset == 'camera_downsampled':
    img_dataset = dataio.Camera(downsample_factor=2)
    coord_dataset = dataio.Implicit2DWrapper(img_dataset,
                                             sidelength=256,
                                             compute_diff='all')
    image_resolution = (256, 256)
elif opt.dataset == 'custom':
    img_dataset = dataio.ImageFile(opt.custom_image)
    coord_dataset = dataio.Implicit2DWrapper(
        img_dataset,
        sidelength=(img_dataset[0].size[1], img_dataset[0].size[0]),
        compute_diff='all')
    image_resolution = (img_dataset[0].size[1], img_dataset[0].size[0])
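For context, `Implicit2DWrapper` turns an image into a regression dataset over pixel coordinates. The sketch below illustrates the kind of normalized coordinate grid such a wrapper is expected to pair with pixel values; `make_coord_grid` is a hypothetical helper for illustration, not part of `dataio`:

import numpy as np

def make_coord_grid(sidelength):
    # Flattened (y, x) coordinates in [-1, 1], one row per pixel --
    # roughly what a wrapper like Implicit2DWrapper feeds the network.
    axis = np.linspace(-1, 1, sidelength)
    yy, xx = np.meshgrid(axis, axis, indexing='ij')
    return np.stack([yy.ravel(), xx.ravel()], axis=-1)

coords = make_coord_grid(512)
print(coords.shape)  # (262144, 2)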
Code example #2
p.add_argument(
    '--model_type',
    type=str,
    default='sine',
    help='Options currently are "sine" (all sine activations), "relu" (all relu activations), '
         '"nerf" (relu activations and positional encoding as in NeRF), "rbf" (input rbf layer, rest relu), '
         'and in the future: "mixed" (first layer sine, other layers tanh)')

p.add_argument('--checkpoint_path',
               default=None,
               help='Path to a trained model checkpoint.')
opt = p.parse_args()

img_dataset = dataio.Camera()
coord_dataset = dataio.Implicit2DWrapper(img_dataset,
                                         sidelength=512,
                                         compute_diff='all')
image_resolution = (512, 512)

dataloader = DataLoader(coord_dataset,
                        shuffle=True,
                        batch_size=opt.batch_size,
                        pin_memory=True,
                        num_workers=0)

# Define the model.
if opt.model_type in ('sine', 'relu', 'tanh', 'selu', 'elu', 'softplus'):
    model = modules.SingleBVPNet(type=opt.model_type, mode='mlp',
                                 sidelength=image_resolution)
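The 'sine' model type refers to a SIREN-style network. As a rough illustration only (not the repo's `modules.SingleBVPNet` implementation), a sine layer with the uniform initialization proposed in the SIREN paper looks like this:

import torch
from torch import nn

class SineLayer(nn.Module):
    # Illustrative SIREN-style layer: y = sin(omega_0 * (Wx + b)).
    def __init__(self, in_features, out_features, omega_0=30.0, is_first=False):
        super().__init__()
        self.omega_0 = omega_0
        self.linear = nn.Linear(in_features, out_features)
        with torch.no_grad():
            if is_first:
                # First layer: U(-1/fan_in, 1/fan_in), per the SIREN paper.
                bound = 1.0 / in_features
            else:
                # Hidden layers: U(-sqrt(6/fan_in)/omega_0, sqrt(6/fan_in)/omega_0).
                bound = (6.0 / in_features) ** 0.5 / omega_0
            self.linear.weight.uniform_(-bound, bound)

    def forward(self, x):
        return torch.sin(self.omega_0 * self.linear(x))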
Code example #3
def main():
    if opt.dataset == 'camera':
        img_dataset = dataio.Camera()
    elif opt.dataset == 'pluto':
        pluto_url = "https://upload.wikimedia.org/wikipedia/commons/e/ef/Pluto_in_True_Color_-_High-Res.jpg"
        img_dataset = dataio.ImageFile('../data/pluto.jpg', url=pluto_url, grayscale=opt.grayscale)
    elif opt.dataset == 'tokyo':
        img_dataset = dataio.ImageFile('../data/tokyo.tif', grayscale=opt.grayscale)
    elif opt.dataset == 'mars':
        img_dataset = dataio.ImageFile('../data/mars.tif', grayscale=opt.grayscale)

    # A single patch-size value is replicated across all three dimensions.
    if len(opt.patch_size) == 1:
        opt.patch_size = 3 * opt.patch_size

    # set up dataset
    coord_dataset = dataio.Patch2DWrapperMultiscaleAdaptive(img_dataset,
                                                            sidelength=opt.res,
                                                            patch_size=opt.patch_size[1:],
                                                            jitter=True,
                                                            num_workers=opt.num_workers,
                                                            length=opt.steps_til_tiling,
                                                            scale_init=opt.scale_init,
                                                            max_patches=opt.max_patches)

    opt.num_epochs = opt.num_iters // len(coord_dataset)

    image_resolution = (opt.res, opt.res)

    dataloader = DataLoader(coord_dataset, shuffle=False, batch_size=1, pin_memory=True,
                            num_workers=opt.num_workers)

    if opt.resume is not None:
        path, resume_iter = opt.resume
        resume_iter = int(resume_iter)
        assert os.path.isdir(path)
        assert opt.config is not None, 'Specify config file'

    # Define the model.
    if opt.grayscale:
        out_features = 1
    else:
        out_features = 3

    if opt.model_type == 'multiscale':
        model = modules.ImplicitAdaptivePatchNet(in_features=3, out_features=out_features,
                                                 num_hidden_layers=opt.hidden_layers,
                                                 hidden_features=opt.hidden_features,
                                                 feature_grid_size=(opt.patch_size[0], opt.patch_size[1], opt.patch_size[2]),
                                                 sidelength=opt.res,
                                                 num_encoding_functions=10,
                                                 patch_size=opt.patch_size[1:])

    elif opt.model_type == 'siren':
        model = modules.ImplicitNet(opt.res, in_features=2,
                                    out_features=out_features,
                                    num_hidden_layers=4,
                                    hidden_features=1536,
                                    mode='siren', w0=opt.w0)
    elif opt.model_type == 'pe':
        model = modules.ImplicitNet(opt.res, in_features=2,
                                    out_features=out_features,
                                    num_hidden_layers=4,
                                    hidden_features=1536,
                                    mode='pe')
    else:
        raise NotImplementedError('Only model types multiscale, siren, and pe are implemented')

    model.cuda()

    # print number of model parameters
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    print(f'Num. Parameters: {params}')

    # Define the loss
    loss_fn = partial(loss_functions.image_mse,
                      tiling_every=opt.steps_til_tiling,
                      dataset=coord_dataset,
                      model_type=opt.model_type)
    summary_fn = partial(utils.write_image_patch_multiscale_summary,
                         image_resolution, opt.patch_size[1:], coord_dataset,
                         model_type=opt.model_type, skip=opt.skip_logging)

    # Define the pruning function
    pruning_fn = partial(pruning_functions.no_pruning,
                         pruning_every=1)

    # if we are resuming from a saved checkpoint
    if opt.resume is not None:
        print('Loading checkpoints')
        model_dict = torch.load(os.path.join(path, 'checkpoints', f'model_{resume_iter:06d}.pth'))
        model.load_state_dict(model_dict)

        # load optimizers
        try:
            resume_checkpoint = {}
            optim_dict = torch.load(os.path.join(path, 'checkpoints', f'optim_{resume_iter:06d}.pth'))
            for g in optim_dict['optimizer_state_dict']['param_groups']:
                g['lr'] = opt.lr
            resume_checkpoint['optimizer_state_dict'] = optim_dict['optimizer_state_dict']
            resume_checkpoint['total_steps'] = optim_dict['total_steps']
            resume_checkpoint['epoch'] = optim_dict['epoch']

            # restore the quadtree tiling state saved alongside the optimizer
            print('Initializing models')
            coord_dataset.quadtree.__load__(optim_dict['quadtree'])
            coord_dataset.synchronize()

        except FileNotFoundError:
            print('Unable to load optimizer checkpoints')
    else:
        resume_checkpoint = {}

    if opt.eval:
        run_eval(model, coord_dataset)
    else:
        # Save command-line parameters to the log directory.
        root_path = os.path.join(opt.logging_root, opt.experiment_name)
        utils.cond_mkdir(root_path)
        p.write_config_file(opt, [os.path.join(root_path, 'config.ini')])

        # Save text summary of model into log directory.
        with open(os.path.join(root_path, "model.txt"), "w") as out_file:
            out_file.write(str(model))

        objs_to_save = {'quadtree': coord_dataset.quadtree}

        training.train(model=model, train_dataloader=dataloader,
                       epochs=opt.num_epochs, lr=opt.lr,
                       steps_til_summary=opt.steps_til_summary,
                       epochs_til_checkpoint=opt.epochs_til_ckpt,
                       model_dir=root_path, loss_fn=loss_fn,
                       pruning_fn=pruning_fn, summary_fn=summary_fn,
                       objs_to_save=objs_to_save,
                       resume_checkpoint=resume_checkpoint)
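For reference, the resume branch above expects a `checkpoints/` directory containing paired `model_{step:06d}.pth` and `optim_{step:06d}.pth` files. Below is a minimal sketch of a writer producing that layout, assuming the same keys the loader reads; `save_checkpoint` is a hypothetical helper, and `quadtree_state` stands for whatever the quadtree's `__load__` accepts:

import os
import torch

def save_checkpoint(checkpoints_dir, step, epoch, model, optimizer, quadtree_state):
    # Hypothetical helper mirroring the layout the resume branch reads:
    # model_{step:06d}.pth holds the model state_dict; optim_{step:06d}.pth
    # holds the optimizer state plus bookkeeping and the quadtree state.
    os.makedirs(checkpoints_dir, exist_ok=True)
    torch.save(model.state_dict(),
               os.path.join(checkpoints_dir, f'model_{step:06d}.pth'))
    torch.save({'optimizer_state_dict': optimizer.state_dict(),
                'total_steps': step,
                'epoch': epoch,
                'quadtree': quadtree_state},
               os.path.join(checkpoints_dir, f'optim_{step:06d}.pth'))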