'--model_type',
    type=str,
    default='sine',
    help=
    'Options currently are "sine" (all sine activations), "relu" (all relu activations,'
    '"nerf" (relu activations and positional encoding as in NeRF), "rbf" (input rbf layer, rest relu),'
    'and in the future: "mixed" (first layer sine, other layers tanh)')

p.add_argument('--checkpoint_path', default=None,
               help='Checkpoint to trained model.')
opt = p.parse_args()

# Fit the cameraman test image at full 512x512 resolution.
# compute_diff='all' presumably makes the wrapper return derivative
# supervision as well -- confirm against dataio.Implicit2DWrapper.
img_dataset = dataio.Camera()
coord_dataset = dataio.Implicit2DWrapper(img_dataset, sidelength=512,
                                         compute_diff='all')
image_resolution = (512, 512)

dataloader = DataLoader(coord_dataset, batch_size=opt.batch_size,
                        shuffle=True, pin_memory=True, num_workers=0)

# Define the model: each of these activations maps to a plain MLP variant.
if opt.model_type in ('sine', 'relu', 'tanh', 'selu', 'elu', 'softplus'):
    model = modules.SingleBVPNet(type=opt.model_type, mode='mlp',
                                 sidelength=image_resolution)
# ---- Example #2 ----
               help='Checkpoint to trained model.')

p.add_argument('--conv_encoder', action='store_true', default=False,
               help='Use convolutional encoder process')
opt = p.parse_args()

# Only the 32x32 downsampled CelebA split is supported by this script.
assert opt.dataset == 'celeba_32x32'
gmode = 'conv_cnp' if opt.conv_encoder else 'cnp'

# Wrap CelebA as (coordinate, pixel) samples for the neural process.
img_dataset = dataio.CelebA(split='train', downsampled=True)
coord_dataset = dataio.Implicit2DWrapper(
    img_dataset, sidelength=(32, 32),
    train_sparsity_range=opt.train_sparsity_range,
    generalization_mode=gmode)
image_resolution = (32, 32)

dataloader = DataLoader(coord_dataset, batch_size=opt.batch_size,
                        shuffle=True, pin_memory=True, num_workers=0)

# Convolutional-encoder variant of the hypernetwork.
if opt.conv_encoder:
    model = meta_modules.ConvolutionalNeuralProcessImplicit2DHypernet(
        in_features=img_dataset.img_channels,
        out_features=img_dataset.img_channels,
        image_resolution=image_resolution)
# ---- Example #3 ----
        # Metric accumulators: the *_nemo lists presumably track the NEMO
        # baseline and the plain lists this model -- filled later, beyond
        # this excerpt.
        psnr_list_nemo = []
        bpp_list_nemo = []
        ssim_list_nemo = []
        psnr_list = []
        bpp_list = []
        ssim_list = []
        for im in imglob:

            # NOTE(review): '/'-splitting assumes POSIX-style paths;
            # os.path/pathlib would be portable.
            image_name = im.split('/')[-1].split('.')[0]

            img_dataset = dataio.ImageFile(im)
            img = PIL.Image.open(im)
            # Evaluate at the training downscaling factor.  PIL's size is
            # (width, height), so the resolution tuple is (height, width).
            scale = TRAINING_FLAGS['downscaling_factor']
            image_resolution = (img.size[1] // scale, img.size[0] // scale)

            coord_dataset = dataio.Implicit2DWrapper(
                img_dataset, sidelength=image_resolution)

            # One image per batch.
            dataloader = DataLoader(coord_dataset,
                                    shuffle=True,
                                    batch_size=1,
                                    pin_memory=True,
                                    num_workers=0)
            #hu = int(experiment_name.split('hu')[0])
            # Backfill defaults for flags that older runs did not record.
            if 'encoding_scale' in TRAINING_FLAGS:
                s = TRAINING_FLAGS['encoding_scale']

            else:
                s = 0
            if 'bn' not in TRAINING_FLAGS:
                TRAINING_FLAGS['bn'] = False
            if 'intermediate_losses' not in TRAINING_FLAGS:
# ---- Example #4 ----
p.add_argument('--conv_encoder',
               action='store_true',
               default=False,
               help='Use convolutional encoder process')
# BUG FIX: without action='store_true' this option consumed a free-form string,
# so e.g. `--partial_conv False` parsed as the truthy string 'False'.  Declare
# it as a boolean flag, consistent with --conv_encoder above and with the other
# scripts in this codebase that define the same option.
p.add_argument('--partial_conv',
               action='store_true',
               default=False,
               help='Set up partial convolutions')
opt = p.parse_args()

# Only the 32x32 downsampled CelebA split is supported by this script.
assert opt.dataset == 'celeba_32x32'
if opt.conv_encoder: gmode = 'conv_cnp'
else: gmode = 'cnp'

# Wrap CelebA images as (coordinate, pixel) pairs, then as a generalization
# dataset that subsamples context pixels per image.
img_dataset = dataio.CelebA(split='train', downsampled=True)
coord_dataset = dataio.Implicit2DWrapper(img_dataset, sidelength=(32, 32))
generalization_dataset = dataio.ImageGeneralizationWrapper(
    coord_dataset,
    train_sparsity_range=opt.train_sparsity_range,
    generalization_mode=gmode)
image_resolution = (32, 32)

dataloader = DataLoader(generalization_dataset,
                        shuffle=True,
                        batch_size=opt.batch_size,
                        pin_memory=True,
                        num_workers=0)

# Meta-learning episode sizes (used further down, beyond this excerpt).
num_shots = 100
num_shots_test = 100
batch_size = 30
# ---- Example #5 ----
               action='store_true',
               default=False,
               help='Use a partial convolution encoder')
opt = p.parse_args()

# Derive the experiment name from the checkpoint's run directory, appending
# either a default '_TEST' tag or the user-supplied suffix.
run_tag = '_TEST' if opt.experiment_name is None else '_' + opt.experiment_name
opt.experiment_name = opt.checkpoint_path.split('/')[-3] + run_tag

# Only the 32x32 downsampled CelebA split is supported.
assert opt.dataset == 'celeba_32x32'

# Test and train splits share identical wrapper settings: 200 context pixels
# in conv-CNP evaluation mode.
img_dataset_test = dataio.CelebA(split='test', downsampled=True)
coord_dataset_test = dataio.Implicit2DWrapper(
    img_dataset_test, sidelength=(32, 32),
    test_sparsity=200, generalization_mode='conv_cnp_test')
image_resolution = (32, 32)

img_dataset_train = dataio.CelebA(split='train', downsampled=True)
coord_dataset_train = dataio.Implicit2DWrapper(
    img_dataset_train, sidelength=(32, 32),
    test_sparsity=200, generalization_mode='conv_cnp_test')

# Define the model.
model = meta_modules.ConvolutionalNeuralProcessImplicit2DHypernet(
    in_features=img_dataset_test.img_channels,
    out_features=img_dataset_test.img_channels,
               help='Amount of subsampled pixels input into the set encoder')
p.add_argument('--partial_conv', action='store_true', default=False,
               help='Use a partial convolution encoder')
opt = p.parse_args()

# Name the run after the checkpoint's experiment directory, with a default
# '_TEST' tag or the user-supplied suffix appended.
run_tag = '_TEST' if opt.experiment_name is None else '_' + opt.experiment_name
opt.experiment_name = opt.checkpoint_path.split('/')[-3] + run_tag

# Only the 32x32 downsampled CelebA split is supported.
assert opt.dataset == 'celeba_32x32'

# Both splits use the same evaluation wrapper: 200 context pixels in
# conv-CNP test mode.
img_dataset_test = dataio.CelebA(split='test', downsampled=True)
coord_dataset_test = dataio.Implicit2DWrapper(img_dataset_test,
                                              sidelength=(32, 32))
generalization_dataset_test = dataio.ImageGeneralizationWrapper(
    coord_dataset_test, test_sparsity=200,
    generalization_mode='conv_cnp_test')
image_resolution = (32, 32)

img_dataset_train = dataio.CelebA(split='train', downsampled=True)
coord_dataset_train = dataio.Implicit2DWrapper(img_dataset_train,
                                               sidelength=(32, 32))
generalization_dataset_train = dataio.ImageGeneralizationWrapper(
    coord_dataset_train, test_sparsity=200,
    generalization_mode='conv_cnp_test')

# Define the model.
model = meta_modules.ConvolutionalNeuralProcessImplicit2DHypernet(
    in_features=img_dataset_test.img_channels,
               help='Checkpoint to trained model.')

p.add_argument('--mask_path', type=str, default=None,
               help='Path to mask image')
p.add_argument('--custom_image', type=str, default=None,
               help='Path to single training image')
opt = p.parse_args()

# Select the training image.  The dataset names are mutually exclusive, so
# an if/elif chain is equivalent to the original independent ifs.
if opt.dataset == 'camera':
    img_dataset = dataio.Camera()
    image_resolution = (512, 512)
    coord_dataset = dataio.Implicit2DWrapper(img_dataset, sidelength=512,
                                             compute_diff='all')
elif opt.dataset == 'camera_downsampled':
    img_dataset = dataio.Camera(downsample_factor=2)
    image_resolution = (256, 256)
    coord_dataset = dataio.Implicit2DWrapper(img_dataset, sidelength=256,
                                             compute_diff='all')
elif opt.dataset == 'custom':
    img_dataset = dataio.ImageFile(opt.custom_image)
    # Assumes img_dataset[0] is a PIL image, whose .size is (width, height);
    # the wrapper wants (rows, cols).
    image_resolution = (img_dataset[0].size[1], img_dataset[0].size[0])
    coord_dataset = dataio.Implicit2DWrapper(img_dataset,
                                             sidelength=image_resolution,
                                             compute_diff='all')
    '--model_type',
    type=str,
    default='sine',
    help=
    'Options are "sine" (all sine activations) and "mixed" (first layer sine, other layers tanh)'
)

p.add_argument('--checkpoint_path', default=None,
               help='Checkpoint to trained model.')
opt = p.parse_args()

# Both datasets share the same wrapper settings: 256x256 sampling with
# gradient supervision.
wrapper_kwargs = dict(sidelength=256, compute_diff='gradients')
if opt.dataset == 'camera':
    img_dataset = dataio.Camera()
    coord_dataset = dataio.Implicit2DWrapper(img_dataset, **wrapper_kwargs)
elif opt.dataset == 'bsd500':
    # Choose which BSD500 image to fit via idx_to_sample.
    img_dataset = dataio.BSD500ImageDataset(in_folder='../data/BSD500/train',
                                            idx_to_sample=[19])
    coord_dataset = dataio.Implicit2DWrapper(img_dataset, **wrapper_kwargs)

dataloader = DataLoader(coord_dataset, batch_size=opt.batch_size,
                        shuffle=True, pin_memory=True, num_workers=0)
    '--model_type',
    type=str,
    default='sine',
    help=
    'Options are "sine" (all sine activations) and "mixed" (first layer sine, other layers tanh)'
)

p.add_argument('--checkpoint_path', default=None,
               help='Checkpoint to trained model.')
opt = p.parse_args()

# Both datasets share the same wrapper settings: 256x256 sampling with
# Laplacian supervision.
wrapper_kwargs = dict(sidelength=256, compute_diff='laplacian')
if opt.dataset == 'camera':
    img_dataset = dataio.Camera()
    coord_dataset = dataio.Implicit2DWrapper(img_dataset, **wrapper_kwargs)
elif opt.dataset == 'bsd500':
    # Choose which BSD500 image to fit via idx_to_sample.
    img_dataset = dataio.BSD500ImageDataset(
        in_folder='/media/data3/awb/BSD500/train', idx_to_sample=[19])
    coord_dataset = dataio.Implicit2DWrapper(img_dataset, **wrapper_kwargs)

dataloader = DataLoader(coord_dataset, batch_size=opt.batch_size,
                        shuffle=True, pin_memory=True, num_workers=0)