Example #1
    def __getitem__(self, index):
        # Load appropriate paths
        config_key = self.dataset_keys[index % self.dataset_size]
        mesh_label = get_fn(self.config[config_key]['geometry_path'])

        target_img_path = os.path.join(self.dir_target_img,
                                       f'{config_key}.png')
        target_mask_path = os.path.join(self.dir_target_mask,
                                        f'{config_key}.png')
        gbuffer_position_path = os.path.join(self.dir_gbuffer_position,
                                             f'{mesh_label}.png')
        gbuffer_normal_path = os.path.join(self.dir_gbuffer_normal,
                                           f'{mesh_label}.png')
        gbuffer_mask_path = os.path.join(self.dir_gbuffer_mask,
                                         f'{mesh_label}.png')

        # Load target and mask
        target_img = imread(target_img_path)
        target_mask = imread(target_mask_path)

        # Add noise to target image
        target_img = self.noise(target_img)
        target = target_img * target_mask
        target = self.image_transform(target)

        disc_mask = self.disc_mask_transform(
            target_mask[:, :, :1]).transpose([2, 0, 1])
        disc_mask = self.mask_transform(disc_mask)
        target_mask = target_mask[:, :, :1]
        target_mask = self.mask_transform(target_mask)

        # Load Gbuffer Images and alphamask
        gbuffer_position = imread(gbuffer_position_path)
        gbuffer_normal = imread(gbuffer_normal_path)
        gbuffer_mask = imread(gbuffer_mask_path)

        gbuffer = np.concatenate([gbuffer_position, gbuffer_normal], axis=-1)
        gbuffer = gbuffer * gbuffer_mask[:, :, :1]
        gbuffer = self.image_transform(gbuffer)

        gbuffer_mask = np.repeat(gbuffer_mask[:, :, :1], 4, axis=-1)
        gbuffer_mask = self.mask_transform(gbuffer_mask)

        return {
            'gbuffer': gbuffer,
            'gbuffer_mask': gbuffer_mask,
            'target': target,
            'target_mask': target_mask,
            'disc_mask': disc_mask,
            'config_keys': config_key
        }
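
For context, a minimal sketch of how an item from a __getitem__ like this is typically consumed, assuming the class above is a torch.utils.data.Dataset (the RenderDataset name and constructor are assumptions, not from the source):

# Hypothetical usage sketch; RenderDataset and its constructor are assumed.
from torch.utils.data import DataLoader

dataset = RenderDataset(opt)
loader = DataLoader(dataset, batch_size=4, shuffle=True)
batch = next(iter(loader))  # dict with 'gbuffer', 'target', masks, keys
print(batch['gbuffer'].shape, batch['target'].shape)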
Example #2
def main():
    MESH_ROOT = './datasets/meshes/clean_serialized'
    CAR_ROSTER = [get_fn(path) for path in get_child_paths(MESH_ROOT)]

    POS_GBUF_ROOT = './datasets/gbuffers/position'
    NOR_GBUF_ROOT = './datasets/gbuffers/normal'
    MSK_GBUF_ROOT = './datasets/gbuffers/mask'

    HEIGHT_DIRT_ROOT = './datasets/textures/height'
    CURVE_DIRT_ROOT = './datasets/textures/curve'

    # Make sure the output directories exist before writing into them
    os.makedirs(HEIGHT_DIRT_ROOT, exist_ok=True)
    os.makedirs(CURVE_DIRT_ROOT, exist_ok=True)

    for name in CAR_ROSTER:
        print(f'Generating dirt for {name}')
        pos = imread(os.path.join(POS_GBUF_ROOT, f'{name}.png'))
        nrm = imread(os.path.join(NOR_GBUF_ROOT, f'{name}.png'))
        msk = imread(os.path.join(MSK_GBUF_ROOT, f'{name}.png'))

        h_dirt = height_dirt(pos, nrm, msk)
        imwrite(h_dirt, os.path.join(HEIGHT_DIRT_ROOT, f'{name}.png'))
        c_dirt = curve_dirt(pos, nrm, msk)
        imwrite(c_dirt, os.path.join(CURVE_DIRT_ROOT, f'{name}.png'))
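
get_fn and get_child_paths recur throughout these examples but their implementations are not shown; they are project-local helpers. A plausible minimal stand-in, inferred from the call sites (an assumption, not the project's actual code):

# Plausible stand-ins inferred from call sites; not the project's real helpers.
import os

def get_fn(path):
    # './dir/name.ext' -> 'name'
    return os.path.splitext(os.path.basename(path))[0]

def get_child_paths(root, ext=None):
    # Paths of the files directly under root, optionally filtered by extension
    paths = [os.path.join(root, fn) for fn in sorted(os.listdir(root))]
    if ext is not None:
        paths = [p for p in paths if p.endswith(f'.{ext}')]
    return paths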
Example #3
        'cam_translation': [0.0, -0.75, 0.0],
        'cam_distance': 7.0,
        'cam_fov': [45.0],
        'cam_resolution': [512, 512],
        'geometry_path': './datasets/meshes/serialized/octavia_clean.pth',
        'tex_diffuse_color': [0.8, 0.8, 0.8],
        'tex_specular_color': [0.8, 0.8, 0.8],
        'envmap_path': './datasets/envmaps/one/sunsky.exr',
        'envmap_signal_mean': 0.5,
        'envmap_rotation': 0.0,
        'opt_num_samples': [200, 1],
        'opt_max_bounces': 2,
        'opt_channels_str': ['radiance'],
        'opt_render_seed': 0,
    }
}
render_config = RenderConfig()
render_layer = RenderLayer(render_config, device)

img = torch.tensor(imread(path), dtype=torch.float32, device=device)

#for fpath in get_child_paths(meshes_path):
#    print(f'Rendering {fpath}')
#    render_config.data[render_config.cfg_id]['geo_mesh_path'] = fpath
#    name = get_fn(fpath)
#    out = render_layer(opaque)
#    imwrite(out, f'debug/new_mesh_qual/{name}.png')
render_config.set_scene(scene_dict['test'])
out = render_layer(img)
imwrite(out, "debug/test_render_out.png")
Example #4
def default_loader(path):
    return imread(path)
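
This mirrors the torchvision convention of passing a loader callable into a folder-style dataset, e.g. dataset = SomeImageDataset(root, loader=default_loader), so the image-reading backend can be swapped without touching the dataset class (the dataset name here is hypothetical).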
Example #5
model = DeepLabModel(model_path)

dataroot = './datasets/tiam/'
resolution = 512
raw_dir = os.path.join(dataroot, 'raw')
img_dir = os.path.join(dataroot, 'img')
mask_dir = os.path.join(dataroot, 'mask')

if not os.path.exists(img_dir):
    os.makedirs(img_dir)
if not os.path.exists(mask_dir):
    os.makedirs(mask_dir)

resize = Resize(resolution)
fn_list = os.listdir(raw_dir)
for fn in fn_list:
    # Fetch file metadata
    fn = fn.split('.')[0]
    print('Processing: {}'.format(fn), end='\t')
    raw_path = os.path.join(raw_dir, '{}.JPG'.format(fn))
    img_path = os.path.join(img_dir, '{}.png'.format(fn))
    mask_path = os.path.join(mask_dir, '{}.png'.format(fn))

    img = resize(imread(raw_path)) * 255.0
    img = np.rot90(img, -1)
    mask = model.run(img)

    print('Saving mask')
    imwrite(img / 255.0, img_path)
    imwrite(mask, mask_path)
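
One caveat: np.rot90 returns a strided view rather than a fresh array, so if DeepLabModel.run expects a contiguous buffer, a copy via img = np.ascontiguousarray(np.rot90(img, -1)) may be needed (whether it matters depends on the model wrapper, which is not shown here).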
Example #6
BLENDER = '/home/budmonde/opt/blender/blender'
OBJ_DIR = './datasets/meshes/learn'
OUT_PATH = './datasets/textures/bake'

if not os.path.exists(OUT_PATH):
    os.makedirs(OUT_PATH)

sampler = RGBFileSamplerFactory('./datasets/distributions/diffuse.txt')

data = dict()
for in_path in get_child_paths(OBJ_DIR, ext='obj'):
    mesh_name = get_fn(in_path)
    out_path = os.path.join(OUT_PATH, f'{mesh_name}.png')
    data[mesh_name] = sampler()
    subprocess.run([
        BLENDER, '--background', '--python', 'blender/add_dirt_blender.py',
        '--', '--input_path', in_path, '--output_path', out_path,
        '--albedo_r', str(data[mesh_name][0]),
        '--albedo_g', str(data[mesh_name][1]),
        '--albedo_b', str(data[mesh_name][2]),
    ], check=True)

    # Post process output
    image = imread(out_path)
    image = rotate(image, 180)
    imwrite(image, out_path)

    with open(os.path.join(OUT_PATH, 'data.json'), 'w') as f:
        json.dump(data, f)
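
Note that data.json is rewritten inside the loop, once per mesh. Since each Blender bake is slow, this effectively checkpoints the sampled albedos after every iteration instead of risking all of the metadata on a single final write.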
Example #7
    def __init__(self, opt):
        BaseModel.__init__(self, opt)

        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        self.loss_names = ['D', 'G']
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        visual_names_tex = [
            'gbuffer_position', 'gbuffer_normal', 'synth_tex_show'
        ]
        visual_names_render = ['target', 'synth']
        visual_names_loss = ['heatmap_real', 'heatmap_fake']
        self.visual_names = visual_names_tex + visual_names_render + visual_names_loss

        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load Gs
            self.model_names = ['G']

        # load/define networks

        ### Begin Generator Pipeline ###

        # 1. Generate Texture from Gbuffer prior
        self.netG = networks.define_G(opt.input_nc, opt.texture_nc, opt.ngf,
                                      opt.netG, opt.norm, not opt.no_dropout,
                                      opt.init_type, opt.init_gain,
                                      self.gpu_ids)
        # 2. Pre-processing:
        #    - Strip batch dimension
        #    - Normalize to [0,1] domain
        #    - Switch dimension order
        #    - Mask out unlearnable texture values **applied at runtime**
        self.pre_process = nn.Sequential(
            StripBatchDimLayer(),
            NormalizeLayer(-1.0, 2.0),
            CHW2HWCLayer(),
        )

        # 3. Render Image
        self.scene_dict = json.loads(
            open(os.path.join(opt.dataroot, 'data.json')).read())
        self.render_config = RenderConfig()
        self.override = ConfigSampler({
            #'envmap_path': PathSamplerFactory(
            #    './datasets/envmaps/rasters', ext='exr'),
            'envmap_rotation': UniformSamplerFactory(0.0, 1.0),
        })

        self.render = RenderLayer(self.render_config, self.device, True)

        # 4. Post-processing:
        #    - Add Signal Noise
        #    - Composite alpha layer to mask out the environment map
        #    - Undo switch dimension order
        #    - Undo normalization
        #    - Add back batch dimension
        noise_kwargs = {
            "sigma": opt.gaussian_sigma,
            "device": self.device,
        }
        composit_kwargs = {
            "background": np.array([[[0.0, 0.0, 0.0]]]),
            "size": opt.crop_size,
            "device": self.device,
        }
        self.post_process = nn.Sequential(
            GaussianNoiseLayer(**noise_kwargs),
            CompositLayer(**composit_kwargs),
            HWC2CHWLayer(),
            NormalizeLayer(0.5, 0.5),
            AddBatchDimLayer(),
        )

        background = imread(opt.viz_composit_bkgd_path)

        visdom_kwargs = {
            "background": background,
            "size": opt.crop_size,
            "device": self.device,
        }
        self.composit_layer = NormalizedCompositLayer(**visdom_kwargs)

        ### End Generator Pipeline ###

        if self.isTrain:
            self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                          opt.n_layers_D, opt.norm,
                                          opt.init_type, opt.init_gain,
                                          self.gpu_ids)

        if self.isTrain:
            self.synth_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizers = [self.optimizer_G, self.optimizer_D]
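
The __init__ above only wires the pipeline stages together; a rough sketch of how they presumably compose at runtime (the actual forward method is not part of this example, so the names below are assumptions):

    # Hypothetical forward() sketch based on the stages defined above.
    def forward(self):
        tex = self.netG(self.gbuffer)        # 1. texture from G-buffer prior
        tex = self.pre_process(tex)          # 2. strip batch dim, [0,1], HWC
        img = self.render(tex)               # 3. differentiable render
        self.synth = self.post_process(img)  # 4. noise, composite, CHW, renorm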
Example #8
    img = skimage.transform.warp(img, tfm, order=1, preserve_range=True)

    return (img, mask)

fn_list = os.listdir(RAW_IMG_DIR)
for fn in fn_list:
    # Fetch file metadata
    fn = fn.split('.')[0]
    #if fn != 'frame_0005_0_5_5049041':
    #    continue
    print('Processing: {}'.format(fn), end='\t')
    img_fpath = os.path.join(RAW_IMG_DIR, '{}.exr'.format(fn))
    label_fpath = os.path.join(LABEL_DIR, 'label_{}.png'.format(fn))

    # Open images. Open label using PIL because image is encoded in uint8
    img = imread(img_fpath)
    label = np.array(Image.open(label_fpath))

    # Isolate masks
    label[label != 250] = 0
    label[label == 250] = 1

    # Clean up the mask
    label = label.astype(bool)
    threshold = 70000
    skimage.morphology.remove_small_objects(
        label, min_size=threshold, in_place=True)
    skimage.morphology.remove_small_holes(
        label, area_threshold=threshold, in_place=True)
    label = label.astype('uint8')

    # Normalize the mask
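
(Compatibility note: newer scikit-image releases deprecate the in_place argument of remove_small_objects / remove_small_holes in favor of an out parameter, so on recent versions these calls would pass out=label instead.)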
Example #9
def generate_poses(model_path, output_path):
    # Init logger
    log = dict()

    # Load renderer configs
    material_map, mesh_list, light_map = pyredner.load_obj(model_path)
    material_id_map = {}
    materials = []
    for count, (key, value) in enumerate(material_map.items()):
        material_id_map[key] = count
        materials.append(value)

    shapes = []
    for mtl_name, mesh in mesh_list:
        shapes.append(
            pyredner.Shape(vertices=mesh.vertices,
                           indices=mesh.indices,
                           uvs=mesh.uvs,
                           normals=mesh.normals,
                           material_id=material_id_map[mtl_name]))

    envmap = pyredner.EnvironmentMap(
        torch.tensor(imread('./datasets/envmaps/one/sunsky.exr'),
                     dtype=torch.float32,
                     device=pyredner.get_device()))

    # Object pose parameters
    euler_angles = [0.0, 0.0, 0.0]
    translation = [0.0, -0.75, 0.0]
    up = [0.0, 1.0, 0.0]
    distance = 7.0

    # Setup base scene to modify during iterations
    cam_params = camera_parameters(euler_angles, translation, distance, up)

    camera = pyredner.Camera(position=torch.tensor(cam_params[0],
                                                   dtype=torch.float32),
                             look_at=torch.tensor(cam_params[1],
                                                  dtype=torch.float32),
                             up=torch.tensor(cam_params[2],
                                             dtype=torch.float32),
                             fov=torch.tensor([45.0]),
                             clip_near=1e-2,
                             resolution=(opt.resolution, opt.resolution),
                             fisheye=False)

    scene = pyredner.Scene(camera,
                           shapes,
                           materials,
                           area_lights=[],
                           envmap=envmap)

    # Generate alphamasks
    for i in range(opt.num_elev):
        # Set elevation angle
        elev_pc = i / opt.num_elev
        elevation = opt.max_elev * elev_pc + opt.min_elev * (1 - elev_pc)
        euler_angles[1] = elevation

        # Calculate number of azimuthal iterations
        num_azimuth = int(opt.num_elev * math.sin(math.pi / 2 - elevation))
        for j in range(num_azimuth):
            # Set azimuthal angle
            azimuth_pc = j / num_azimuth
            azimuth = math.pi * 2 * azimuth_pc

            euler_angles[0] = azimuth

            print('Params: Elevation - {:.4f}\tAzimuth - {:.4f}'\
                    .format(elevation, azimuth))

            # Set Camera params
            cam_params = camera_parameters(euler_angles, translation, distance,
                                           up)

            # Update scene params
            scene.camera = pyredner.Camera(
                position=torch.tensor(cam_params[0], dtype=torch.float32),
                look_at=torch.tensor(cam_params[1], dtype=torch.float32),
                up=torch.tensor(cam_params[2], dtype=torch.float32),
                fov=torch.tensor([45.0]),
                clip_near=1e-2,
                resolution=(opt.resolution, opt.resolution),
                fisheye=False)
            args = pyredner.RenderFunction.serialize_scene(
                scene=scene,
                num_samples=1,
                max_bounces=1,
                channels=[redner.channels.alpha])

            out = pyredner.RenderFunction.apply(1, *args)

            fn = gen_hash(6)
            imwrite(out, os.path.join(output_path, '{}.png'.format(fn)))
            log[fn] = {'elevation': elevation, 'azimuth': azimuth}
    return log
Example #10
    img = skimage.transform.resize(
        img, (opt.resolution, opt.resolution), order=0)
    return img

# Load pose bank config
with open(os.path.join(opt.pose_bank_path, 'data.json'), 'r') as f:
    data = json.load(f)

# Load images to estimate poses for
fpath_list = get_child_paths(os.path.join(opt.dataroot, 'mask'))

# Initialize objects before iteration
best_pose_dict = dict()

for fpath in fpath_list:
    image = crop_and_rescale(imread(fpath))

    best_pose = dict()
    best_loss = float("inf")

    key = get_fn(fpath)

    print("Processing {}".format(key), end='\t')
    for k, v in data.items():
        pose_path = os.path.join(opt.pose_bank_path, '{}.png'.format(k))
        pose_img = crop_and_rescale(imread(pose_path)[:, :, 0:1])

        l2 = np.sum((image - pose_img)**2)
        if l2 < best_loss:
            best_pose['geo_rotation'] = [v['azimuth'], v['elevation'], 0.0]
            best_loss = l2
Example #11
def main():
    # Load arguments
    parser = argparse.ArgumentParser()
    # Scene args
    parser.add_argument('--geometry_path',
                        type=str,
                        default='./datasets/meshes/clean_serialized')
    parser.add_argument('--envmaps_path',
                        type=str,
                        default='./datasets/envmaps/one')
    parser.add_argument('--diffuse_refl_path',
                        type=str,
                        default='./datasets/distributions/diffuse.txt')
    parser.add_argument('--textures_path',
                        type=str,
                        default='./datasets/textures/curve')
    parser.add_argument('--texture_size', type=int, default=256)
    # Output args
    parser.add_argument('--root_path', type=str, default='./datasets/renders/')
    parser.add_argument('--num_imgs', type=int, default=1000)
    parser.add_argument('--label', type=str, default='debug')
    # Misc
    parser.add_argument('--gpu_id', type=int, default=0)
    opt = parser.parse_args()

    # Create Output directory
    now = datetime.now()
    subdir = f'{opt.label}_{now.month}-{now.day}-{now.hour}-{now.minute}'
    out_path = os.path.join(opt.root_path, subdir)
    # Create the output tree up front, including the img/ and mask/
    # subdirectories that the imwrite calls below target
    os.makedirs(os.path.join(out_path, 'img'), exist_ok=True)
    os.makedirs(os.path.join(out_path, 'mask'), exist_ok=True)

    # Load samplers
    sampler = ConfigSampler({
        'cam_rotation': HemisphereSamplerFactory(
            [0.0, 0.5], [0.0, 0.0], [0.0, 0.0]),
        'cam_translation': BoxSamplerFactory(
            [-0.1, 0.1], [-0.76, -0.74], [-0.1, 0.1]),
        'cam_distance': ConstantSamplerFactory(7.0),
        'cam_fov': ConstantSamplerFactory([45.0]),
        'cam_resolution': ConstantSamplerFactory([256, 256]),
        'geometry_path': PathSamplerFactory(opt.geometry_path, ext='pth'),
        'tex_diffuse_color': RGBFileSamplerFactory(opt.diffuse_refl_path),
        'tex_specular_color': ConstantSamplerFactory([0.8, 0.8, 0.8]),
        'envmap_path': PathSamplerFactory(opt.envmaps_path, ext='exr'),
        'envmap_signal_mean': ConstantSamplerFactory(0.5),
        'envmap_rotation': ConstantSamplerFactory(0.0),
        'opt_num_samples': ConstantSamplerFactory((200, 1)),
        'opt_max_bounces': ConstantSamplerFactory(2),
        'opt_channels_str': ConstantSamplerFactory(['radiance', 'alpha']),
        'opt_render_seed': RandIntSamplerFactory(0, 1e6),
    })

    # Init renderer
    device = torch.device(f'cuda:{opt.gpu_id}' if opt.gpu_id != -1 else 'cpu')
    config = RenderConfig()
    renderer = Render(config, device)

    log = dict()
    for i in range(opt.num_imgs):
        # Generate render id and scene configs
        key = gen_hash(6)
        while key in log:
            key = gen_hash(6)
        scene = sampler.generate()
        log[key] = scene
        config.set_scene(scene)

        # Set texture for rendering
        mesh_name = get_fn(config('geometry_path'))
        texture = imread(os.path.join(opt.textures_path, f'{mesh_name}.png'))
        texture = resize(texture, (opt.texture_size, opt.texture_size))
        texture = torch.tensor(texture, dtype=torch.float32, device=device)

        # Time Render operation
        iter_start_time = time.time()
        out = renderer(texture)
        render_time = time.time() - iter_start_time
        print(f'Generated image #{i} -- {key} in {render_time:.2f}s')
        imwrite(out[..., :3], os.path.join(out_path, 'img', f'{key}.png'))
        imwrite(out[..., 3:4], os.path.join(out_path, 'mask', f'{key}.png'))

        with open(os.path.join(out_path, 'data.json'), 'w') as metafile:
            json.dump(log, metafile)
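
As written nothing invokes main(); a script like this presumably ends with the usual entry-point guard (the example command below, including the script name, is an assumption):

if __name__ == '__main__':
    main()

# e.g. python generate_renders.py --num_imgs 10 --gpu_id -1 --label smoke_test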