Example #1
    def __getitem__(self, idx):

        input = np.zeros(shape=(128, 128, 2))

        # pandas removed DataFrame.ix; positional indexing uses .iloc
        img_fn = os.path.join(self.root_dir, self.data_frame.iloc[idx, 0])
        luminance = pyexr.open(img_fn).get()  # luminance
        wm_fn = os.path.join(self.wm_dir, self.data_frame.iloc[idx, 0])
        wm = pyexr.open(wm_fn).get()  # weight map
        mask_fn = os.path.join(self.mask_dir,
                               self.data_frame.iloc[idx, 1] + '.exr')
        mask = pyexr.open(mask_fn).get()
        luminance = 28.2486 * luminance * mask

        shape = self.data_frame.iloc[idx, 1]
        light = self.data_frame.iloc[idx, 2]

        sigmaT = self.data_frame.iloc[idx, 3].astype('float')
        albedo = self.data_frame.iloc[idx, 4].astype('float')
        hg = self.data_frame.iloc[idx, 5].astype('float')

        patches_luminance = extract_patches(luminance[:, :, 0],
                                            patch_shape=(4, 4),
                                            extraction_step=(4, 4))
        input[:, :, 0] = patches_luminance.mean(-1).mean(-1)
        patches_wm = extract_patches(wm[:, :, 0],
                                     patch_shape=(4, 4),
                                     extraction_step=(4, 4))

        input[:, :, 1] = patches_wm.mean(-1).mean(-1)
        weights = input[:, :, 1]
        target = torch.from_numpy(
            patches_luminance.mean(-1).mean(-1)).unsqueeze(0)

        input = input.transpose((2, 0, 1))
        input = torch.from_numpy(input)

        params = torch.Tensor(3)
        params[0] = sigmaT
        params[1] = albedo
        params[2] = hg

        sample = {
            'input': input,
            'params': params,
            'target': target,
            'shape': shape,
            'light': light,
            'sigmaT': sigmaT,
            'albedo': albedo,
            'hg': hg,
            'fn': self.data_frame.iloc[idx, 0],
            'weight': weights
        }

        return sample
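This __getitem__ belongs to a torch Dataset subclass. A typical (hypothetical) consumer batches the returned sample dicts with a DataLoader; `dataset` here stands for an instance of the class above:

loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
for batch in loader:
    inputs, params, target = batch['input'], batch['params'], batch['target']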
Example #2
    def __getitem__(self, idx):

        input = np.zeros(shape=(128, 128, 2))

        img_fn = os.path.join(self.root_dir, self.data_frame.iloc[idx, 0])
        luminance = pyexr.open(img_fn).get()  # luminance
        wm_fn = os.path.join(self.wm_dir, self.data_frame.iloc[idx, 0])
        wm = pyexr.open(wm_fn).get()

        shape = self.data_frame.iloc[idx, 1]
        light = self.data_frame.iloc[idx, 2]

        sigmaT = self.data_frame.iloc[idx, 3].astype('float')
        albedo = self.data_frame.iloc[idx, 4].astype('float')
        hg = self.data_frame.iloc[idx, 5].astype('float')

        sigmaS = sigmaT * albedo
        sigmaA = sigmaT - sigmaS
        sigmaS_reduced = (1 - hg) * sigmaS

        patches_luminance = extract_patches(luminance[:, :, 0],
                                            patch_shape=(4, 4),
                                            extraction_step=(4, 4))
        input[:, :, 0] = patches_luminance.mean(-1).mean(-1)
        patches_wm = extract_patches(wm[:, :, 0],
                                     patch_shape=(4, 4),
                                     extraction_step=(4, 4))
        input[:, :, 1] = patches_wm.mean(-1).mean(-1)

        target = torch.from_numpy(
            patches_luminance.mean(-1).mean(-1)).unsqueeze(0)
        weight = torch.from_numpy(patches_wm.mean(-1).mean(-1)).unsqueeze(0)
        input = input.transpose((2, 0, 1))
        input = torch.from_numpy(input)

        params = torch.Tensor(3)
        params[0] = sigmaS
        params[1] = sigmaA
        params[2] = sigmaS_reduced

        sample = {
            'input': input,
            'params': params,
            'target': target,
            'shape': shape,
            'light': light,
            'sigmaS': sigmaS,
            'sigmaA': sigmaA,
            'sigmaS_reduced': sigmaS_reduced,
            'fn': self.data_frame.iloc[idx, 0],
            'weightMap': weight
        }

        return sample
Example #3
    def __getitem__(self, index):

        if self.model == 'ours':
            if self.mode == 'Train':
                depth = pyexr.open(self.one_batch[index][2]).get()[:, :, 2:3]
            else:
                depth = 1 + pyexr.open(
                    self.one_batch[index][2]).get()[:, :, 2:3] / 1500.0
            # depth /=5
        else:
            depth = pyexr.open(self.one_batch[index][2]).get()
            xx = (750 + depth[:, :, 0:1]) / 1500.0
            yy = (750 + depth[:, :, 1:2]) / 1500.0
            zz = (1500 + depth[:, :, 2:3]) / 1500.0
            #
            depth = np.concatenate([xx, yy, zz], axis=-1)
            depth /= 3

        # print('depth', np.min(depth))
        # print(depth.shape)
        # plt.imshow(depth[:, :, :3])
        # plt.show()
        # depth *= 1.49
        # depth = depth2position(depth)

        # plt.subplot(131)
        # plt.imshow(xx)
        # plt.subplot(132)
        # plt.imshow(yy)
        # plt.subplot(133)
        # plt.imshow(zz)
        # plt.show()
        # depth = np.concatenate([xx, yy, zz], axis=-1)
        # depth /=3
        # print(np.max(depth), np.min(depth), np.mean(depth))
        # depth = (depth-np.min(depth))/(np.max(depth)-np.min(depth))
        # depth = depth/1.8
        # if np.max(depth) >1:
        #     depth -= 1
        normal = pyexr.open(self.one_batch[index][1]).get()[:, :, :3]

        label = pyexr.open(self.one_batch[index][0]).get()[:, :, 0:1]

        input = np.transpose(np.concatenate((normal, depth), axis=-1),
                             (2, 0, 1))
        label = np.transpose(label, (2, 0, 1))

        # print(np.max(normal), np.min(normal))
        # plt.imshow(np.squeeze(depth))
        # plt.show()

        return torch.from_numpy(input), torch.from_numpy(
            label)  # must return torch tensors here, otherwise PyTorch raises an error
Example #4
def compute_sh(obj_file, cam_pos, cam_lookat):
    light_radiance = 1.0

    register_integrator('auxintegrator', lambda props: AuxIntegrator(props))

    scene_template_file = './scene_template.xml'
    Thread.thread().file_resolver().append(
        os.path.dirname(scene_template_file))

    scene = load_file(scene_template_file, integrator='auxintegrator',
                      fov="40", tx=cam_lookat[0], ty=cam_lookat[1],
                      tz=cam_lookat[2], spp="100", width=200, height=200,
                      obj_file=obj_file)

    # scene.integrator().light = load_string(LIGHT_TEMPLATE, lsx="1", lsy="1", lsz="1", lrx="0", lry="0", lrz="0", ltx="-1", lty="0", ltz="0", l=light_radiance)
    # scene.integrator().light_radiance = light_radiance

    scene.integrator().render(scene, scene.sensors()[0])
    film = scene.sensors()[0].film()
    film.set_destination_file('./render_output.exr')
    film.develop()

    sh_channels_list = []
    for i in range(0, 9):
        for c in ['r', 'g', 'b']:
            sh_channels_list.append('sh_%s_%d' % (c, i))

    f_sh = np.zeros((200, 200, 27), dtype=np.float64)  # np.float was removed in NumPy 1.24
    exrfile = pyexr.open('render_output.exr')

    for i, channel in enumerate(sh_channels_list):
        ch = exrfile.get(channel)
        f_sh[:, :, i:i + 1] += ch

    return f_sh
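compute_sh packs 9 spherical-harmonics coefficients x RGB into 27 channels per pixel. A hypothetical call (note cam_pos is accepted but unused in this snippet; only cam_lookat reaches the scene template):

f_sh = compute_sh('bunny.obj', cam_pos=[0.0, 0.0, 4.0], cam_lookat=[0.0, 0.0, 0.0])
print(f_sh.shape)  # (200, 200, 27)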
Example #5
def loadFigure(filename):
    try:
        tmp = pyexr.open(filename)
        return tmp

    except Exception:
        return None
Example #6
File: split.py Project: chellmuth/exrtools
def split_filename(path, tiles):
    exr = pyexr.open(path)

    default = exr.get("default")
    height, width, _ = default.shape

    tiles_x, tiles_y = tiles

    tile_width = width // tiles_x
    tile_height = height // tiles_y

    dirname, basename = os.path.split(path)
    name, extension = os.path.splitext(basename)
    split_name = name.split("-")

    split_id = split_name[0]
    split_rest = "-".join(split_name[1:])

    for tile_y in range(tiles_y):
        for tile_x in range(tiles_x):
            tile_index = tile_y * tiles_x + tile_x
            tiled_filename = f"{split_id}_tile{tile_index}-{split_rest}{extension}"

            tiled_path = os.path.join(dirname, tiled_filename)
            print(tiled_path)

            crop.crop_filename(
                path,
                tiled_path,
                (height, width),
                (tile_height, tile_width),
                (tile_y * tile_height, tile_x * tile_width),
            )
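For a source file named like 42-kitchen.exr, a hypothetical call produces a 2x2 grid of crops written next to the original as 42_tile0-kitchen.exr through 42_tile3-kitchen.exr:

split_filename('renders/42-kitchen.exr', tiles=(2, 2))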
Example #7
    def __getitem__(self, idx):
        item = self.data[idx]
        sample_folder = Path(self.dataset_path) / "raw" / self.splitsdir / item
        df_folder = Path(
            self.dataset_path) / "processed" / self.splitsdir / item

        image = Image.open(sample_folder / "rgb.png")
        #image = image.transpose(Image.FLIP_LEFT_RIGHT)
        rgb_img = self.input_transform(image)

        points = []
        occupancies = []
        grids = []

        for sigma in ['0.10', '0.01']:
            sample_points_occ_npz = np.load(df_folder /
                                            f"occupancy_{sigma}.npz")
            boundary_sample_points = sample_points_occ_npz['points']
            boundary_sample_coords = sample_points_occ_npz['grid_coords']
            boundary_sample_occupancies = sample_points_occ_npz['occupancies']
            subsample_indices = np.random.randint(
                0, boundary_sample_points.shape[0], self.num_points)
            points.extend(boundary_sample_points[subsample_indices])
            grids.extend(boundary_sample_coords[subsample_indices])
            occupancies.extend(boundary_sample_occupancies[subsample_indices])

        sample_points = torch.from_numpy(
            np.array(
                points,
                dtype=f'float{self.kwargs.precision}'))  # * (1 - 16 / 64))
        sample_occupancies = torch.from_numpy(
            np.array(occupancies, dtype=f'float{self.kwargs.precision}'))

        distance_map = pyexr.open(str(sample_folder /
                                      "distance.exr")).get("R")[:, :, 0]

        #depthmap target
        intrinsics_matrix = get_intrinsic(Path("data/intrinsics.txt"))
        focal_length = intrinsics_matrix[0][0]
        transform = FromDistanceToDepth(focal_length)
        depth_map = transform(distance_map).numpy().astype(
            f'float{self.kwargs.precision}', casting='same_kind')
        #depth_map = np.flip(depth_map, 1)
        depth_flipped = depth_map.copy()
        depthmap_target = self.target_transform(depth_flipped)
        mesh_path = str(sample_folder / "mesh.obj")

        # GT mesh
        #df_folder = Path(self.dataset_path) / "processed" / self.splitsdir / item
        #sample_target = torch.from_numpy(read_df(str(df_folder / "distance_field.df")).astype(f'float{self.kwargs.precision}')).unsqueeze(0)

        return {
            'name': item,
            'mesh': mesh_path,
            'rgb': rgb_img,
            'points': sample_points,
            'occupancies': sample_occupancies,
            #'target': sample_target.unsqueeze(0),
            'depthmap_target': depthmap_target.squeeze()
        }
Example #8
def filter(exr_filename, out_filename, channels):
    exr = pyexr.open(exr_filename)
    channel_data = {channel: exr.get(channel) for channel in channels}

    if len(channel_data) == 1:
        channel_data["default"] = channel_data.pop(channels[0])

    pyexr.write(out_filename, channel_data)
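For example, a hypothetical call extracting a single channel group into its own file; with exactly one channel requested, it is renamed to 'default' so viewers treat it as the primary layer:

filter('render.exr', 'albedo_only.exr', ['albedo'])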
Example #9
def load_iteration(image_file, image_sqr_file):
    image_exr = pyexr.open(image_file)
    header = image_exr.input_file.header()

    spp = header['spp']
    image_data = pyexr.read(image_file)
    image_sqr_data = pyexr.read(image_sqr_file)
    return image_data, image_sqr_data, spp
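Assuming image_data holds per-pixel sample means and image_sqr_data per-pixel means of squared samples (the naming suggests this, but the convention isn't shown here), a per-pixel variance estimate would be:

mean, sqr, spp = load_iteration('iter0.exr', 'iter0_sqr.exr')
variance = np.maximum(sqr - mean ** 2, 0.0) * spp / max(spp - 1, 1)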
Example #10
def _read_exr(filespec, verbose=True):
    exr = pyexr.open(filespec)
    data = exr.get()
    maxval = np.max(data)
    _print(verbose, "Reading OpenEXR file %s " % (filespec), end='')
    _print(
        verbose, "(w=%d, h=%d, c=%d, %s)" %
        (exr.width, exr.height, len(exr.channels), data.dtype))
    return data, maxval
Example #11
def readgen(file):
    if file.endswith('.float3'): return readFloat(file)
    elif file.endswith('.flo'): return readFlow(file)
    elif file.endswith('.ppm'): return readImage(file)
    elif file.endswith('.pgm'): return readImage(file)
    elif file.endswith('.png'): return readImage(file)
    elif file.endswith('.jpg'): return readImage(file)
    elif file.endswith('.pfm'): return readPFM(file)[0]
    elif file.endswith('.exr'): return pyexr.open(file).get() #https://github.com/tvogels/pyexr
    else: raise Exception('don\'t know how to read %s' % file)
Example #12
def depth_to_gridspace(distance_map, intrinsic_path=None, down_scale_factor=1):
    input_depth = pyexr.open(distance_map).get("R")[:, :, 0]

    intrinsic = get_intrinsic(intrinsic_path)
    focal_length = intrinsic[0][0]

    # distance map to depth map
    transform = FromDistanceToDepth(focal_length)
    depthmap = transform(input_depth)

    return depthmap_to_gridspace(depthmap, intrinsic_path, down_scale_factor)
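FromDistanceToDepth and get_intrinsic are helpers not shown in these snippets. Judging from its use (constructed from a focal length in pixels, called on a ray-distance map, returning a torch tensor; see also Example #7), a minimal sketch of FromDistanceToDepth might be:

import numpy as np
import torch

class FromDistanceToDepth:
    # Converts Euclidean distance along each camera ray into planar Z-depth,
    # assuming the principal point sits at the image center.
    def __init__(self, focal_length):
        self.focal_length = focal_length

    def __call__(self, distance_map):
        h, w = distance_map.shape
        xs = np.arange(w) - (w - 1) / 2.0
        ys = np.arange(h) - (h - 1) / 2.0
        xx, yy = np.meshgrid(xs, ys)
        ray_length = np.sqrt(xx ** 2 + yy ** 2 + self.focal_length ** 2)
        return torch.from_numpy(distance_map * self.focal_length / ray_length)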
Example #13
def exr_to_tensor(exr_filepath: str, half: bool) -> torch.Tensor:
    image = pyexr.open(exr_filepath)
    channels = image.channels

    img_array = image.get(precision=image.channel_precision[channels[0]])

    if half:
        img_array = img_array[:image.height // 2, :image.width // 2, :]

    return torch.tensor(np.transpose(img_array, (2, 0, 1)),
                        dtype=torch.float32)
Example #14
def filter_exrs_missing_channel(root_path, channel):
    missing = []

    for root, dirs, files in os.walk(root_path):
        for filename in files:
            if filename.endswith(".exr"):
                full_path = os.path.join(root, filename)
                exr = pyexr.open(full_path)
                if not check_has_channel(exr, channel):
                    missing.append(full_path)

    return missing
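check_has_channel is not defined in this snippet. A minimal sketch, assuming pyexr's channels attribute lists the file's channel names (it is used that way in Example #13):

def check_has_channel(exr, channel):
    return channel in exr.channels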
Example #15
    def __getitem__(self, index):

        inputs = []
        labels = []
        for i in range(4):
            depth = pyexr.open(self.one_batch[index][i * 3 + 2]).get()[:, :, 2:3]
            normal = pyexr.open(self.one_batch[index][i * 3 + 1]).get()[:, :, :3]
            label = pyexr.open(self.one_batch[index][i * 3 + 0]).get()[:, :, 0:1]
            input = np.transpose(np.concatenate((normal, depth), axis=-1),
                                 (2, 0, 1))
            label = np.transpose(label, (2, 0, 1))
            inputs.append(np.expand_dims(input, axis=0))
            labels.append(label)
        inputs = np.concatenate(inputs, axis=0)
        labels = np.concatenate(labels, axis=0)

        return torch.from_numpy(inputs), torch.from_numpy(
            labels)  # must return torch tensors here, otherwise PyTorch raises an error
Example #16
def read_image(path: str, gray: bool = False) -> np.ndarray:
    hdr = _is_hdr(path)
    # Read image
    if hdr:
        img = pyexr.open(path).get()
    else:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if not gray:  # Ensure correct color space
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    if gray:  # Ensure single channel for gray scale
        img = ensureSingleChannel(img)

    if hdr:  # Always return float
        return img.astype(np.float32)
    else:
        return img.astype(np.float32) / 255
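_is_hdr and ensureSingleChannel are not shown. Sketches consistent with this usage, assuming only .exr files count as HDR here (the HDR branch goes through pyexr):

def _is_hdr(path: str) -> bool:
    return path.lower().endswith('.exr')

def ensureSingleChannel(img: np.ndarray) -> np.ndarray:
    # Collapse a multi-channel image to one channel by averaging.
    return img if img.ndim == 2 else img.mean(axis=-1)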
Example #17
File: crop.py Project: chellmuth/exrtools
def crop_filename(source_filename,
                  destination_filename,
                  from_shape,
                  to_shape,
                  offset,
                  keep_orig=False):
    exr = pyexr.open(source_filename)
    root_data = {}

    default = exr.get("default")
    assert default.shape[:2] == from_shape

    for channel, data in exr.get_all().items():
        channel_data = data[offset[0]:offset[0] + to_shape[0],
                            offset[1]:offset[1] + to_shape[1], :]

        root_data[channel] = channel_data

    pyexr.write(destination_filename, root_data)
Example #18
def read_and_save_input(prefix, accels):
    global plots_path, images, filepaths, maxvalue
    # Prep variable
    filelist = os.listdir("output")
    for file in sorted(filelist,
                       key=lambda f: order[f.split(prefix.split('=')[1])[1][1:-4]]):
        iswantedaccel = False
        if accels is None:
            iswantedaccel = True
        else:
            for accel in accels:
                if accel == file.split(prefix.split('=')[1])[1][1:-4]:
                    iswantedaccel = True
                    break
        if iswantedaccel and file.endswith(".exr") and file.startswith(prefix):
            filepath = os.path.join("output", file)
            image = pyexr.open(filepath).get()
            maxvalue = max(maxvalue, np.max(image))
            images.append(image)
            filepaths.append("output/" + file[:-3] + "png")
    print(filepaths)
Example #19
def load_img(filepath):
    """Load HDR or LDR image (either .hdr or .exr for HDR or .png for LDR)."""

    if filepath.endswith('.exr'):
        fp = pyexr.open(filepath)
        img = np.array(fp.get(), dtype=np.float64)
    elif filepath.endswith('.hdr'):
        fp = cv2.imread(filepath, cv2.IMREAD_ANYDEPTH)
        fp = cv2.cvtColor(fp, cv2.COLOR_BGR2RGB)
        img = np.array(fp, dtype=np.float64)
    elif filepath.endswith('.png'):
        fp = cv2.imread(filepath)
        fp = cv2.cvtColor(fp, cv2.COLOR_BGR2RGB)
        # important to be signed, because some metrics' calculations can have intermediate negative values.
        img = np.array(fp, dtype=np.int32)
    else:
        raise Exception('Only .exr, .hdr, and .png images are supported')

    return img
Example #20
def genJson(objname):
    objpath = ROOT + "/{}/{}.obj".format(objname, objname)
    vertices = []
    with open(objpath) as objf:
        for line in objf:
            if line.startswith("#"): continue
            values = line.split()
            if len(values) == 0:
                continue
            if values[0] == 'v':
                v = list(map(float, values[1:4]))
                vertices.append([v[0], v[1], v[2]])
    vertices = torch.Tensor(vertices).to('cuda')
    max_p = torch.max(vertices, dim=0)[0]
    min_p = torch.min(vertices, dim=0)[0]
    diag_length = torch.norm(max_p - min_p) * 1.6
    # center = torch.mean(vertices, dim=0)
    center = (max_p + min_p) / 2
    camera_positions = icosavn * diag_length + center
    for index in range(len(camera_positions)):
        d = camera_positions[index]
        j = dict()
        j['max'] = max_p.tolist()
        j['min'] = min_p.tolist()
        j['diag'] = diag_length.tolist()
        j['center'] = center.tolist()
        j["cameraPos"] = d.tolist()
        j['icosavn'] = icosavn[index].tolist()
        j['max_tolerance'] = max_tolerance
        file = pyexr.open("{}/{}/{}/render-{}-{}-d.exr".format(
            ROOT, objname, render_name, objname, index))
        d = file.get("distance.Y")
        d = d.reshape(d.shape[0], d.shape[1])
        d[d > diag_length.item() * max_tolerance] = 0  # diag_length, not the undefined `diag`
        j['maxDepth'] = np.max(d).item()
        with open(
                "{}/{}/{}/render-{}-{}.json".format(ROOT, objname, render_name,
                                                    objname, index), 'w') as f:
            json.dump(j, f)
Example #21
File: blend_ao.py Project: ctk2156/DeepAo
import pyexr
import cv2
import matplotlib.pyplot as plt
import numpy as np

ours_128 = pyexr.open(
    'C:\\Users\\zhangdongjiu\\Desktop\\s06-c3_128-exp4-ours.exr').get()[:, :,
                                                                        0]
ours_2048 = pyexr.open(
    'C:\\Users\\zhangdongjiu\\Desktop\\scene_15_Camera003_2048.exr').get(
    )[:, :, 0]

hbao = pyexr.open(
    'C:\\Users\\zhangdongjiu\\Desktop\\S06-C3_2048-1-hbao.exr').get()[:, :, 0]

blend_ = cv2.resize(ours_128, (0, 0),
                    fx=16,
                    fy=16,
                    interpolation=cv2.INTER_CUBIC)
plt.subplot(221)
plt.imshow(np.clip(ours_128 * 255., 0, 255).astype(np.uint8), cmap='gray')

plt.subplot(222)
plt.imshow(np.clip(hbao * 255., 0, 255).astype(np.uint8), cmap='gray')

plt.subplot(223)
plt.imshow(np.clip((blend_ + hbao) * 255. / 2., 0, 255).astype(np.uint8),
           cmap='gray')

plt.subplot(224)
# plt.imshow(np.clip(cv2.resize(ours_128, (0, 0), fx=16, fy=16, interpolation=cv2.INTER_CUBIC)*255., 0, 255).astype(np.uint8), cmap='gray')
Example #22
        ).splitlines()[:2]
        focal_length = float(intrinsic_line_0[2:].split(',')[0])
        cx = float(intrinsic_line_0[2:-2].split(',')[2].strip())
        cy = float(intrinsic_line_1[1:-2].split(',')[2].strip())
        intrinsic = torch.tensor(
            [[focal_length, 0, cx, 0], [0, focal_length, cy, 0], [0, 0, 1, 0],
             [0, 0, 0, 1]],
            device=device)
        return intrinsic


if __name__ == '__main__':
    from torchviz import make_dot, make_dot_from_trace
    import pyexr
    import graphviz

    model = project(11, torch.tensor(3.))
    dm_path = Path(
        "runs"
    ) / "02020244_fast_dev" / "vis" / "00000" / "val_0492_19_depthmap.exr"
    depth_map = pyexr.open(str(dm_path)).get("Z")[:, :, 0]
    depth_map = torch.from_numpy(depth_map).to(device)
    model.to(device)
    pointcloud = model.depthmap_to_gridspace(depth_map).reshape(-1,
                                                                3).unsqueeze(0)
    voxelized_occ = model(pointcloud)
    test = model(
        model.depthmap_to_gridspace(depth_map).reshape(-1, 3).unsqueeze(0))
    a = make_dot(test, params=dict(model.named_parameters()))
    a.render(filename='backwards_intrinsic', format='png')  # graphviz appends the format extension itself
    #visualize_point_list(pointcloud, output_pt_cloud_path)
Example #23
import pyexr
import cv2
import numpy as np

ao = pyexr.open('C:\\Users\\39796\\Desktop\\3-gt.exr').get()[:, :, 0]
ao = ao * 255.

ao = np.clip(ao, 0, 255)
ao = ao.astype(np.uint8)
print(ao.shape)
cv2.imwrite('C:\\Users\\39796\\Desktop\\3-gt.png', ao)
Example #24
def preprocess_input(filename, gt, debug=False):

    file = pyexr.open(filename)
    data = file.get_all()

    if debug:
        for k, v in data.items():
            print(k, v.dtype)

    # just in case
    for k, v in data.items():
        data[k] = np.nan_to_num(v)

    file_gt = pyexr.open(gt)
    gt_data = file_gt.get_all()

    # just in case
    for k, v in gt_data.items():
        gt_data[k] = np.nan_to_num(v)

    # clip specular data so we don't have negative values in logarithm
    data['specular'] = np.clip(data['specular'], 0, np.max(data['specular']))
    data['specularVariance'] = np.clip(data['specularVariance'], 0,
                                       np.max(data['specularVariance']))
    gt_data['specular'] = np.clip(gt_data['specular'], 0,
                                  np.max(gt_data['specular']))
    gt_data['specularVariance'] = np.clip(gt_data['specularVariance'], 0,
                                          np.max(gt_data['specularVariance']))

    # save albedo
    data['origAlbedo'] = data['albedo'].copy()

    # save reference data (diffuse and specular)
    diff_ref = preprocess_diffuse(gt_data['diffuse'], gt_data['albedo'])
    spec_ref = preprocess_specular(gt_data['specular'])
    diff_sample = preprocess_diffuse(data['diffuse'], data['albedo'])

    data['Reference'] = np.concatenate(
        (diff_ref[:, :, :3].copy(), spec_ref[:, :, :3].copy()), axis=2)
    data['Sample'] = np.concatenate((diff_sample, data['specular']), axis=2)

    # save final input and reference for error calculation;
    # applying albedo and adding the specular component would reconstruct the final color:
    # postprocess_diffuse(data['Reference'][:, :, :3], data['albedo']) + data['Reference'][:, :, 3:]
    data['finalGt'] = gt_data['default']
    # postprocess_diffuse(data['diffuse'][:, :, :3], data['albedo']) + data['specular'][:, :, 3:]
    data['finalInput'] = data['default']

    # preprocess diffuse
    data['diffuse'] = preprocess_diffuse(data['diffuse'], data['albedo'])

    # preprocess diffuse variance
    data['diffuseVariance'] = preprocess_diff_var(data['diffuseVariance'],
                                                  data['albedo'])

    # preprocess specular
    data['specular'] = preprocess_specular(data['specular'])

    # preprocess specular variance
    data['specularVariance'] = preprocess_spec_var(data['specularVariance'],
                                                   data['specular'])

    # just in case
    data['depth'] = np.clip(data['depth'], 0, np.max(data['depth']))

    # normalize depth
    max_depth = np.max(data['depth'])
    if max_depth != 0:
        data['depth'] /= max_depth
        # also have to transform the variance
        data['depthVariance'] /= max_depth * max_depth

    # Calculate gradients of features (not including variances)
    data['gradNormal'] = gradients(data['normal'][:, :, :3].copy())
    data['gradDepth'] = gradients(data['depth'][:, :, :1].copy())
    data['gradAlbedo'] = gradients(data['albedo'][:, :, :3].copy())
    data['gradSpecular'] = gradients(data['specular'][:, :, :3].copy())
    data['gradDiffuse'] = gradients(data['diffuse'][:, :, :3].copy())
    data['gradIrrad'] = gradients(data['default'][:, :, :3].copy())

    # append variances and gradients to data tensors
    data['diffuse'] = np.concatenate(
        (data['diffuse'], data['diffuseVariance'], data['gradDiffuse']),
        axis=2)
    data['specular'] = np.concatenate(
        (data['specular'], data['specularVariance'], data['gradSpecular']),
        axis=2)
    data['normal'] = np.concatenate(
        (data['normalVariance'], data['gradNormal']), axis=2)
    data['depth'] = np.concatenate((data['depthVariance'], data['gradDepth']),
                                   axis=2)

    if debug:
        for k, v in data.items():
            print(k, v.shape, v.dtype)

    X_diff = np.concatenate(
        (data['diffuse'], data['normal'], data['depth'], data['gradAlbedo']),
        axis=2)

    X_spec = np.concatenate(
        (data['specular'], data['normal'], data['depth'], data['gradAlbedo']),
        axis=2)

    assert not np.isnan(X_diff).any()
    assert not np.isnan(X_spec).any()

    print("X_diff shape:", X_diff.shape)
    print(X_diff.dtype, X_spec.dtype)

    data['X_diff'] = X_diff
    data['X_spec'] = X_spec

    remove_channels(
        data, ('diffuseA', 'specularA', 'normalA', 'albedoA', 'depthA',
               'visibilityA', 'colorA', 'gradNormal', 'gradDepth',
               'gradAlbedo', 'gradSpecular', 'gradDiffuse', 'gradIrrad',
               'albedo', 'diffuse', 'depth', 'specular', 'diffuseVariance',
               'specularVariance', 'depthVariance', 'visibilityVariance',
               'colorVariance', 'normalVariance', 'visibility'))

    return data
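The helpers (preprocess_diffuse, preprocess_specular, gradients, remove_channels, ...) are not included above. As one plausible sketch, gradients could take per-channel horizontal and vertical finite differences and stack them along the channel axis, which matches how its outputs are concatenated here ((H, W, C) -> (H, W, 2C)):

def gradients(data):
    # Shape-preserving finite differences along width and height.
    dx = np.diff(data, axis=1, append=data[:, -1:, :])
    dy = np.diff(data, axis=0, append=data[-1:, :, :])
    return np.concatenate([dx, dy], axis=2)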
Example #25
get_ipython().system(u'wget -O gt10.exr https://www.dropbox.com/s/d1qfcmdlx3hukls/35930976-08192spp.exr?dl=1')
  
get_ipython().system(u'wget -O eval1.exr https://www.dropbox.com/s/ion7q7osuyrplvg/41905363-00256spp.exr?dl=1')
get_ipython().system(u'wget -O eval2.exr https://www.dropbox.com/s/e9kyil0isfiu0ng/30265043-00256spp.exr?dl=1')
get_ipython().system(u'wget -O eval3.exr https://www.dropbox.com/s/pu4zkkk3119ytx1/11202348-00256spp.exr?dl=1')
  
get_ipython().system(u'wget -O evalref1.exr https://www.dropbox.com/s/so84617kw551e2m/41905363-08192spp.exr?dl=1')
get_ipython().system(u'wget -O evalref2.exr https://www.dropbox.com/s/uuudw2av8vgzsjw/30265043-08192spp.exr?dl=1')
get_ipython().system(u'wget -O evalref3.exr https://www.dropbox.com/s/gszm89oogjamiyi/11202348-08192spp.exr?dl=1')
"""

# Let's read the image and see what's inside.

# In[4]:

file = pyexr.open("sample.exr")

print("Width:", file.width)
print("Height:", file.height)

print("Available channels:")
file.describe_channels()

print("Default channels:", file.channel_map['default'])


def show_data(data, figsize=(15, 15), normalize=False):
    if normalize:
        data = np.clip(data, 0, 1)**0.45454545
    plt.figure(figsize=figsize)
    imgplot = plt.imshow(data, aspect='equal')
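With normalize=True, show_data clips to [0, 1] and applies an approximate display gamma of 1/2.2 (0.45454545). For example:

show_data(file.get(), normalize=True)
plt.show()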
Example #26
import numpy as np
import pyexr
import matplotlib.pyplot as plt
import os

base_dir = 'C:\\Users\\39796\\Desktop\\Ambient Occlosion Paper\\scene_image'
# 'scene_18_Perspective_View180014'
position_path = os.path.join(base_dir, 'scene_18_Perspective_View130013.exr')
# depth_path = os.path.join(base_dir, 'scene_19_Camera001_Z Depth.exr')
# 'scene_18_Perspective_rgb_Z Depth_View090049.exr'
# position = 1+pyexr.open(position_path).get()[:, :, 2]/1500.0
position = pyexr.open(position_path).get()[:, :, :]
print(np.mean(position[:, :, 0]))
# depth = pyexr.open(depth_path).get()[:, :, 2]
# plt.imsave('%s\\scene_18_Perspective_rgb_Z Depth_View090049.png'%base_dir, position)

# plt.subplot(131)
plt.imshow(position)
# plt.subplot(132)
# plt.imshow(depth)
# plt.subplot(133)
# plt.imshow(position-depth)
plt.show()
Example #27
    @classmethod
    def _read_exr(cls, file_path):
        # returns inverse depth (disparity) from the EXR's "Depth.Z" channel
        return 1.0 / pyexr.open(file_path).get("Depth.Z").astype(np.float32)
Example #28
        render = '{} {}'.format(render, args.options)
    render = '{} -o {}'.format(render, out_path)
    cmd = render.split()

    # Run and time out after fixed amount of time
    sys.stdout.write('Rendering... ')
    sys.stdout.flush()
    try:
        out = sp.check_output(cmd, shell=False, timeout=args.timeout)
    except sp.TimeoutExpired as e:
        print('done.')

    # Update interactive viewer
    sys.stdout.write('Recomputing metrics... ')
    sys.stdout.flush()
    ref_fp = pyexr.open(args.ref)
    ref = np.array(ref_fp.get())
    img_fp = pyexr.open(out_path)
    img = np.array(img_fp.get())
    test = [{'name': args.name, 'data': img}]

    with open(os.path.join(args.dir, 'data.json'), 'r') as fp:
        data = json.load(fp)
    with open(os.path.join(args.dir, 'stats.json'), 'r') as fp:
        stats = json.load(fp)

    data = update_stats(args.dir, data, ref, test, args.metrics, args.clip,
                        args.epsilon)
    write_data(args.dir, data)
    print('done.')
Example #29
# fp_gt = 'C:\\Users\\39796\\Desktop\\compare\\1-gt.exr'
# fp_nnao = 'C:\\Users\\39796\\Desktop\\compare\\1-nnao.exr'
# fp_deepshading = 'C:\\Users\\39796\\Desktop\\compare\\1-deepshading.exr'

order = 3
base_dir = 'D:\\Projects\\Python\\PycharmProjects\\Ao_pt\\Logs\\test\\i%s' % order
fp_hbao = os.path.join(base_dir, '%s-hbao.exr' % order)
fp_ours = os.path.join(base_dir, '%s-ours.exr' % order)
fp_gt = os.path.join(base_dir, '%s-gt.exr' % order)
fp_nnao = os.path.join(base_dir, '%s-nnao.exr' % order)
fp_deepshading = os.path.join(base_dir, '%s-deepshading.exr' % order)
fp_vao = os.path.join(base_dir, '%s-vao++.exr' % order)

fp_dgi = os.path.join(base_dir, '%s-dgi.png' % order)

gt = pyexr.open(fp_gt).get()[:, :, 0]
ours = pyexr.open(fp_ours).get()[:, :, 0]
# hbao = pyexr.open(fp_hbao).get()[:, :, 0]
# nnao = pyexr.open(fp_nnao).get()[:, :, 0]
# deepshading = pyexr.open(fp_deepshading).get()[:, :, 0]
vao = pyexr.open(fp_vao).get()[:, :, 0]
# dgi = cv2.imread(fp_dgi)
# dgi = dgi[:, :, (2, 1, 0)]
plt.imsave(
    'C:\\Users\\39796\\Desktop\\Ambient Occlosion Paper\\Experiment\\%s-gt.png'
    % order,
    gt,
    cmap='gray')
# plt.imsave('C:\\Users\\39796\\Desktop\\Ambient Occlosion Paper\\Experiment\\%s-hbao.png' % order, hbao, cmap='gray')
# plt.imsave('C:\\Users\\39796\\Desktop\\Ambient Occlosion Paper\\Experiment\\%s-nnao.png' % order, nnao, cmap='gray')
# plt.imsave('C:\\Users\\39796\\Desktop\\Ambient Occlosion Paper\\Experiment\\%s-deepshading.png' % order, deepshading, cmap='gray')
Example #30
    objname = objectns[i]
    objpath = ROOT + "/{}/{}.obj".format(objname, objname)
    thePath = ROOT + "/{}/render20".format(objname)
    cp, c, diag = AABB(objpath)
    if not os.path.exists(thePath):
        os.makedirs(thePath)
    renderids = genXML(cp, c, objname)
    for j in range(len(renderids)):
        id = renderids[j]
        c = "C:/Mitsuba/mitsuba.exe " + "\"{}/{}/render20/{}.xml\"".format(
            ROOT, objname, id)
        print(c)
        check_output(c, shell=True)
        if id.split("-")[-1] != "d":
            c = "C:/Mitsuba/mtsutil.exe tonemap -o " + "\"{}/{}/render20/{}.png\"".format(ROOT, objname, id) + " " + \
                "\"{}/{}/render20/{}.exr\"".format(ROOT, objname, id)
            check_output(c, shell=True)
        elif id.split("-")[-1] == "d":
            file = pyexr.open("{}/{}/render20/{}.exr".format(
                ROOT, objname, id))
            d = file.get("distance.Y")
            d = d.reshape(d.shape[0], d.shape[1])
            # d[np.isinf(d)] = 0
            d[d > diag * max_tolerance] = 0
            d = d / np.max(d)
            d = d * 255.0
            imageio.imwrite(
                "{}/{}/render20/{}.png".format(ROOT, objname, id),
                d.astype(np.uint8))  # scipy.misc.imsave was removed in SciPy 1.2
        print("finish {}".format(id))
    genJson(objname)