def create_hdf5_container(path_i1, path_o, lf_name):

    px = 48
    py = 48

    nviews = 9

    sx = 16
    sy = 16
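    # px, py: spatial patch size; sx, sy: stride between patch origins, so
    # neighbouring patches overlap by (px - sx) / (py - sy) pixels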

    file = h5py.File(path_o + '/' + lf_name + '.hdf5', 'w')

    # read diffuse color
    LF = file_io.read_lightfield(path_i1)
    LF = LF.astype(np.float32)  # / 255.0

    cv_gt = lf_tools.cv(LF)
    lf_tools.save_image(path_o + '/' + lf_name, cv_gt)

    # maybe we need those, probably not.
    param_dict = file_io.read_parameters(path_i1)

    dset_blocks = []
    # block count: write out one individual light field
    cx = np.int32((LF.shape[3] - px) / sx) + 1
    cy = np.int32((LF.shape[2] - py) / sy) + 1
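    # cx, cy: number of patches along width/height, using the usual sliding-window
    # count floor((size - patch) / stride) + 1. Note the extraction loop below mixes
    # the x/y block indices, which only works because the scenes used here are
    # square with px == py and sx == sy (so cx == cy).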

    for i, j in itertools.product(np.arange(0, nviews), np.arange(0, nviews)):
        dset_blocks.append(
            file.create_dataset('views%d%d' % (i, j), (cy, cx, 3, px, py),
                                chunks=(1, 1, 3, px, py),
                                maxshape=(None, None, 3, px, py)))
    # lists indexed in 2D
    dset_blocks = [
        dset_blocks[x:x + nviews] for x in range(0, len(dset_blocks), nviews)
    ]
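    # after this regrouping, dset_blocks[i][j] is the dataset for view (i, j),
    # holding one 3 x px x py patch per block index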

    sys.stdout.write(lf_name + ': ')

    for bx in np.arange(0, cx):
        sys.stdout.write('.')
        sys.stdout.flush()

        for by in np.arange(0, cy):

            x = bx * sx
            y = by * sy

            # extract data
            for i, j in itertools.product(np.arange(0, nviews),
                                          np.arange(0, nviews)):
                dset_blocks[i][j][bx, by, :, :, :] = np.transpose(
                    np.array(LF[i, j, x:x + px, y:y + py, :]),
                    (-1, 0, 1)).reshape(3, px, py)

    sys.stdout.write(' Done.\n')
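# Minimal usage sketch (the paths below are placeholders, not from the original code):
#   create_hdf5_container('/data/lightfields/some_scene', '/data/containers', 'some_scene')
# This would read the light field, save its centre view as an image and write one
# 'views%d%d' patch dataset per view into '/data/containers/some_scene.hdf5'.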
Example #2
cv = np.zeros([2, 512, 512, 3])
dispgt = np.zeros([2, 512, 512])
n = 0
for lf_name in data_folders:

    data_folder = os.path.join(data_source, lf_name)

    LF = file_io.read_lightfield(data_folder)
    LF = LF.astype(np.float32)

    disp = file_io.read_disparity(data_folder)
    disp_gt = np.array(disp[0])
    disp_gt = np.flip(disp_gt, 0)
    dispgt[n, :, :] = disp_gt

    cv[n, :, :, :] = lf_tools.cv(LF)
    n = n + 1

stack_h, stack_v = refocus_cross(cv, dispgt, 9)
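# stack_h / stack_v are assumed to hold the 9 horizontal / vertical refocused views
# produced by refocus_cross; the commented-out plots below iterate over k = 0..8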

# for k in range(0,9):
#     plt.figure(k)
#     plt.imshow(stack_h[k, :, :, :])
#     plt.show()
#

# for k in range(0, 9):
#     plt.figure(k)
#     plt.imshow(np.abs(stack_h[k, :, :, :] - cv.transpose(1,0,2)))
k = 0
#
# loop over all datasets, write out each dataset in patches
# to feed to autoencoder in random order
#
index = 0
for lf_name in data_folders:

    data_file = os.path.join(data_source, lf_name)

    # input LF
    mat_content = h5py.File(data_file, 'r')
    LF = mat_content['LF'][()]
    LF = np.transpose(LF, (4, 3, 2, 1, 0))
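    # HDF5 files written from MATLAB are stored column-major, so h5py sees the axes
    # reversed; the transpose (presumably) restores the (view, view, y, x, channel) order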

    cv_gt = lf_tools.cv(LF)
    lf_tools.save_image(training_data_dir + 'input' + lf_name, cv_gt)

    # write out one individual light field
    # block count
    cx = np.int32((LF.shape[3] - px) / sx) + 1
    cy = np.int32((LF.shape[2] - py) / sy) + 1

    for by in np.arange(0, cy):
        sys.stdout.write('.')
        sys.stdout.flush()

        for bx in np.arange(0, cx):

            x = bx * sx
            y = by * sy
Example #4
        tmp[LF_sh_old == 0] = 1
        alpha = np.divide(LF_sh, tmp)
        alpha[LF_sh_old == 0] = 1
        alpha[np.isnan(alpha)] = 1
        alpha[np.isinf(alpha)] = 1
        del LF_sh_old
    else:
        alpha = 1
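    # alpha is the per-pixel ratio between the new and the old shading (LF_sh / LF_sh_old);
    # zero denominators, NaNs and Infs are clamped to 1 so they leave the specular term unchanged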

    # glossy LF
    LF_specular = np.multiply(LF_gc, np.add(LF_gd, LF_gi))
    LF_specular = np.multiply(alpha, LF_specular)
    # diffuse LF
    LF_diffuse = np.multiply(LF_albedo, LF_sh)
    # show center view
    cv_diffuse = lf_tools.cv(LF_diffuse)
    # show center view
    cv_specular = lf_tools.cv(LF_specular)
    # lf_tools.save_image( training_data_dir + 'specular' +lf_name, cv_specular)
    # input LF
    LF = np.add(LF_diffuse, LF_specular)
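    # the full light field is recombined as albedo * shading plus the (alpha-corrected) glossy term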
    cv_gt = lf_tools.cv(LF)

    # imean = 0.3
    # factor = imean / np.mean(cv_gt)
    # LF_diffuse = LF_diffuse*factor
    # LF_specular = LF_specular*factor
    # LF = np.add(LF_diffuse, LF_specular)
    # cv_gt = lf_tools.cv(LF)

    disp = file_io.read_disparity(data_folder)
Example #5
    nan_LF_specular = np.sum(np.isnan(LF_specular))
    nan_disp = np.sum(np.isnan(disp_gt))

    inf_LF = np.sum(np.isinf(LF))
    inf_LF_albedo = np.sum(np.isinf(LF_albedo))
    inf_LF_sh = np.sum(np.isinf(LF_sh))
    inf_LF_specular = np.sum(np.isinf(LF_specular))
    inf_disp = np.sum(np.isinf(disp_gt))

    naninf_sum = (nan_disp + nan_LF + nan_LF_albedo + nan_LF_sh + nan_LF_specular +
                  inf_disp + inf_LF + inf_LF_albedo + inf_LF_sh + inf_LF_specular)
    if naninf_sum > 0:
        print('inf_nan' + lf_name)
        lf_name = lf_name + '_inf_nan'

    lf_tools.save_image(data_source + '\\' + 'input_' + lf_name,
                        lf_tools.cv(LF))
    lf_tools.save_image(data_source + '\\' + 'albedo_' + lf_name,
                        lf_tools.cv(LF_albedo))
    lf_tools.save_image(
        data_source + '\\' + 'sh_' + lf_name + '_' + str(np.amax(LF_sh)),
        lf_tools.cv(LF_sh))
    lf_tools.save_image(
        data_source + '\\' + 'specular_' + lf_name + '_' +
        str(np.amax(LF_specular)), lf_tools.cv(LF_specular))

    # write out one individual light field
    # block count
    cx = np.int32((LF.shape[3] - px) / sx) + 1
    cy = np.int32((LF.shape[2] - py) / sy) + 1

    for by in np.arange(0, cy):
    if folder == 'Flowers':
        data_folders = os.listdir(data_source + folder + '/')
        for lf_name in data_folders:
            real_name = lf_name[0:-8]
            data_folder = os.path.join(data_source, lf_name)
            print("now %i / %i" % (idx_folder + 1, len(data_folders)))
            idx_folder = idx_folder + 1

            data_path = data_source + folder + '/' + lf_name
            f = h5py.File(data_path, 'r')

            # # flowers and synthetic
            LF_temp = np.transpose(f['LF'], (4, 3, 2, 1, 0))
            LF_temp = LF_temp.astype(np.float32)

            cv_gt = lf_tools.cv(LF_temp)

            # ############################################################################################
            # # evil hack: make the light field brighter
            # ############################################################################################
            if lf_name[0:3] == 'IMG':
                imean = 0.15
                factor = imean / np.mean(cv_gt)

                LF_temp = LF_temp * factor

                LF_temp = np.clip(LF_temp, 0, 1)
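                # rescale so the centre view has mean intensity imean, then clip to the valid [0, 1] range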

            # ############################################################################################
            # ############################################################################################
Example #7
# wait a bit to not skew timing results with initialization
time.sleep(20)

# loop over all datasets and collect errors
results = []

for lf_name in data_folders:
    file = h5py.File(result_folder + lf_name[3] + '.hdf5', 'w')
    data_file = data_eval_folder + lf_name[0] + "/" + lf_name[1] + "/" + lf_name[2] + "/" + \
                    lf_name[3] + ".hdf5"
    hdf_file = h5py.File(data_file, 'r')
    # hard-coded size, just for testing
    LF_LR = hdf_file['LF_LR']
    LF_HR = hdf_file['LF']
    cv_gt = lf_tools.cv(LF_HR)
    cv_LR = lf_tools.cv(LF_LR)

    data = []

    color_space = hp.config['ColorSpace']

    if color_space == 'YUV':

        cv_gt_orig = rgb2YUV(cv_gt)
        cv_LR_orig = rgb2YUV(cv_LR)

        decoder_path = 'Y'
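        # assumption: in YUV mode the network operates on the luminance channel only,
        # and decoder_path = 'Y' selects the corresponding decoder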

        result_cv = encode_decode_lightfield(
            data,
        tmp[LF_sh_old == 0] = 1
        alpha = np.divide(LF_sh, tmp)
        alpha[LF_sh_old == 0] = 1
        alpha[np.isnan(alpha)] = 1
        alpha[np.isinf(alpha)] = 1
        del LF_sh_old
    else:
        alpha = 1

    # glossy LF
    LF_specular = np.multiply(LF_gc, np.add(LF_gd, LF_gi))
    LF_specular = np.multiply(alpha, LF_specular)
    # diffuse LF
    LF_diffuse = np.multiply(LF_albedo, LF_sh)
    # show center view
    cv_diffuse = lf_tools.cv(LF_diffuse)
    # show center view
    cv_specular = lf_tools.cv(LF_specular)
    # lf_tools.save_image( training_data_dir + 'specular' +lf_name, cv_specular)
    # input LF
    LF_temp = np.add(LF_diffuse, LF_specular)
    cv_gt = np.clip(lf_tools.cv(LF_temp), 0, 1)

    ############################################################################################
    imean = 0.3
    factor = imean / np.mean(cv_gt)

    LF_temp = LF_temp * factor

    LF_temp = np.clip(LF_temp, 0, 1)
    cv_gt_2 = lf_tools.cv(LF_temp)
    alpha[np.isinf(alpha)] = 1
    del LF_sh_old
  else:
    alpha = 1

  # glossy LF
  LF_specular = np.multiply(LF_gc, np.add(LF_gd, LF_gi))
  LF_specular = np.multiply(alpha, LF_specular)
  # input LF
  LF = np.add(np.multiply(LF_dc, LF_sh),LF_specular)

  dset_LF_albedo = file.create_dataset('LF_albedo', data=LF_albedo)
  dset_LF_sh = file.create_dataset('LF_sh', data=LF_sh)
  dset_LF_specular = file.create_dataset('LF_specular', data=LF_specular)

  lf_tools.save_image(data_source + '\\' + 'input_' + lf_name, lf_tools.cv(LF))
  lf_tools.save_image(data_source + '\\' + 'albedo_' + lf_name, lf_tools.cv(LF_albedo))
  lf_tools.save_image(data_source + '\\' + 'sh_' + lf_name + '_' + str(np.amax(LF_sh)), lf_tools.cv(LF_sh))
  lf_tools.save_image(data_source + '\\' + 'specular_' + lf_name + '_' + str(np.amax(LF_specular)),
                      lf_tools.cv(LF_specular))

  # input LF

  dset_LF = file.create_dataset('LF', data=LF)

  disp = file_io.read_disparity(data_folder)
  disp_gt = np.array(disp[0])
  disp_gt = np.flip(disp_gt, 0)
  dset_disp = file.create_dataset('LF_disp', data=disp_gt)
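  # the ground-truth disparity is flipped vertically before being stored, presumably
  # to match the image orientation of the light field (e.g. bottom-to-top .pfm files)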

  # next dataset
Example #10
# wait a bit to not skew timing results with initialization
time.sleep(20)

# loop over all datasets and collect errors
results = []
for lf_name in data_folders:
    file = h5py.File(result_folder + lf_name[3] + '.hdf5', 'w')
    if lf_name[1] == 'intrinsic':
        # stored directly in hdf5
        data_file = data_eval_folder + lf_name[0] + "/" + lf_name[1] + "/" + lf_name[2] + "/" + \
                    lf_name[3] + ".hdf5"
        hdf_file = h5py.File(data_file, 'r')
        # hard-coded size, just for testing
        LF = hdf_file['LF']
        cv_gt = lf_tools.cv(LF)

        LF_diffuse_gt = hdf_file['LF_diffuse']
        diffuse_gt = lf_tools.cv(LF_diffuse_gt)

        LF_specular_gt = hdf_file['LF_specular']
        specular_gt = lf_tools.cv(LF_specular_gt)

        disp_gt = hdf_file['LF_disp']

        dmin = np.min(disp_gt)
        dmax = np.max(disp_gt)
    elif lf_name[1] == 'benchmark':
        data_file = data_eval_folder + lf_name[0] + "/" + lf_name[1] + "/" + lf_name[2] + "/" + \
                    lf_name[3] + ".hdf5"
        hdf_file = h5py.File(data_file, 'r')
Example #11
    data_folder = os.path.join(data_source, lf_name)

    LF = file_io.read_lightfield(data_folder)
    LF = LF.astype(np.float32)
    LF_NG = file_io.read_lightfield(data_folder)
    LF_NG = LF_NG.astype(np.float32)

    LF_LR_G = np.zeros((LF.shape[0], LF.shape[1],
                        int(LF.shape[2] / scale), int(LF.shape[3] / scale),
                        LF.shape[4]), np.float32)
    LF_LR_NG = np.zeros((LF.shape[0], LF.shape[1],
                         int(LF.shape[2] / scale), int(LF.shape[3] / scale),
                         LF.shape[4]), np.float32)

    cv_gt = lf_tools.cv(LF)

    for v in range(0, nviews):
        for h in range(0, nviews):
            LF[v, h, :, :, :] = gaussian_filter(LF[v, h, :, :, :],
                                                sigma=0.5,
                                                truncate=2)
            LF_LR_G[v, h, :, :, :] = LF[v, h, 0:LF.shape[2] - 1:scale,
                                        0:LF.shape[3] - 1:scale, :]
            LF_LR_NG[v, h, :, :, :] = LF_NG[v, h, 0:LF.shape[2] - 1:scale,
                                            0:LF.shape[3] - 1:scale, :]
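    # LF_LR_G holds views that were Gaussian-blurred (anti-aliasing prefilter) before the
    # strided subsampling; LF_LR_NG keeps the same views subsampled without prefiltering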

    cv_G = lf_tools.cv(LF_LR_G)
    cv_NG = lf_tools.cv(LF_LR_NG)

    plt.figure(0)