Example #1
def test_factorized(input_file, model, ckpt_dir, scale, cube_size, min_num, postfix=''):
    # Pre-process
    cubes, cube_positions, points_numbers = preprocess(input_file, scale, cube_size, min_num)
    ### Encoding
    strings, min_v, max_v, shape = compress_factorized(cubes, model, ckpt_dir)
    # Write files
    filename = os.path.split(input_file)[-1][:-4]
    print(filename)
    rootdir = './compressed' + postfix + '/'
    bytes_strings, bytes_pointnums, bytes_cubepos = write_binary_files_factorized(
        filename, strings.numpy(), points_numbers, cube_positions,
        min_v.numpy(), max_v.numpy(), shape.numpy(), rootdir)
    # Read files
    strings_d, points_numbers_d, cube_positions_d, min_v_d, max_v_d, shape_d = \
        read_binary_files_factorized(filename, rootdir)
    # Decoding
    cubes_d = decompress_factorized(strings_d, min_v_d, max_v_d, shape_d, model, ckpt_dir)

    # bpp
    N = get_points_number(input_file)
    bpp = round(8*(bytes_strings + bytes_pointnums + bytes_cubepos)/float(N), 4)
    bpp_strings = round(8*bytes_strings/float(N), 4)
    bpp_pointsnums = round(8*bytes_pointnums/float(N), 4)
    bpp_cubepos = round(8*bytes_cubepos/float(N), 4)
    bpp_strings_hyper = 0
    bpp_strings_head = 0
    bpps = [bpp, bpp_strings, bpp_strings_hyper, bpp_strings_head, bpp_pointsnums, bpp_cubepos]

    return cubes_d, cube_positions_d, points_numbers_d, N, bpps
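
A minimal calling sketch for test_factorized (hedged: the input path, checkpoint directory and cube parameters are illustrative assumptions, and the helper functions are assumed to be importable from the repository's modules):

# Hypothetical driver -- adjust paths and parameters to your setup.
input_file = 'testdata/longdress_vox10_1300.ply'  # assumed input point cloud (.ply)
model = 'model_voxception'                        # assumed model module name
ckpt_dir = './checkpoints/factorized/'            # assumed checkpoint directory
cubes_d, cube_positions_d, points_numbers_d, N, bpps = test_factorized(
    input_file, model, ckpt_dir, scale=1.0, cube_size=64, min_num=64)
# bpps = [total, strings, hyper, head, point numbers, cube positions] in bits per point
print('points:', N, 'bpp breakdown:', bpps)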
Example #2
File: eval.py  Project: ywu40/PCGCv1
def test_hyper(input_file,
               model,
               ckpt_dir,
               scale,
               cube_size,
               min_num,
               postfix=''):
    # Pre-process
    cubes, cube_positions, points_numbers = preprocess(input_file, scale,
                                                       cube_size, min_num)
    ### Encoding
    y_strings, y_min_vs, y_max_vs, y_shape, z_strings, z_min_v, z_max_v, z_shape, x_ds = compress_hyper(
        cubes, model, ckpt_dir, True)
    # Write files
    filename = os.path.split(input_file)[-1][:-4]
    print(filename)
    rootdir = './compressed' + postfix + '/'
    bytes_strings, bytes_strings_head, bytes_strings_hyper, bytes_pointnums, bytes_cubepos = write_binary_files_hyper(
        filename, y_strings.numpy(), z_strings.numpy(), points_numbers,
        cube_positions, y_min_vs.numpy(), y_max_vs.numpy(), y_shape.numpy(),
        z_min_v.numpy(), z_max_v.numpy(), z_shape.numpy(), rootdir)
    # Read files
    y_strings_d, z_strings_d, points_numbers_d, cube_positions_d, y_min_vs_d, y_max_vs_d, y_shape_d, z_min_v_d, z_max_v_d, z_shape_d = \
        read_binary_files_hyper(filename, rootdir)
    # Decoding
    cubes_d = decompress_hyper(y_strings_d, y_min_vs_d.astype('int32'),
                               y_max_vs_d.astype('int32'), y_shape_d,
                               z_strings_d, z_min_v_d, z_max_v_d, z_shape_d,
                               model, ckpt_dir)
    # cheat!!! -- arithmetic decoding on GPU may not bit-exactly match the encoder
    # output, so report the mismatch and fall back to the encoder-side
    # reconstruction x_ds.
    ##############
    print("decoding error on gpu", "!" * 20,
          np.max(tf.abs(cubes_d - x_ds).numpy()), "!" * 20)
    cubes_d = x_ds
    ##############
    # bpp
    N = get_points_number(input_file)
    bpp = round(
        8 * (bytes_strings + bytes_strings_head + bytes_strings_hyper +
             bytes_pointnums + bytes_cubepos) / float(N), 4)

    bpp_strings = round(8 * bytes_strings / float(N), 4)
    bpp_strings_hyper = round(8 * bytes_strings_hyper / float(N), 4)
    bpp_strings_head = round(8 * bytes_strings_head / float(N), 4)
    bpp_pointsnums = round(8 * bytes_pointnums / float(N), 4)
    bpp_cubepos = round(8 * bytes_cubepos / float(N), 4)
    bpps = [
        bpp, bpp_strings, bpp_strings_hyper, bpp_strings_head, bpp_pointsnums,
        bpp_cubepos
    ]

    return cubes_d, cube_positions_d, points_numbers_d, N, bpps
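
The same pattern drives the hyperprior variant; the returned bpps list has the same order as in Example #1: [bpp, bpp_strings, bpp_strings_hyper, bpp_strings_head, bpp_pointsnums, bpp_cubepos]. A hedged sketch (paths and parameters are placeholders, not repository values):

# Hypothetical driver for the hyperprior model.
input_file = 'testdata/longdress_vox10_1300.ply'  # assumed input point cloud
model = 'model_voxception'                        # assumed model module name
ckpt_dir = './checkpoints/hyper/'                 # assumed checkpoint directory
cubes_d, cube_positions_d, points_numbers_d, N, bpps = test_hyper(
    input_file, model, ckpt_dir, scale=1.0, cube_size=64, min_num=64, postfix='_hyper')
print('bits per point (total):', bpps[0])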
Example #3
def eval(input_file, config, config_file):
    model = 'model_voxception'  # model module name; required by compress/decompress below
    filename = os.path.split(input_file)[-1][:-4]
    output_file = filename + '_rec.ply'
    # input_file_n = input_file[:-4]+'_n.ply'
    input_file_n = input_file    
    
    cube_size = config.getint('DEFAULT', 'cube_size')
    min_num = config.getint('DEFAULT', 'min_num')
    res = config.getint('DEFAULT', 'resolution')

    print('cube size:', cube_size, 'min num:', min_num, 'res:', res)

    for index, rate in enumerate(config.sections()):
        scale = float(config.get(rate, 'scale'))
        ckpt_dir = str(config.get(rate, 'ckpt_dir'))
        print('====================', 'config:', rate, 'scale:', scale, 'ckpt_dir:', ckpt_dir)

        # Pre-process
        cubes, cube_positions, points_numbers = preprocess(input_file, scale, cube_size, min_num)
        ### Encoding
        y_strings, y_min_vs, y_max_vs, y_shape, z_strings, z_min_v, z_max_v, z_shape, x_ds = \
            compress_less_mem(cubes, model, ckpt_dir, True)
        # Write files
        filename = os.path.split(input_file)[-1][:-4]
        print(filename)
        rootdir = './compressed/'
        bytes_strings, bytes_strings_head, bytes_strings_hyper, bytes_pointnums, bytes_cubepos = \
            write_binary_files_less_mem(
                filename, y_strings.numpy(), z_strings.numpy(), points_numbers, cube_positions,
                y_min_vs.numpy(), y_max_vs.numpy(), y_shape.numpy(),
                z_min_v.numpy(), z_max_v.numpy(), z_shape.numpy(), rootdir)
        # Read files
        y_strings_d, z_strings_d, points_numbers_d, cube_positions_d, \
            y_min_vs_d, y_max_vs_d, y_shape_d, z_min_v_d, z_max_v_d, z_shape_d = \
            read_binary_files_less_mem(filename, rootdir)
        # Decoding
        cubes_d = decompress_less_mem(y_strings_d, y_min_vs_d.astype('int32'), y_max_vs_d.astype('int32'), 
                                      y_shape_d, z_strings_d, z_min_v_d, z_max_v_d, z_shape_d, model, ckpt_dir)
        # cheat!!! -- as in test_hyper, substitute the encoder-side reconstruction
        # x_ds for the GPU-decoded cubes to sidestep the decoding mismatch.
        ##############
        cubes_d = x_ds
        ##############
        # bpp
        N = get_points_number(input_file)
        bpp = round(8*(bytes_strings + 
                       bytes_strings_head + 
                       bytes_strings_hyper + 
                       bytes_pointnums + 
                       bytes_cubepos)/float(N), 4)

        bpp_strings = round(8*bytes_strings/float(N), 4)
        bpp_strings_head = round(8*bytes_strings_head/float(N), 4)
        bpp_strings_hyper = round(8*bytes_strings_hyper/float(N), 4)
        bpp_pointsnums = round(8*bytes_pointnums/float(N), 4)
        bpp_cubepos = round(8*bytes_cubepos/float(N), 4)

        ########## Post-process ##########
        # select rho for optimal d1/d2 metrics.
        if config.has_option(rate, 'rho_d1'):
            rho_d1 = float(config.get(rate, 'rho_d1'))
        else:
            rho_d1 = select_rho("mseF,PSNR (p2point)", input_file, output_file, input_file_n, 
                            cubes_d, points_numbers_d, cube_positions_d, scale, cube_size, res)
            config.set(rate, 'rho_d1', str(rho_d1)) 
            with open(config_file, 'w') as f:
                config.write(f)

        if config.has_option(rate, 'rho_d2'):
            rho_d2 = float(config.get(rate, 'rho_d2'))
        else:
            rho_d2 = select_rho("mseF,PSNR (p2plane)", input_file, output_file, input_file_n, 
                            cubes_d, points_numbers_d, cube_positions_d, scale, cube_size, res)
            config.set(rate, 'rho_d2', str(rho_d2))
            with open(config_file, 'w') as f:
                config.write(f)

        # metrics.
        for index_rho, rho in enumerate((1.0, rho_d1, rho_d2)):
            postprocess(output_file, cubes_d, points_numbers_d, cube_positions_d, scale, cube_size, rho)

            # distortion
            results = pc_error(input_file, output_file, input_file_n, res, show=False)

            # bpp
            results["n_points"] = get_points_number(output_file)
            results["rho"] = rho
            results["ori_points"] = N
            results["scale"] = scale
            results["bpp_strings"] = bpp_strings
            results["bpp_strings_head"] = bpp_strings_head
            results["bpp_strings_hyper"] = bpp_strings_hyper
            results["bpp_pointsnums"] = bpp_pointsnums
            results["bpp_cubepos"] = bpp_cubepos
            results["bpp"] = bpp

            print(results)

            if index_rho == 0:
                if index == 0:
                    all_results = results.copy(deep=True)
                else:
                    all_results = all_results.append(results, ignore_index=True)
            elif index_rho == 1:
                if index == 0:
                    all_results_d1 = results.copy(deep=True)
                else:
                    all_results_d1 = all_results_d1.append(results, ignore_index=True)
            else:
                if index == 0:
                    all_results_d2 = results.copy(deep=True)
                else:
                    all_results_d2 = all_results_d2.append(results, ignore_index=True)    

    # write to csv
    print(all_results)
    print(all_results_d1)
    print(all_results_d2)
    csv_root_dir = './CSV/hyper/'
    if not os.path.exists(csv_root_dir):
        os.makedirs(csv_root_dir)

    csv_name = os.path.join(csv_root_dir, filename + '.csv')
    all_results.to_csv(csv_name, index=False)

    csv_name_d1 = os.path.join(csv_root_dir, filename + '_d1.csv')
    all_results_d1.to_csv(csv_name_d1, index=False)

    csv_name_d2 = os.path.join(csv_root_dir, filename + '_d2.csv')
    all_results_d2.to_csv(csv_name_d2, index=False)
    
    return all_results, all_results_d1, all_results_d2
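
A sketch of how eval() might be driven (hedged: the config path and its contents are assumptions modeled only on the options the function reads above):

import configparser

# Hypothetical config file: DEFAULT must define cube_size, min_num and resolution;
# each rate section must define scale and ckpt_dir (rho_d1 / rho_d2 are optional and
# are selected and written back by eval() when missing).
config_file = './cfgs/hyper.ini'  # assumed path
config = configparser.ConfigParser()
config.read(config_file)

all_results, all_results_d1, all_results_d2 = eval(
    'testdata/longdress_vox10_1300.ply', config, config_file)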