Example #1
def select_optimal_rho(item, rhos, input_file, output_file, input_file_n,
                       cubes_d, points_numbers_d, cube_positions_d, scale,
                       cube_size, res):
    for i, rho in enumerate(rhos):
        print('===== select rho =====')
        postprocess(output_file, cubes_d, points_numbers_d, cube_positions_d,
                    scale, cube_size, rho)
        results = pc_error(input_file,
                           output_file,
                           input_file_n,
                           res,
                           show=False)
        PSNR = float(results[item])
        print('===== results: ', i, rho, item, PSNR)
        if i == 0:
            # Seed the running maximum with the first PSNR so that a drop at
            # the next candidate is detected (same stop rule as select_rho).
            MAX_PSNR = PSNR
            optimal_rho = rho
        else:
            MAX_PSNR = max(PSNR, MAX_PSNR)

        if PSNR < MAX_PSNR:
            break
        else:
            optimal_rho = rho

    return optimal_rho


def select_rho(item, input_file, output_file, input_file_n, cubes_d,
               points_numbers_d, cube_positions_d, scale, cube_size, res):

    steps = [0.02] * 4 + [0.04] * 4 + [0.08] * 4 + [0.16] * 4 + [0.32] * 4
    MAX = 0
    rho = 1.0
    optimal_rho = 1.0

    for i, step in enumerate(steps):
        print('===== select rho =====')
        postprocess(output_file, cubes_d, points_numbers_d, cube_positions_d,
                    scale, cube_size, rho)
        results = pc_error(input_file,
                           output_file,
                           input_file_n,
                           res,
                           show=False)
        """
        # record results.
        results["n_points"] = get_points_number(output_file)
        results["rho"] = rho
        if i == 0:
            all_results = results.copy(deep=True)
        else:
            all_results = all_results.append(results, ignore_index=True)
        """

        PSNR = float(results[item])
        print('===== results: ', i, rho, item, PSNR)

        MAX = max(PSNR, MAX)
        if PSNR < MAX:
            break
        else:
            optimal_rho = rho

        if item == "mseF,PSNR (p2point)":
            rho += step
        elif item == "mseF,PSNR (p2plane)":
            rho -= step
        else:
            print('ERROR: unsupported metric item:', item)
            break

    return optimal_rho
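The search above is tied to postprocess() and pc_error(). As a standalone illustration of the same coarse-to-fine step schedule and stop-on-first-drop rule, here is a minimal sketch against a toy quality curve; toy_psnr and select_rho_sketch are hypothetical names, not part of the codebase.

# Minimal, self-contained sketch of the coarse-to-fine rho search above.
# A toy quality curve stands in for postprocess() + pc_error(); the step
# schedule and the stop-on-first-drop rule mirror select_rho().
def toy_psnr(rho):
    # Hypothetical unimodal quality curve peaking at rho = 1.3.
    return 60.0 - (rho - 1.3) ** 2


def select_rho_sketch(quality_fn, direction=+1):
    steps = [0.02] * 4 + [0.04] * 4 + [0.08] * 4 + [0.16] * 4 + [0.32] * 4
    best, rho, optimal_rho = 0.0, 1.0, 1.0
    for step in steps:
        psnr = quality_fn(rho)
        best = max(psnr, best)
        if psnr < best:           # quality dropped below the running peak: stop
            break
        optimal_rho = rho         # keep the last rho that did not hurt quality
        rho += direction * step   # +1 walks rho upward (p2point), -1 downward (p2plane)
    return optimal_rho


print(select_rho_sketch(toy_psnr, direction=+1))  # ~1.32 for this toy curve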
Example #3
def eval(input_file, rootdir, resolution, mode, cube_size, modelname,
         fixed_thres, postfix):
    # model = 'model_voxception'
    model = importlib.import_module(modelname)

    filename = os.path.split(input_file)[-1][:-4]
    output_file = filename + '_rec_' + postfix + '.ply'
    input_file_n = input_file
    csv_rootdir = os.path.join(rootdir, "csv")
    if not os.path.exists(csv_rootdir):
        os.makedirs(csv_rootdir)
    cfg_rootdir = os.path.join(rootdir, "cfg")
    if not os.path.exists(cfg_rootdir):
        os.makedirs(cfg_rootdir)
    # default config
    config, config_file = set_default_config(input_file, cfg_rootdir,
                                             resolution, mode, cube_size,
                                             modelname)
    cube_size = config.getint('DEFAULT', 'cube_size')
    min_num = config.getint('DEFAULT', 'min_num')
    res = config.getint('DEFAULT', 'resolution')
    print('cube size:', cube_size, 'min num:', min_num, 'res:', res)

    for index, rate in enumerate(config.sections()):
        scale = float(config.get(rate, 'scale'))
        ckpt_dir = str(config.get(rate, 'ckpt_dir'))
        print('====================', 'config:', rate, 'scale:', scale,
              'ckpt_dir:', ckpt_dir)

        if mode == "factorized":
            cubes_d, cube_positions, points_numbers, N, bpps = test_factorized(
                input_file, model, ckpt_dir, scale, cube_size, min_num,
                postfix)
        elif mode == "hyper":
            cubes_d, cube_positions, points_numbers, N, bpps = test_hyper(
                input_file, model, ckpt_dir, scale, cube_size, min_num,
                postfix)
        cubes_d = cubes_d.numpy()
        print("bpp:", bpps[0])
        # select rho for optimal d1/d2 metrics.
        if fixed_thres is None:
            rho_d1, rho_d2 = cfg_post_process(config, config_file, rate,
                                              input_file, output_file,
                                              input_file_n, cubes_d,
                                              points_numbers, cube_positions,
                                              scale, cube_size, res)
        else:
            rho_d1, rho_d2 = 1.0, 1.0

        # metrics.
        rho = 1.0
        postprocess(output_file, cubes_d, points_numbers, cube_positions,
                    scale, cube_size, rho, fixed_thres)
        results = pc_error(input_file,
                           output_file,
                           input_file_n,
                           res,
                           show=False)

        rho = rho_d1
        postprocess(output_file, cubes_d, points_numbers, cube_positions,
                    scale, cube_size, rho, fixed_thres)
        results_d1 = pc_error(input_file,
                              output_file,
                              input_file_n,
                              res,
                              show=False)

        rho = rho_d2
        postprocess(output_file, cubes_d, points_numbers, cube_positions,
                    scale, cube_size, rho, fixed_thres)
        results_d2 = pc_error(input_file,
                              output_file,
                              input_file_n,
                              res,
                              show=False)

        results = collect_results(results, results_d1, results_d2, bpps, N,
                                  scale, rho_d1, rho_d2)

        if index == 0:
            all_results = results.copy(deep=True)
        else:
            all_results = all_results.append(results, ignore_index=True)

    # write to csv
    print(all_results)
    if not os.path.exists(csv_rootdir):
        os.makedirs(csv_rootdir)
    csv_name = os.path.join(csv_rootdir, filename + '.csv')
    all_results.to_csv(csv_name, index=False)
    # plot
    plot_results(all_results, filename, csv_rootdir)
    return all_results
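set_default_config() is not shown here, but the reads above (DEFAULT keys cube_size, min_num, resolution; one section per rate with scale and ckpt_dir, later extended with rho_d1/rho_d2 as in Example #4) imply an INI layout along the following lines. Section names, values and checkpoint paths below are placeholders, not values from the repository.

# Hypothetical sketch of the INI layout implied by the configparser reads
# above; every concrete value and path here is a placeholder.
import configparser

config = configparser.ConfigParser()
config['DEFAULT'] = {'cube_size': '64', 'min_num': '64', 'resolution': '1024'}
config['r01'] = {'scale': '0.375', 'ckpt_dir': './checkpoints/r01/'}
config['r02'] = {'scale': '0.625', 'ckpt_dir': './checkpoints/r02/'}

# The rho selection step can write its result back into the rate section,
# as Example #4 does with config.set(rate, 'rho_d1', ...):
config.set('r01', 'rho_d1', '1.0')
config.set('r01', 'rho_d2', '1.0')

with open('demo.cfg', 'w') as f:
    config.write(f)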
Example #4
def eval(input_file, config, config_file):
    # model = 'model_voxception'
    filename = os.path.split(input_file)[-1][:-4]
    output_file = filename + '_rec.ply'
    # input_file_n = input_file[:-4]+'_n.ply'
    input_file_n = input_file    
    
    cube_size = config.getint('DEFAULT', 'cube_size')
    min_num = config.getint('DEFAULT', 'min_num')
    res = config.getint('DEFAULT', 'resolution')

    print('cube size:', cube_size, 'min num:', min_num, 'res:', res)

    for index, rate in enumerate(config.sections()):
        scale = float(config.get(rate, 'scale'))
        ckpt_dir = str(config.get(rate, 'ckpt_dir'))
        print('====================', 'config:', rate, 'scale:', scale, 'ckpt_dir:', ckpt_dir)

        # Pre-process
        cubes, cube_positions, points_numbers = preprocess(input_file, scale, cube_size, min_num)
        # Encoding. compress_less_mem() and `model` are assumed to be defined
        # at module scope (cf. importlib.import_module in the other examples).
        y_strings, y_min_vs, y_max_vs, y_shape, z_strings, z_min_v, z_max_v, z_shape, x_ds = \
            compress_less_mem(cubes, model, ckpt_dir, True)
        # Write files
        filename = os.path.split(input_file)[-1][:-4]
        print(filename)
        rootdir = './compressed/'
        bytes_strings, bytes_strings_head, bytes_strings_hyper, bytes_pointnums, bytes_cubepos = \
            write_binary_files_less_mem(
                filename, y_strings.numpy(), z_strings.numpy(), points_numbers, cube_positions,
                y_min_vs.numpy(), y_max_vs.numpy(), y_shape.numpy(),
                z_min_v.numpy(), z_max_v.numpy(), z_shape.numpy(), rootdir)
        # Read files
        (y_strings_d, z_strings_d, points_numbers_d, cube_positions_d,
         y_min_vs_d, y_max_vs_d, y_shape_d,
         z_min_v_d, z_max_v_d, z_shape_d) = read_binary_files_less_mem(filename, rootdir)
        # Decoding
        cubes_d = decompress_less_mem(y_strings_d, y_min_vs_d.astype('int32'), y_max_vs_d.astype('int32'), 
                                      y_shape_d, z_strings_d, z_min_v_d, z_max_v_d, z_shape_d, model, ckpt_dir)
        # cheat: override the decoded cubes with the encoder-side
        # reconstruction x_ds instead of the actual decompressed output.
        cubes_d = x_ds
        # bpp
        N = get_points_number(input_file)
        bpp = round(8*(bytes_strings + 
                       bytes_strings_head + 
                       bytes_strings_hyper + 
                       bytes_pointnums + 
                       bytes_cubepos)/float(N), 4)

        bpp_strings = round(8*bytes_strings/float(N), 4)
        bpp_strings_head = round(8*bytes_strings_head/float(N), 4)
        bpp_strings_hyper = round(8*bytes_strings_hyper/float(N), 4)
        bpp_pointsnums = round(8*bytes_pointnums/float(N), 4)
        bpp_cubepos = round(8*bytes_cubepos/float(N), 4)

        ########## Post-process ##########
        # select rho for optimal d1/d2 metrics.
        if config.has_option(rate, 'rho_d1'):
            rho_d1 = float(config.get(rate, 'rho_d1'))
        else:
            rho_d1 = select_rho("mseF,PSNR (p2point)", input_file, output_file, input_file_n, 
                            cubes_d, points_numbers_d, cube_positions_d, scale, cube_size, res)
            config.set(rate, 'rho_d1', str(rho_d1)) 
            config.write(open(config_file, 'w'))

        if config.has_option(rate, 'rho_d2'):
            rho_d2 = float(config.get(rate, 'rho_d2'))
        else:
            rho_d2 = select_rho("mseF,PSNR (p2plane)", input_file, output_file, input_file_n, 
                            cubes_d, points_numbers_d, cube_positions_d, scale, cube_size, res)
            config.set(rate, 'rho_d2', str(rho_d2))
            config.write(open(config_file, 'w'))

        # metrics.
        for index_rho, rho in enumerate((1.0, rho_d1, rho_d2)):
            postprocess(output_file, cubes_d, points_numbers_d, cube_positions_d, scale, cube_size, rho)

            # distortion
            results = pc_error(input_file, output_file, input_file_n, res, show=False)

            # bpp
            results["n_points"] = get_points_number(output_file)
            results["rho"] = rho
            results["ori_points"] = N
            results["scale"] = scale
            results["bpp_strings"] = bpp_strings
            results["bpp_strings_head"] = bpp_strings_head
            results["bpp_strings_hyper"] = bpp_strings_hyper
            results["bpp_pointsnums"] = bpp_pointsnums
            results["bpp_cubepos"] = bpp_cubepos
            results["bpp"] = bpp

            print(results)

            if index_rho == 0:
                if index == 0:
                    all_results = results.copy(deep=True)
                else:
                    all_results = all_results.append(results, ignore_index=True)
            elif index_rho == 1:
                if index == 0:
                    all_results_d1 = results.copy(deep=True)
                else:
                    all_results_d1 = all_results_d1.append(results, ignore_index=True)
            else:
                if index == 0:
                    all_results_d2 = results.copy(deep=True)
                else:
                    all_results_d2 = all_results_d2.append(results, ignore_index=True)    

    # write to csv
    print(all_results)
    print(all_results_d1)
    print(all_results_d2)
    csv_root_dir = './CSV/hyper/'
    if not os.path.exists(csv_root_dir):
        os.makedirs(csv_root_dir)

    csv_name = os.path.join(csv_root_dir, filename + '.csv')
    all_results.to_csv(csv_name, index=False)

    csv_name_d1 = os.path.join(csv_root_dir, filename + '_d1.csv')
    all_results_d1.to_csv(csv_name_d1, index=False)

    csv_name_d2 = os.path.join(csv_root_dir, filename + '_d2.csv')
    all_results_d2.to_csv(csv_name_d2, index=False)
    
    return all_results, all_results_d1, all_results_d2
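The bits-per-point bookkeeping in the loop above is simple to isolate: each bitstream component is converted to bits (×8) and normalized by the original point count. A minimal sketch with a hypothetical helper name and made-up sizes:

# Minimal sketch of the bits-per-point arithmetic used above: each component
# size in bytes is converted to bits and divided by the original point count
# N, then rounded to 4 decimals. bits_per_point is a hypothetical name.
def bits_per_point(byte_sizes, n_points, precision=4):
    """byte_sizes: dict mapping component name -> size in bytes."""
    per_component = {k: round(8 * v / float(n_points), precision)
                     for k, v in byte_sizes.items()}
    total = round(8 * sum(byte_sizes.values()) / float(n_points), precision)
    return per_component, total


# Example with made-up sizes for a 700k-point cloud:
components = {'strings': 90_000, 'strings_head': 1_200,
              'strings_hyper': 6_500, 'pointnums': 2_400, 'cubepos': 800}
per_component, bpp = bits_per_point(components, n_points=700_000)
print(per_component, bpp)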
Example #5
def eval(input_file, rootdir, cfgdir, res, mode, cube_size, modelname, fixed_thres, postfix):
    # model = 'model_voxception'
    model = importlib.import_module(modelname)

    filename = os.path.split(input_file)[-1][:-4]
    input_file_n = input_file    
    csv_rootdir = rootdir
    if not os.path.exists(csv_rootdir):
        os.makedirs(csv_rootdir)
    csv_name = os.path.join(csv_rootdir, filename + '.csv')

    config = configparser.ConfigParser()
    config.read(cfgdir)

    cube_size = config.getint('DEFAULT', 'cube_size')
    min_num = config.getint('DEFAULT', 'min_num')
    print('cube size:', cube_size, 'min num:', min_num, 'res:', res)

    for index, rate in enumerate(config.sections()):
        scale = float(config.get(rate, 'scale'))
        ckpt_dir = str(config.get(rate, 'ckpt_dir'))
        rho_d1 = float(config.get(rate, 'rho_d1'))
        rho_d2 = float(config.get(rate, 'rho_d2'))
        print('='*80, '\n', 'config:', rate, 'scale:', scale, 'ckpt_dir:', ckpt_dir, 'rho_d1:', rho_d1, 'rho_d2:', rho_d2)

        if mode=="factorized":
            cubes_d, cube_positions, points_numbers, N, bpps = test_factorized(input_file, model, ckpt_dir, scale, cube_size, min_num, postfix)
        elif mode == "hyper":
            cubes_d, cube_positions, points_numbers, N, bpps = test_hyper(input_file, model, ckpt_dir, scale, cube_size, min_num, postfix)
        cubes_d = cubes_d.numpy()
        print("bpp:",bpps[0])

        # metrics.
        rho = 1.0
        output_file = filename + '_rec_' + str(rate) + '_' + 'rho' + str(round(rho*100)) + postfix + '.ply'
        postprocess(output_file, cubes_d, points_numbers, cube_positions, scale, cube_size, rho, fixed_thres)
        results = pc_error(input_file, output_file, input_file_n, res, show=False)

        rho = rho_d1
        output_file = filename + '_rec_' + str(rate) + '_' + 'rho' + str(round(rho*100)) + postfix + '.ply'
        postprocess(output_file, cubes_d, points_numbers, cube_positions, scale, cube_size, rho, fixed_thres)
        results_d1 = pc_error(input_file, output_file, input_file_n, res, show=False)

        rho = rho_d2
        output_file = filename + '_rec_' + str(rate) + '_' + 'rho' + str(round(rho*100)) + postfix + '.ply'
        postprocess(output_file, cubes_d, points_numbers, cube_positions, scale, cube_size, rho, fixed_thres)
        results_d2 = pc_error(input_file, output_file, input_file_n, res, show=False)
         
        results = collect_results(results, results_d1, results_d2, bpps, N, scale, rho_d1, rho_d2)

        if index == 0:
            all_results = results.copy(deep=True)
        else:
            all_results = all_results.append(results, ignore_index=True)

        all_results.to_csv(csv_name, index=False)

    print(all_results)
    plot_results(all_results, filename, csv_rootdir)

    return all_results
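The examples above accumulate one result row per rate with DataFrame.copy on the first iteration and DataFrame.append afterwards; DataFrame.append was removed in pandas 2.0, so on current pandas the same accumulation is usually written with pd.concat. A minimal sketch with dummy rows (collect_results() supplies the real ones in eval()):

# Minimal sketch of the per-rate result accumulation, written with pd.concat
# instead of the removed DataFrame.append. The rows here are dummies.
import pandas as pd

rows = []
for rate in ('r1', 'r2', 'r3'):
    results = pd.DataFrame([{'rate': rate, 'bpp': 0.0, 'mseF,PSNR (p2point)': 0.0}])
    rows.append(results)

all_results = pd.concat(rows, ignore_index=True)
all_results.to_csv('demo.csv', index=False)
print(all_results)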