Example no. 1
    vox_str = f'{args.vox:02}'

    with open(args.experiment_path, 'r') as f:
        experiments = yaml.load(f.read(), Loader=yaml.FullLoader)

    logger.info('Starting')
    params = []
    for experiment in experiments:
        pc_name, input_pc, input_norm, mpeg_mode = [
            experiment[x]
            for x in ['pc_name', 'input_pc', 'input_norm', 'mpeg_mode']
        ]
        input_filepath = os.path.join(args.dataset_dir, input_pc)
        new_input_pc = re.sub(r'vox\d\d', f'vox{vox_str}',
                              os.path.split(input_pc)[1])
        output_filepath = os.path.join(args.dataset_dir, f'vox{vox_str}',
                                       new_input_pc)
        params.append((input_filepath, output_filepath, args.vox))
        if input_norm != input_pc:
            input_norm_filepath = os.path.join(args.dataset_dir, input_norm)
            new_input_norm = re.sub(r'vox\d\d', f'vox{vox_str}',
                                    os.path.split(input_norm)[1])
            output_norm_filepath = os.path.join(args.dataset_dir,
                                                f'vox{vox_str}',
                                                new_input_norm)
            params.append(
                (input_norm_filepath, output_norm_filepath, args.vox))

    parallel_process(run, params, args.num_parallel)
    logger.info('Finished')
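
All of these scripts dispatch their work through a parallel_process(fn, params, num_parallel) helper that is not shown in the snippets. A minimal sketch of such a helper, assuming it simply maps the worker over the tuples of arguments with a process pool (the project's actual implementation may differ):

import multiprocessing

def parallel_process(fn, params, num_parallel):
    # Run fn(*args) for every tuple of arguments in params.
    if num_parallel <= 1:
        # Sequential fallback, useful e.g. when writing to an HDD.
        return [fn(*args) for args in params]
    with multiprocessing.Pool(num_parallel) as pool:
        return pool.starmap(fn, params)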
Example no. 2
            mpeg_output_dir = os.path.join(output_path, mpeg_id, pc_name)
            for rate in rates:
                current_mpeg_output_dir = os.path.join(mpeg_output_dir, rate)
                mpeg_cfg_path = f'{MPEG_TMC13_DIR}/cfg/{mpeg_id}/{cfg_name}/{rate}'
                pc_path = os.path.join(EXPERIMENT_DIR, 'gpcc', mpeg_id,
                                       pc_name, rate,
                                       f'{pc_name}.ply.bin.decoded.ply')
                if not os.path.exists(pc_path):
                    params.append((current_mpeg_output_dir, mpeg_cfg_path,
                                   input_pc, input_norm))
                else:
                    logger.info(f'{pc_path} exists')
    logger.info('Started GPCC experiments')
    # An SSD is highly recommended: running in parallel on an HDD is extremely
    # slow because of the concurrent writes. If using an HDD, set parallelism to 1.
    parallel_process(run_mpeg_experiment, params, args.num_parallel)

    logger.info('Finished GPCC experiments')

    logger.info('Starting point cloud recoloring')
    params = []
    for experiment in experiments['data']:
        pc_name, input_pc = [experiment[x] for x in ['pc_name', 'input_pc']]
        input_pc_full = os.path.join(MPEG_DATASET_DIR, input_pc)
        for model_config in mpeg_modes:
            mpeg_id = model_config['id']
            for rate in rates:
                pc_path = os.path.join(EXPERIMENT_DIR, 'gpcc', mpeg_id,
                                       pc_name, rate,
                                       f'{pc_name}.ply.bin.decoded.ply')
                cur_output_path = pc_path + '.color.ply'
        #                 radius = 9
        #         else:
        #             radius = 9
        radius = DEFAULT_RADIUS
        radiuses.append(radius)
    temp_df['radius'] = radiuses
    print(radiuses)

    os.makedirs(new_db_path, exist_ok=True)
    params = []
    for _, row in temp_df.iterrows():
        old_relative_path = row['old_relative_path']
        relative_path = row['relative_path']
        radius = row['radius']
        old_path = os.path.join(db_path, old_relative_path)
        path = os.path.join(new_db_path, relative_path)
        if not os.path.exists(path):
            logging.info(f'Process {old_path} to {path}')
            params.append((cc_path, old_path, path, radius, args.format))
        else:
            logging.info(f'Process {old_path} to {path}: already exists')
    parallel_process(cc_convert, params, args.n_parallel)

    final_df = temp_df.copy()
    final_df = final_df.drop(columns='old_relative_path')
    df_path = os.path.join(new_db_path, 'dataset.csv')
    final_df.to_csv(df_path, index=False)

    logger.info(
        f'Dataset information written to {df_path}: {len(final_df)} rows.')
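
The cc_convert worker used above is not part of the snippet. A possible sketch, assuming it shells out to the CloudCompare command-line tool pointed to by cc_path to recompute normals and re-export the cloud; the exact flags (normal-estimation radius, export format) are assumptions and should be checked against the installed CloudCompare version:

import os
import subprocess

def cc_convert(cc_path, old_path, path, radius, out_format):
    # Hypothetical CloudCompare invocation: open the input cloud, estimate
    # normals with the given octree radius and save it in the requested format.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    subprocess.run([
        cc_path, '-SILENT', '-AUTO_SAVE', 'OFF',
        '-O', old_path,
        '-OCTREE_NORMALS', str(radius),
        '-C_EXPORT_FMT', out_format,
        '-SAVE_CLOUDS', 'FILE', path
    ], check=True)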
Example no. 4
                    pc_output_dir = os.path.join(EXPERIMENT_DIR, pc_name,
                                                 'results', eval_id)
                    paths, patterns, ids, labels = zip(*data)
                    params.append((paths, patterns, ids, labels, pc_output_dir,
                                   opt_group, args.no_stream_redirection,
                                   args.path_filter, [opt_group], cur_rcParams,
                                   bd_ignore, no_legend, cur_lims))
                    conditions.append({
                        'pc_name': pc_name,
                        'eval_id': eval_id,
                        'opt_group': opt_group
                    })
                else:
                    logger.warning(
                        f'Omitting eval condition {eval_id} for {pc_name}')
    parallel_process(run_compare, params, args.num_parallel)

    logger.info('Merging data')
    merged_output_dir = os.path.join(EXPERIMENT_DIR, 'results')
    os.makedirs(merged_output_dir, exist_ok=True)
    data_types = ['data', 'bdrate', 'bdsnr']
    for data_type in data_types:
        df_list = []
        for cond in conditions:
            csv_file = os.path.join(
                EXPERIMENT_DIR, cond['pc_name'], 'results', cond['eval_id'],
                f"{cond['opt_group']}_opt_rd_curve_{cond['opt_group']}_{data_type}.csv"
            )
            df = pd.read_csv(csv_file)
            for k, v in cond.items():
                df.insert(0, k, v)
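
The example is cut off inside the merging loop; a plausible continuation, assuming each per-condition dataframe is collected and one merged CSV per data type is written to merged_output_dir (the file naming is an assumption):

            df_list.append(df)
        merged_df = pd.concat(df_list, ignore_index=True)
        merged_csv = os.path.join(merged_output_dir, f'{data_type}.csv')
        merged_df.to_csv(merged_csv, index=False)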
Example no. 5
                                                   fixed_threshold)
            for lmbda in lambdas:
                lmbda_str = f'{lmbda:.2e}'
                checkpoint_id = model_config.get('checkpoint_id', model_id)
                model_dir = os.path.join(EXPERIMENT_DIR, 'models',
                                         checkpoint_id, lmbda_str)
                current_output_dir = os.path.join(opt_output_dir, model_id,
                                                  lmbda_str)

                pcerror_cfg_path = f'{MPEG_TMC13_DIR}/cfg/{pcerror_mpeg_mode}/{cfg_name}/r06/pcerror.cfg'
                input_pc_full = os.path.join(MPEG_DATASET_DIR, input_pc)
                input_norm_full = os.path.join(MPEG_DATASET_DIR, input_norm)
                if not os.path.exists(os.path.join(model_dir, 'done')):
                    logger.warning(
                        f'Model training is not finished: skipping {model_dir} for {pc_name}'
                    )
                else:
                    opt_groups = ['d1', 'd2']
                    if not all(
                            os.path.exists(
                                os.path.join(current_output_dir,
                                             f'report_{g}.json'))
                            for g in opt_groups):
                        params.append(
                            (current_output_dir, model_dir, config, pc_name,
                             PCERROR, pcerror_cfg_path, input_pc_full,
                             input_norm_full, cur_opt_metrics, cur_max_deltas,
                             cur_fixed_threshold, args.no_stream_redirection))
    parallel_process(run_experiment, params, args.num_parallel)
    logger.info('Done')
def run_experiment(output_dir,
                   model_dir,
                   model_config,
                   pc_name,
                   pcerror_path,
                   pcerror_cfg_path,
                   input_pc,
                   input_norm,
                   opt_metrics,
                   max_deltas,
                   fixed_threshold,
                   no_merge_coding,
                   num_parallel,
                   no_stream_redirection=False):
    for f in [model_dir, pcerror_path, pcerror_cfg_path, input_pc, input_norm]:
        assert_exists(f)
    validate_opt_metrics(opt_metrics, with_normals=input_norm is not None)
    with open(pcerror_cfg_path, 'r') as f:
        pcerror_cfg = yaml.load(f.read(), Loader=yaml.FullLoader)

    opt_group = ['d1', 'd2']
    enc_pc_filenames = [f'{pc_name}_{x}.ply.bin' for x in opt_group]
    dec_pc_filenames = [f'{x}.ply' for x in enc_pc_filenames]
    dec_pc_color_filenames = [f'{x}.color.ply' for x in dec_pc_filenames]
    pcerror_result_filenames = [f'{x}.pc_error' for x in dec_pc_filenames]
    enc_pcs = [os.path.join(output_dir, x) for x in enc_pc_filenames]
    dec_pcs = [os.path.join(output_dir, x) for x in dec_pc_filenames]
    dec_pcs_color = [
        os.path.join(output_dir, x) for x in dec_pc_color_filenames
    ]
    pcerror_results = [
        os.path.join(output_dir, x) for x in pcerror_result_filenames
    ]
    exp_reports = [
        os.path.join(output_dir, f'report_{x}.json') for x in opt_group
    ]

    compress_log = os.path.join(output_dir, 'compress.log')
    decompress_log = os.path.join(output_dir, 'decompress.log')

    # Create folder
    os.makedirs(output_dir, exist_ok=True)

    resolution = pcerror_cfg['resolution']

    # Encoding or Encoding/Decoding with merge_coding option
    if all(os.path.exists(x)
           for x in enc_pcs) and (no_merge_coding
                                  or all(os.path.exists(x) for x in dec_pcs)):
        print_progress(input_pc, enc_pcs, '(exists)')
    else:
        print_progress(input_pc, enc_pcs)
        with ExitStack() as stack:
            if no_stream_redirection:
                f = None
            else:
                f = open(compress_log, 'w')
                stack.enter_context(f)
            additional_params = []
            if not no_merge_coding:
                additional_params += ['--dec_files', *dec_pcs]
            if fixed_threshold:
                additional_params += ['--fixed_threshold']
            subprocess.run(
                [
                    'python',
                    'compress_octree.py',  # '--debug',
                    '--input_files',
                    input_pc,
                    '--input_normals',
                    input_norm,
                    '--output_files',
                    *enc_pcs,
                    '--checkpoint_dir',
                    model_dir,
                    '--opt_metrics',
                    *opt_metrics,
                    '--max_deltas',
                    *map(str, max_deltas),
                    '--resolution',
                    str(resolution + 1),
                    '--model_config',
                    model_config
                ] + additional_params,
                stdout=f,
                stderr=f,
                check=True)

    # Decoding, skipped with merge_coding option
    if all(os.path.exists(x) for x in dec_pcs):
        print_progress(enc_pcs, dec_pcs, '(exists)')
    elif not no_merge_coding:
        print_progress(enc_pcs, dec_pcs, '(merge_coding)')
    else:
        print_progress(enc_pcs, dec_pcs)
        with ExitStack() as stack:
            if no_stream_redirection:
                f = None
            else:
                f = open(decompress_log, 'w')
                stack.enter_context(f)
            subprocess.run(
                [
                    'python',
                    'decompress_octree.py',  # '--debug',
                    '--input_files',
                    *enc_pcs,
                    '--output_files',
                    *dec_pcs,
                    '--checkpoint_dir',
                    model_dir,
                    '--model_config',
                    model_config
                ],
                stdout=f,
                stderr=f,
                check=True)

    # Color mapping
    mc_params = []
    if all(os.path.exists(x) for x in dec_pcs_color):
        print_progress(dec_pcs, dec_pcs_color, '(exists)')
    else:
        for dp, dpc in zip(dec_pcs, dec_pcs_color):
            print_progress(dp, dpc)
            mc_params.append((input_pc, dp, dpc))
    parallel_process(run_mapcolor, mc_params, num_parallel)

    pcerror_cfg_params = [[f'--{k}', str(v)] for k, v in pcerror_cfg.items()]
    pcerror_cfg_params = flatten(pcerror_cfg_params)
    params = []
    for pcerror_result, decoded_pc in zip(pcerror_results, dec_pcs):
        if os.path.exists(pcerror_result):
            print_progress(decoded_pc, pcerror_result, '(exists)')
        else:
            print_progress(decoded_pc, pcerror_result)
            params.append((decoded_pc, input_norm, input_pc,
                           pcerror_cfg_params, pcerror_path, pcerror_result))
    parallel_process(run_pcerror, params, num_parallel)

    for pcerror_result, enc_pc, decoded_pc, experiment_report in zip(
            pcerror_results, enc_pcs, dec_pcs, exp_reports):
        if os.path.exists(experiment_report):
            print_progress('all', experiment_report, '(exists)')
        else:
            print_progress('all', experiment_report)
            pcerror_data = mpeg_parsing.parse_pcerror(pcerror_result)

            pos_total_size_in_bytes = os.stat(enc_pc).st_size
            input_point_count = len(PyntCloud.from_file(input_pc).points)
            data = {
                'pos_total_size_in_bytes': pos_total_size_in_bytes,
                'pos_bits_per_input_point':
                pos_total_size_in_bytes * 8 / input_point_count,
                'input_point_count': input_point_count
            }
            data = {**data, **pcerror_data}
            with open(experiment_report, 'w') as f:
                json.dump(data, f, sort_keys=True, indent=4)

            # Debug
            with open(enc_pc + '.enc.metric.json', 'r') as f:
                enc_metrics = json.load(f)
            diff = abs(enc_metrics['d1_psnr'] - data['d1_psnr'])
            logger.info(f'D1 PSNR diff between encoder and decoder: {diff}')
            assert diff < 0.01, f'encoded {enc_pc} with D1 {enc_metrics["d1_psnr"]} but decoded {decoded_pc} with D1 {data["d1_psnr"]}dB'

    logger.info('Done')
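
run_experiment also relies on a few small helpers (assert_exists, flatten, print_progress) defined elsewhere in the project. Minimal sketches consistent with how they are called here, offered as assumptions rather than the project's actual definitions:

import os

def assert_exists(path):
    # Fail early if an input file or directory is missing.
    assert os.path.exists(path), f'{path} does not exist'

def flatten(list_of_lists):
    # [['--a', '1'], ['--b', '2']] -> ['--a', '1', '--b', '2']
    return [item for sublist in list_of_lists for item in sublist]

def print_progress(src, dst, suffix=''):
    # Lightweight logging of a processing step from src to dst.
    logger.info(f'{src} -> {dst} {suffix}'.strip())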
Example no. 7
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        prog='pc_to_vg_batch.py',
        description='Converts point clouds to voxelized point clouds.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('input_pattern', help='Input pattern.')
    parser.add_argument('output_dir', help='Output directory.')
    parser.add_argument('--vg_size',
                        help='Voxel grid resolution.',
                        default=64,
                        type=int)
    parser.add_argument('--num_parallel',
                        help='Number of parallel processes.',
                        default=multiprocessing.cpu_count(),
                        type=int)
    args = parser.parse_args()

    input_files = glob.glob(args.input_pattern, recursive=True)
    input_directory = os.path.normpath(
        os.path.commonprefix([os.path.split(x)[0] for x in input_files]))
    os.makedirs(args.output_dir, exist_ok=True)
    filenames = [x[len(input_directory) + 1:] for x in input_files]
    output_files = [os.path.join(args.output_dir, x) for x in filenames]

    assert all(s.startswith(args.output_dir)
               for s in output_files), 'Error during path processing'

    params = list(zip(input_files, output_files))
    parallel_process(process, params, args.num_parallel)
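
The process worker for this batch script is not shown. A minimal sketch of the voxelization step, assuming it loads the cloud with PyntCloud, quantizes the coordinates onto a vg_size grid and writes the result (in the real script vg_size presumably comes from args.vg_size, and duplicate/color handling may differ):

import os
import numpy as np
import pandas as pd
from pyntcloud import PyntCloud

def process(input_file, output_file, vg_size=64):
    cloud = PyntCloud.from_file(input_file)
    pts = cloud.points[['x', 'y', 'z']].values
    # Shift to the origin, scale into [0, vg_size - 1] and snap to the grid.
    pts = pts - pts.min(axis=0)
    pts = pts / pts.max() * (vg_size - 1)
    pts = np.unique(np.round(pts), axis=0)
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    points = pd.DataFrame(pts.astype(np.float32), columns=['x', 'y', 'z'])
    PyntCloud(points).to_file(output_file)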
def run(input_file,
        output_folder,
        k='auto',
        max_steps=10000,
        point_size=1,
        num_parallel=multiprocessing.cpu_count()):
    filename = os.path.split(input_file)[1]
    filename_without_ext, ext = os.path.splitext(filename)
    model_folder = os.path.join(output_folder, 'model')
    eval_folder = os.path.join(output_folder, 'eval')
    patches_folder = os.path.join(output_folder, 'patches')
    patches_output = os.path.join(patches_folder, filename)

    ori_path = os.path.join(output_folder, filename)
    if not os.path.exists(ori_path):
        shutil.copy(input_file, ori_path)

    logger.info('Creating patches')
    if not os.path.exists(patches_folder):
        subprocess.run(['python', '10_pc_to_patch.py', input_file, patches_output, '--n_patches', str(k)], check=True)
    regexp = re.compile(r'_\d\d\.ply$')
    k = len(list(filter(lambda x: regexp.search(x), glob(os.path.join(patches_folder, '*.ply')))))
    assert k > 0

    logger.info('Init metadata')
    patches_data = {}
    for i in range(k):
        i_str = f'{i:02}'
        current_model_folder = os.path.join(model_folder, i_str)
        current_eval_folder = os.path.join(eval_folder, i_str)
        current_input_file = os.path.join(patches_folder, f'{filename_without_ext}_{i_str}.ply')
        patches_data[i] = {
            'model_folder': current_model_folder,
            'input_file': current_input_file,
            'eval_folder': current_eval_folder,
        }
    logger.info("\n" + pformat(patches_data))

    logger.info('Fitting point clouds')
    for i in range(k):
        pdata = patches_data[i]
        if not os.path.exists(pdata['model_folder']):
            subprocess.run(['python', '11_train.py',
                            pdata['input_file'], pdata['model_folder'],
                            '--max_steps', str(max_steps),
                            '--grid_steps', 'auto', '--model', '80_model', '--input_pipeline', '80_input',
                            ], check=True)

    logger.info('Generate foldings')
    for i in range(k):
        pdata = patches_data[i]
        if not os.path.exists(os.path.join(pdata['eval_folder'], 'folding_data.npz')):
            subprocess.run(['python', '20_gen_folding.py', pdata['input_file'], pdata['eval_folder'], pdata['model_folder'],
                            '--grid_steps', 'auto', '--model', '80_model', '--input_pipeline', '80_input'], check=True)

    logger.info('Eval point cloud attribute compression')
    params = []
    for i in range(k):
        pdata = patches_data[i]
        if not os.path.exists(os.path.join(pdata['eval_folder'], 'data.csv')):
            params.append((pdata['eval_folder'], ))
    parallel_process(eval_folding, params, num_parallel)

    logger.info('Merge patches and eval resulting point cloud')
    params = []
    for prefix in ['', 'refined_', 'refined_opt_']:
        qp_range = range(20, 55, 5)
        for qp in qp_range:
            qp_str = f'{prefix}qp_{qp:02}'
            merged_folder = os.path.join(output_folder, prefix + 'merged')
            current_output_folder = os.path.join(merged_folder, qp_str)
            current_output_file = os.path.join(current_output_folder, filename)
            current_output_report = os.path.join(current_output_folder, 'report.json')

            remapped_patches = glob(os.path.join(eval_folder, '*', qp_str, '*_remap.ply'))
            remapped_reports = glob(os.path.join(eval_folder, '*', qp_str, 'report.json'))
            assert len(remapped_patches) == k, f'Found {len(remapped_patches)} instead of {k} patches for qp {qp}'

            params.append((current_output_file, remapped_patches,
                           current_output_report, remapped_reports,
                           input_file, point_size))
    parallel_process(merge_and_eval, params, num_parallel)

    logger.info('Done')
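
For completeness, a small command-line wrapper in the same style as the other scripts could drive run() directly; the script name and argument names below are illustrative only:

# assumes: import argparse, multiprocessing at the top of the module
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        prog='patch_pipeline.py',
        description='Per-patch fitting, folding, evaluation and merging.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input_file', help='Input point cloud.')
    parser.add_argument('output_folder', help='Output directory.')
    parser.add_argument('--k', help='Number of patches.', default='auto')
    parser.add_argument('--max_steps', help='Training steps per patch.',
                        default=10000, type=int)
    parser.add_argument('--num_parallel',
                        help='Number of parallel processes.',
                        default=multiprocessing.cpu_count(),
                        type=int)
    args = parser.parse_args()
    run(args.input_file, args.output_folder, k=args.k,
        max_steps=args.max_steps, num_parallel=args.num_parallel)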