# Launches a single G-PCC (TMC13) rate point through the TMC13 step Makefile
# and returns the Popen handle so several runs can be executed in parallel.
def run_mpeg_experiment(current_mpeg_output_dir, mpeg_cfg_path, input_pc, input_norm):
    input_pc_full = os.path.join(MPEG_DATASET_DIR, input_pc)
    input_norm_full = os.path.join(MPEG_DATASET_DIR, input_norm)
    os.makedirs(current_mpeg_output_dir, exist_ok=True)
    assert_exists(input_pc_full)
    assert_exists(input_norm_full)
    assert_exists(mpeg_cfg_path)
    return Popen([
        'make',
        '-f', f'{MPEG_TMC13_DIR}/scripts/Makefile.tmc13-step',
        '-C', current_mpeg_output_dir,
        f'VPATH={mpeg_cfg_path}',
        f'ENCODER={TMC13}',
        f'DECODER={TMC13}',
        f'PCERROR={PCERROR}',
        f'SRCSEQ={input_pc_full}',
        f'NORMSEQ={input_norm_full}'
    ])
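# Note: `assert_exists` is a repo-local helper used throughout these scripts,
# but its definition is not part of this excerpt. A minimal sketch, assuming it
# simply fails hard when a path is missing (not the repository's actual
# implementation):
def assert_exists(path):
    assert os.path.exists(path), f'{path} does not exist'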
    f'Table for main comparisons with {main_bdrate_ref}\n{main_table}')
logger.info('Loading figures')
params = []
for experiment in experiments['data']:
    opt_metrics_groups = [[x for x in opt_metrics if x.startswith(group)]
                          for group in opt_groups]
    for eval_id, eval_mode in eval_modes.items():
        cur_modes = index_by_id(eval_mode['modes'])
        # Group metrics by prefix
        for opt_group, opt_metric_group in zip(opt_groups, opt_metrics_groups):
            pc_name = experiment['pc_name']
            pc_output_dir = os.path.join(EXPERIMENT_DIR, pc_name, 'results', eval_id)
            if os.path.exists(pc_output_dir):
                input_fig = os.path.join(
                    pc_output_dir, f'{opt_group}_opt_rd_curve_{opt_group}.pdf')
                output_fig = os.path.join(
                    args.output_path, f'fig_{eval_id}_{pc_name}_{opt_group}.pdf')
                assert_exists(input_fig)
                shutil.copyfile(input_fig, output_fig)
            else:
                logger.warning(
                    f'Omitting eval condition {eval_id}, pc {pc_name}, '
                    f'opt_group {opt_group}')
logger.info('Finished')
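# Note: `index_by_id` (used above to turn the list of eval modes into a
# dictionary) is a repo-local helper not shown in this excerpt. A minimal
# sketch, assuming each entry carries an 'id' key and is indexed by it (not
# the repository's actual implementation):
def index_by_id(items):
    return {item['id']: item for item in items}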
parser.add_argument('experiment_path', help='Experiments file path.')
args = parser.parse_args()

with open(args.experiment_path, 'r') as f:
    experiments = yaml.load(f.read(), Loader=yaml.FullLoader)
keys = [
    'TRAIN_DATASET_PATH', 'EXPERIMENT_DIR', 'TRAIN_RESOLUTION', 'model_configs',
    'alpha', 'gamma', 'batch_size', 'train_mode'
]
TRAIN_DATASET_PATH, EXPERIMENT_DIR, TRAIN_RESOLUTION, model_configs, alpha, gamma, \
    batch_size, train_mode = [experiments[x] for x in keys]
assert train_mode in ['independent', 'warm_seq']
os.makedirs(EXPERIMENT_DIR, exist_ok=True)
assert_exists(EXPERIMENT_DIR)
logger = build_logger(__name__, os.path.join(EXPERIMENT_DIR, 'tr_train_all.log'))

logger.info('Starting training')
for model_config in model_configs:
    model_id = model_config['id']
    config = model_config['config']
    lambdas = model_config['lambdas']
    cur_alpha = model_config.get('alpha', alpha)
    cur_gamma = model_config.get('gamma', gamma)
    cur_batch_size = model_config.get('batch_size', batch_size)
    cur_train_mode = model_config.get('train_mode', train_mode)
    assert cur_train_mode in ['independent', 'warm_seq']
    for i, lmbda in enumerate(lambdas):
                    default=multiprocessing.cpu_count(),
                    type=int)
args = parser.parse_args()

with open(args.experiment_path, 'r') as f:
    experiments = yaml.load(f.read(), Loader=yaml.FullLoader)
keys = [
    'MPEG_TMC13_DIR', 'PCERROR', 'MPEG_DATASET_DIR', 'EXPERIMENT_DIR',
    'mpeg_modes', 'rates'
]
MPEG_TMC13_DIR, PCERROR, MPEG_DATASET_DIR, EXPERIMENT_DIR, mpeg_modes, rates = [
    experiments[k] for k in keys
]
TMC13 = f'{MPEG_TMC13_DIR}/build/tmc3/tmc3'
assert_exists(TMC13)
assert_exists(PCERROR)
assert_exists(MPEG_DATASET_DIR)
output_path = os.path.join(EXPERIMENT_DIR, 'gpcc')

logger.info('Starting GPCC experiments')
params = []
for mpeg_mode in mpeg_modes:
    mpeg_id = mpeg_mode['id']
    for experiment in experiments['data']:
        pc_name, cfg_name, input_pc, input_norm = \
            [experiment[x] for x in ['pc_name', 'cfg_name', 'input_pc', 'input_norm']]
        mpeg_output_dir = os.path.join(output_path, mpeg_id, pc_name)
        for rate in rates:
            current_mpeg_output_dir = os.path.join(mpeg_output_dir, rate)
        cur_modes = index_by_id(cur_modes)
        cur_rcParams = eval_mode.get('rcParams')
        # Group metrics by prefix
        for opt_group, opt_metric_group, cur_lims in zip(
                opt_groups, opt_metrics_groups, lims):
            pc_name = experiment['pc_name']
            data = []
            for id, cur_mode in cur_modes.items():
                if id in mpeg_modes:
                    mpeg_mode = mpeg_modes[id]
                    mpeg_label = cur_mode.get('label', mpeg_mode.get('label', id))
                    mpeg_output_dir = os.path.join(mpeg_path, id, pc_name)
                    assert_exists(mpeg_output_dir)
                    data.append((mpeg_output_dir, '**/report.json', id, mpeg_label))
                elif id in model_configs:
                    model_config = model_configs[id]
                    model_label = cur_mode.get('label', model_config.get('label', id))
                    pattern = f'**/report_{opt_group}.json'
                    model_output_dir = os.path.join(EXPERIMENT_DIR, pc_name, id)
                    reports_glob = os.path.join(model_output_dir, pattern)
                    reports = glob(reports_glob, recursive=True)
                    if not os.path.exists(model_output_dir):
                        logger.warning(
                            f'Model folder {model_output_dir} was not found: '
                            f'omitting model')
                    help='Disable stdout and stderr redirection.',
                    default=False,
                    action='store_true')
args = parser.parse_args()

with open(args.experiment_path, 'r') as f:
    experiments = yaml.load(f.read(), Loader=yaml.FullLoader)
keys = [
    'MPEG_TMC13_DIR', 'PCERROR', 'MPEG_DATASET_DIR', 'EXPERIMENT_DIR',
    'pcerror_mpeg_mode', 'model_configs', 'opt_metrics', 'max_deltas',
    'fixed_threshold'
]
MPEG_TMC13_DIR, PCERROR, MPEG_DATASET_DIR, EXPERIMENT_DIR, pcerror_mpeg_mode, \
    model_configs, opt_metrics, max_deltas, fixed_threshold = \
    [experiments[x] for x in keys]
# assert_exists(PCERROR)
assert_exists(MPEG_DATASET_DIR)
assert_exists(EXPERIMENT_DIR)
validate_opt_metrics(opt_metrics, with_normals=True)

logger.info('Starting our method\'s experiments')
params = []
for experiment in experiments['data']:
    pc_name, cfg_name, input_pc, input_norm = \
        [experiment[x] for x in ['pc_name', 'cfg_name', 'input_pc', 'input_norm']]
    opt_output_dir = os.path.join(EXPERIMENT_DIR, pc_name)
    for model_config in model_configs:
        model_id = model_config['id']
        config = model_config['config']
        lambdas = model_config['lambdas']
        cur_opt_metrics = model_config.get('opt_metrics', opt_metrics)
        cur_max_deltas = model_config.get('max_deltas', max_deltas)
def run_experiment(output_dir, model_dir, model_config, pc_name, pcerror_path,
                   pcerror_cfg_path, input_pc, input_norm, opt_metrics, max_deltas,
                   fixed_threshold, no_merge_coding, num_parallel,
                   no_stream_redirection=False):
    for f in [model_dir, pcerror_path, pcerror_cfg_path, input_pc, input_norm]:
        assert_exists(f)
    validate_opt_metrics(opt_metrics, with_normals=input_norm is not None)
    with open(pcerror_cfg_path, 'r') as f:
        pcerror_cfg = yaml.load(f.read(), Loader=yaml.FullLoader)

    opt_group = ['d1', 'd2']
    enc_pc_filenames = [f'{pc_name}_{x}.ply.bin' for x in opt_group]
    dec_pc_filenames = [f'{x}.ply' for x in enc_pc_filenames]
    dec_pc_color_filenames = [f'{x}.color.ply' for x in dec_pc_filenames]
    pcerror_result_filenames = [f'{x}.pc_error' for x in dec_pc_filenames]
    enc_pcs = [os.path.join(output_dir, x) for x in enc_pc_filenames]
    dec_pcs = [os.path.join(output_dir, x) for x in dec_pc_filenames]
    dec_pcs_color = [os.path.join(output_dir, x) for x in dec_pc_color_filenames]
    pcerror_results = [os.path.join(output_dir, x) for x in pcerror_result_filenames]
    exp_reports = [os.path.join(output_dir, f'report_{x}.json') for x in opt_group]
    compress_log = os.path.join(output_dir, 'compress.log')
    decompress_log = os.path.join(output_dir, 'decompress.log')

    # Create folder
    os.makedirs(output_dir, exist_ok=True)
    resolution = pcerror_cfg['resolution']

    # Encoding, or encoding/decoding when the merge_coding option is enabled
    if all(os.path.exists(x) for x in enc_pcs) and (
            no_merge_coding or all(os.path.exists(x) for x in dec_pcs)):
        print_progress(input_pc, enc_pcs, '(exists)')
    else:
        print_progress(input_pc, enc_pcs)
        with ExitStack() as stack:
            if no_stream_redirection:
                f = None
            else:
                f = open(compress_log, 'w')
                stack.enter_context(f)
            additional_params = []
            if not no_merge_coding:
                additional_params += ['--dec_files', *dec_pcs]
            if fixed_threshold:
                additional_params += ['--fixed_threshold']
            subprocess.run(
                ['python', 'compress_octree.py',
                 # '--debug',
                 '--input_files', input_pc,
                 '--input_normals', input_norm,
                 '--output_files', *enc_pcs,
                 '--checkpoint_dir', model_dir,
                 '--opt_metrics', *opt_metrics,
                 '--max_deltas', *map(str, max_deltas),
                 '--resolution', str(resolution + 1),
                 '--model_config', model_config] + additional_params,
                stdout=f, stderr=f, check=True)

    # Decoding, skipped with the merge_coding option
    if all(os.path.exists(x) for x in dec_pcs):
        print_progress(enc_pcs, dec_pcs, '(exists)')
    elif not no_merge_coding:
        print_progress(enc_pcs, dec_pcs, '(merge_coding)')
    else:
        print_progress(enc_pcs, dec_pcs)
        with ExitStack() as stack:
            if no_stream_redirection:
                f = None
            else:
                f = open(decompress_log, 'w')
                stack.enter_context(f)
            subprocess.run(
                ['python', 'decompress_octree.py',
                 # '--debug',
                 '--input_files', *enc_pcs,
                 '--output_files', *dec_pcs,
                 '--checkpoint_dir', model_dir,
                 '--model_config', model_config],
                stdout=f, stderr=f, check=True)

    # Color mapping
    mc_params = []
    if all(os.path.exists(x) for x in dec_pcs_color):
        print_progress(dec_pcs, dec_pcs_color, '(exists)')
    else:
        for dp, dpc in zip(dec_pcs, dec_pcs_color):
            print_progress(dp, dpc)
            mc_params.append((input_pc, dp, dpc))
    parallel_process(run_mapcolor, mc_params, num_parallel)

    # Objective quality evaluation with pc_error
    pcerror_cfg_params = [[f'--{k}', str(v)] for k, v in pcerror_cfg.items()]
    pcerror_cfg_params = flatten(pcerror_cfg_params)
    params = []
    for pcerror_result, decoded_pc in zip(pcerror_results, dec_pcs):
        if os.path.exists(pcerror_result):
            print_progress(decoded_pc, pcerror_result, '(exists)')
        else:
            print_progress(decoded_pc, pcerror_result)
            params.append((decoded_pc, input_norm, input_pc,
                           pcerror_cfg_params, pcerror_path, pcerror_result))
    parallel_process(run_pcerror, params, num_parallel)

    # Gather per-rate-point reports
    for pcerror_result, enc_pc, decoded_pc, experiment_report in zip(
            pcerror_results, enc_pcs, dec_pcs, exp_reports):
        if os.path.exists(experiment_report):
            print_progress('all', experiment_report, '(exists)')
        else:
            print_progress('all', experiment_report)
            pcerror_data = mpeg_parsing.parse_pcerror(pcerror_result)
            pos_total_size_in_bytes = os.stat(enc_pc).st_size
            input_point_count = len(PyntCloud.from_file(input_pc).points)
            data = {
                'pos_total_size_in_bytes': pos_total_size_in_bytes,
                'pos_bits_per_input_point':
                    pos_total_size_in_bytes * 8 / input_point_count,
                'input_point_count': input_point_count
            }
            data = {**data, **pcerror_data}
            with open(experiment_report, 'w') as f:
                json.dump(data, f, sort_keys=True, indent=4)

            # Debug: check that encoder-side and decoder-side D1 PSNR agree
            with open(enc_pc + '.enc.metric.json', 'r') as f:
                enc_metrics = json.load(f)
            diff = abs(enc_metrics['d1_psnr'] - data['d1_psnr'])
            logger.info(f'D1 PSNR diff between encoder and decoder: {diff}')
            assert diff < 0.01, (
                f'encoded {enc_pc} with D1 {enc_metrics["d1_psnr"]}dB '
                f'but decoded {decoded_pc} with D1 {data["d1_psnr"]}dB')

    logger.info('Done')
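# Note: `flatten` (used above to turn the per-option [['--key', 'value'], ...]
# pairs into a single pc_error argument list) is a repo-local helper whose
# definition is not part of this excerpt. A minimal one-level flatten
# consistent with that usage; an assumption, not the repository's actual
# implementation:
import itertools

def flatten(list_of_lists):
    # e.g. [['--resolution', '1023'], ...] -> ['--resolution', '1023', ...]
    return list(itertools.chain.from_iterable(list_of_lists))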
        suffix = f'_{comp["opt_group"]}'
    # Gather report and image
    report_path = os.path.join(os.path.split(path)[0], f'report{suffix}.json')
    with open(report_path, 'r') as f:
        report = f.read()
    shutil.copyfile(report_path,
                    os.path.join(vc_folder, comp['id'] + '.report.json'))
    shutil.copyfile(path + '.png',
                    os.path.join(vc_folder, comp['id'] + '.ply.png'))
    # Read point cloud
    logger.info(f'Loading {comp["id"]}\n{report}')
    assert_exists(path)
    cur_pcd = o3d.io.read_point_cloud(path)
    cur_pcd_points = np.asarray(cur_pcd.points)
    compared_pcds.append(cur_pcd)
    compared_err.append(compute_d1_res_ba(pcd_points, cur_pcd_points, t1=t1))

# Compute min, max and target percentile of the per-point errors
all_res = np.concatenate(compared_err)
percentile = 99
global_max = np.max(all_res)
global_pmax = np.percentile(all_res, percentile)
global_min = 0.0

# Build colorbar