def eval_predictions(pred_dir, anno_dir, width=30, unofficial=True, sequential=False):
    """Evaluates the predictions in pred_dir and returns CULane's metrics
    (precision, recall, F1 and its components)."""
    print(f'Loading annotation data ({anno_dir})...')
    annotations, label_paths = load_labels(anno_dir)
    print(f'Loading prediction data ({pred_dir})...')
    predictions = load_prediction_list(label_paths, pred_dir)
    print('Calculating metric {}...'.format('sequentially' if sequential else 'in parallel'))
    if sequential:
        results = t_map(partial(culane_metric, width=width, unofficial=unofficial, img_shape=LLAMAS_IMG_RES),
                        predictions, annotations)
    else:
        results = p_map(partial(culane_metric, width=width, unofficial=unofficial, img_shape=LLAMAS_IMG_RES),
                        predictions, annotations)
    total_tp = sum(tp for tp, _, _ in results)
    total_fp = sum(fp for _, fp, _ in results)
    total_fn = sum(fn for _, _, fn in results)
    if total_tp == 0:
        precision = 0
        recall = 0
        f1 = 0
    else:
        precision = float(total_tp) / (total_tp + total_fp)
        recall = float(total_tp) / (total_tp + total_fn)
        f1 = 2 * precision * recall / (precision + recall)
    return {'TP': total_tp, 'FP': total_fp, 'FN': total_fn,
            'Precision': precision, 'Recall': recall, 'F1': f1}
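# The snippets here rely on p_tqdm's drop-in map replacements: t_map runs
# the function sequentially with a tqdm progress bar, while p_map fans out
# across worker processes with the same call shape. A minimal sketch of the
# pattern used above (square/offset are illustrative names, not from the
# original code):
from functools import partial

from p_tqdm import p_map, t_map

def square(x, offset=0):
    return x * x + offset

if __name__ == '__main__':
    # Sequential map with a progress bar.
    sequential = t_map(partial(square, offset=1), range(10))
    # Same call, distributed over processes.
    parallel = p_map(partial(square, offset=1), range(10))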
def __call__(self, parallel=False, timer=True):
    """A method to run a combo by simulating all countries."""
    # Message #
    print("Running combo '%s'." % self.short_name)
    # Timer start (note: this rebinds the unused `timer` argument) #
    timer = Timer()
    timer.print_start()
    # Function to run a single country #
    def run_country(args):
        code, steps = args
        # Run every step; returning inside the loop would stop after the first #
        result = None
        for runner in steps:
            result = runner.run()
        return result
    # Run countries sequentially #
    if not parallel:
        result = t_map(run_country, self.runners.items())
    # Run countries in parallel #
    if parallel:
        result = p_umap(run_country, self.runners.items(), num_cpus=4)
    # Timer end #
    timer.print_end()
    timer.print_total_elapsed()
    # Compile logs #
    self.compile_logs()
    # Return #
    return result
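# Note on the parallel branch above: p_umap is p_tqdm's *unordered* parallel
# map, so results come back in completion order rather than in the order of
# self.runners.items(). A hedged illustration (slow_identity is a made-up
# helper, not part of the original class):
import random
import time

from p_tqdm import p_umap

def slow_identity(x):
    time.sleep(random.random())
    return x

if __name__ == '__main__':
    print(p_umap(slow_identity, range(5)))  # e.g. [2, 0, 4, 1, 3]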
def main(path, processes):
    logging.info("Looking for images. Please wait a moment.")
    files = all_files_in(path)
    images = [file for file in files if is_image(file)]
    logging.info(f"Converting {len(images)} images to jpeg")
    if processes == 0:
        logging.info("Running normally")
        t_map(convert_to_jpeg_and_override, images)
    else:
        processes = mp.cpu_count() if processes == -1 else processes
        logging.info(f"Running using [{processes}] processes")
        pool = mp.Pool(processes)
        results = pool.map(convert_to_jpeg_and_override, images)
        pool.close()
    logging.info("DONE")
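# Design note: since pool.map blocks until every image is converted, the
# pool above can also be managed with a with-block, which terminates it on
# exit. A sketch using the same names as the function above:
#
# with mp.Pool(processes) as pool:
#     results = pool.map(convert_to_jpeg_and_override, images)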
def main():
    df = group_df(get_df())
    rows = t_map(parse_row, list(df.iterrows()))
    df = pd.DataFrame(rows)
    df["color"] = df.apply(
        lambda x: "nx" + str(x["nx"]) + "_" + ("vec" if x["vectorized"] else "nonvec"),
        axis=1)
    df = df[df["threads"] != 16]
    df = df[df["vectorized"] == False]
    fig = px.bar(df, x="threads", y="time_mean", error_y="time_std", color="color")
    fig.update_layout(barmode='group')
    fig.show()
    return df
def eval_predictions(pred_dir, anno_dir, list_path, width=30, official=True, sequential=False):
    """Evaluates the predictions in pred_dir against anno_dir and returns
    CULane's metrics (precision, recall, F1 and its components)."""
    print('List: {}'.format(list_path))
    print('Loading prediction data...')
    predictions = load_culane_data(pred_dir, list_path)
    print('Loading annotation data...')
    annotations = load_culane_data(anno_dir, list_path)
    print('Calculating metric {}...'.format('sequentially' if sequential else 'in parallel'))
    img_shape = (590, 1640, 3)
    if sequential:
        results = t_map(partial(culane_metric, width=width, official=official, img_shape=img_shape),
                        predictions, annotations)
    else:
        results = p_map(partial(culane_metric, width=width, official=official, img_shape=img_shape),
                        predictions, annotations)
    total_tp = sum(tp for tp, _, _, _, _ in results)
    total_fp = sum(fp for _, fp, _, _, _ in results)
    total_fn = sum(fn for _, _, fn, _, _ in results)
    if total_tp == 0:
        precision = 0
        recall = 0
        f1 = 0
    else:
        precision = float(total_tp) / (total_tp + total_fp)
        recall = float(total_tp) / (total_tp + total_fn)
        f1 = 2 * precision * recall / (precision + recall)
    return {'TP': total_tp, 'FP': total_fp, 'FN': total_fn,
            'Precision': precision, 'Recall': recall, 'F1': f1}
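# A hypothetical invocation of eval_predictions; the paths below are
# placeholders standing in for a CULane directory layout, not taken from
# the original script:
if __name__ == '__main__':
    metrics = eval_predictions(pred_dir='predictions/culane',
                               anno_dir='datasets/culane',
                               list_path='datasets/culane/list/test.txt',
                               width=30,
                               official=True,
                               sequential=False)
    for name, value in metrics.items():
        print('{}: {}'.format(name, value))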
# # os.system('start "excel" "../../../data/out-ndjson.xlsx"')

# For a medium-sized json file:
# df = pd.read_json("../../../data/out-ndjson.txt", lines=True)

# For a large json file:
print('1. Open file and map to json..')
with open('../../../data/out-ndjson.txt') as json_file:
    data = json_file.readlines()
    # The line below may take at least 8-10 minutes of processing for
    # 4-5 million rows. It converts every string in the list to an actual
    # json object.
    data = list(t_map(json.loads, data))

print('2. Convert data to pandas data frame..')
df = pd.DataFrame(data)

# Maximum width of an excel cell = 255 characters, which is not enough to
# contain the raw data:
# df.drop("raw", axis=1, inplace=True)
# print(df.info())

print('3. Data frame to excel.. (may take a few minutes to complete, '
      'eg 5 min/200k lines)')
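# The write the print above announces is not included in the snippet; a
# sketch of what it presumably looks like, reusing the output path from the
# commented os.system line at the top (an assumption, not from the source):
df.to_excel('../../../data/out-ndjson.xlsx', index=False)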