# Script 1: re-rank landmark annotations against golden cases via fused
# Gromov-Wasserstein (FGW) optimal transport. The file is truncated above
# this point; the imports below are those the visible fragment needs.
import os
from collections import defaultdict

import numpy as np
from tqdm import tqdm

# ... (truncated) the fragment resumes inside the alignment loop, mid-way
# through a call whose trailing arguments match POT's
# ot.gromov.fused_gromov_wasserstein signature.
            M, C_pc, C_rpc, p, q, loss_fun='square_loss', alpha=0.2)
        # Entropy of the incremental transport plan (diagnostic only).
        H = -np.sum(dP * np.log(dP + 1e-5))
        # print(k, H)
        P = P + dP
        # print(k, np.argmax(dP, axis=1))
        # The re-ranking for landmark group k is the row-wise argmax
        # assignment of the accumulated transport plan.
        re_ranking[k] = np.argmax(P, axis=1).tolist()
    return re_ranking


if __name__ == '__main__':
    # Collect golden annotations per landmark group across all golden cases.
    golden_annotations = defaultdict(list)
    for case in golden_cases:
        file = os.path.join(golden_train_dir, case, "annotation.txt")
        annotation = DataReader.parse_annotation(file)
        for k, v in annotation.items():
            golden_annotations[k].append(np.asarray(v))

    t = tqdm(train_cases)
    for case in t:
        raw_annotation = os.path.join(train_dir, case, "annotation.txt")
        annotation = {k: np.asarray(v)
                      for k, v in DataReader.parse_annotation(raw_annotation).items()}
        re_ranking = align_annotation(
            golden_annotations, annotation, align_method='FGW')
        if re_ranking:
            # Reorder each group's points according to the FGW re-ranking.
            re_annotation = {k: np.asarray([v[i] for i in re_ranking[k]])
                             for k, v in annotation.items()}
            save_refined_annotation(sorted_train_dir, case, re_annotation)
        else:
            fail_list.write(case + "\n")
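
# The truncated call above takes (M, C_pc, C_rpc, p, q, loss_fun, alpha),
# which matches POT's ot.gromov.fused_gromov_wasserstein. Below is a minimal,
# self-contained sketch of how such a call is typically assembled; the helper
# name fgw_transport_plan and the cost-matrix construction are assumptions,
# not the source's actual code.
import numpy as np
import ot  # POT: Python Optimal Transport


def fgw_transport_plan(golden_pts, raw_pts, alpha=0.2):
    """Sketch: FGW transport plan between a golden and a raw landmark set."""
    M = ot.dist(golden_pts, raw_pts)        # feature cost: pairwise sq. distances
    C_pc = ot.dist(golden_pts, golden_pts)  # structure cost within golden set
    C_rpc = ot.dist(raw_pts, raw_pts)       # structure cost within raw set
    p = ot.unif(len(golden_pts))            # uniform source marginal
    q = ot.unif(len(raw_pts))               # uniform target marginal
    return ot.gromov.fused_gromov_wasserstein(
        M, C_pc, C_rpc, p, q, loss_fun='square_loss', alpha=alpha)


# Usage sketch: the row-wise argmax of the plan gives a matching, exactly as
# in the re_ranking computation above.
# plan = fgw_transport_plan(golden, raw)
# order = np.argmax(plan, axis=1)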

# Script 2: sanity-check per-color landmark counts and try to auto-fix
# out-of-range points with a location filter. Truncated above this point;
# the fragment opens inside the filtering function (likely location_filter).
import json
import os

from tqdm import tqdm

# ... (truncated) keep only points whose coordinates both exceed 300.
    results = {}
    for color in point_clouds:
        results[color] = []
        for x, y in point_clouds[color]:
            if x > 300 and y > 300:
                results[color].append([x, y])
    return results


if __name__ == "__main__":
    target_case = os.listdir(target_folder)
    failure_count = 0
    cannot_fix = 0
    with tqdm(target_case) as t:
        for case_name in t:
            point_clouds = DataReader.parse_annotation(
                os.path.join(target_folder, case_name, file_name))
            stats = {c: len(point_clouds[c]) for c in point_clouds}
            if not landmark_num_checker(point_clouds):
                print("case '{}' is not legal\n{}".format(
                    case_name, json.dumps(point_clouds)))
                # Retry after dropping points outside the legal region.
                new_point_clouds = location_filter(point_clouds)
                if not landmark_num_checker(new_point_clouds):
                    print("location filtered case '{}' is not legal\n{}".format(
                        case_name, json.dumps(new_point_clouds)))
                    cannot_fix += 1
                failure_count += 1
            t.set_postfix(stats)
    print("Summary")
    print("In folder {}, {} failures found, {} cannot be auto-fixed".format(
        target_folder, failure_count, cannot_fix))
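
# landmark_num_checker is referenced above but not defined in this fragment.
# A minimal sketch under the assumption that each color channel must carry a
# fixed landmark count; EXPECTED_COUNTS and its values are hypothetical and
# not taken from the source.
EXPECTED_COUNTS = {"red": 8, "green": 8, "blue": 8}  # assumed per-color counts


def landmark_num_checker(point_clouds):
    """Sketch: a case is legal iff every color has its expected point count."""
    return all(
        len(point_clouds.get(color, [])) == count
        for color, count in EXPECTED_COUNTS.items()
    )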