def main(dir_path, bad_result, record_tag):
    """Attach AP-evaluation results for the detected bad cases to
    screen_cast/case_finder.json under the record's input directory.

    :param dir_path: data directory (kept for interface compatibility; paths
        are actually derived from record_tag["input_dir"])
    :param bad_result: mapping of case timestamp (str) -> {"labels": [...]}
    :param record_tag: tagging record; "input_dir" points at the case data
    :return: 1 when there is no abnormal AP label to evaluate against
    """
    data_tag = loadTag(record_tag["input_dir"])
    abnormal_ap_label = data_tag.get("origin_record_tag")[0]
    case_finder_json = loadTag(record_tag["input_dir"],
                               tag_file_name='screen_cast/case_finder.json')
    if abnormal_ap_label == []:
        return 1
    # A plain dict comprehension suffices: every key is assigned exactly once
    # (the original used a defaultdict it never relied on).
    bad_case_list = {int(t): bad_result[t]["labels"] for t in bad_result}
    if len(bad_case_list) == 0:
        case_finder_json["ap_evaluation_result"] = []
        case_finder_json["has_found"] = False
    else:
        case_finder_json["ap_evaluation_result"] = generateEvluationScore(
            bad_case_list, abnormal_ap_label)
        if len(case_finder_json["ap_evaluation_result"]) != 0:
            case_finder_json["ap_evaluation_result"].append("HasFound")
    print('\n-----', case_finder_json["ap_evaluation_result"], '\n')
    saveTag(record_tag["input_dir"], case_finder_json,
            '/screen_cast/case_finder.json')
def renameCaseTaggingTag(case_output_path):
    """Normalize case_finder.json in *case_output_path*.

    Replaces placeholder obstacle ids ([""] or ["0"]) with [], refines the
    ego/obstacle tag lists via refineTag, and rewrites the file with the
    first entry unwrapped from its enclosing list. Empty/missing input is
    rewritten as an empty dict so downstream readers find valid JSON.
    """
    case_finder = loadTag(case_output_path, 'case_finder.json')
    try:
        if case_finder == [] or case_finder is None:
            case_finder = {}
            saveTag(case_output_path, case_finder, 'case_finder.json')
            return
        # [""] and ["0"] are placeholder ids, not real obstacles;
        # the original tested each placeholder in a separate duplicate branch
        if case_finder[0].get("obstacle_id") in ([""], ["0"]):
            case_finder[0]["obstacle_id"] = []
        for key in ("ego_tags", "obstacle_vehicle_tags", "obstacle_vru_tags"):
            if key in case_finder[0]:
                case_finder[0][key] = refineTag(case_finder[0][key])
        saveTag(case_output_path, case_finder[0], 'case_finder.json')
    except Exception as e:
        # best-effort: a malformed tag file must not break the pipeline
        # (original printed the meaningless marker "222")
        print("renameCaseTaggingTag failed:", e)
def readDataTagJson(self, dir_path):
    """Load data-tag.json from *dir_path*; unwrap a 'backup' wrapper if present.

    :return: the tag dict, or [] when the file is missing/empty
    """
    tag = loadTag(dir_path, "data-tag.json")
    if tag is None or tag == []:
        return []
    # backup files wrap the real tag one level deeper
    if "backup" in tag:
        return tag["backup"][0]["data_tag"]
    return tag
def matchBackUpTag(self, dir_path):
    """Match *dir_path* (an *_AutoCollect directory) against one of the
    backup data-tag*.json files by comparing time fields embedded in the
    directory name and in the backup file name.

    :param dir_path: path whose second-to-last component looks like
        <prefix>_<t1>_<t2>_<t3>_<t4>_AutoCollect (numeric time fields)
    :return: the loaded backup tag dict on a close match, otherwise None
    """
    try:
        # second-to-last path component is the recording directory name
        dir_name = dir_path.split('/', -1)[-2]
        test_time_list = dir_name.split('_', -1)
        if test_time_list[-1] != "AutoCollect":
            return None
        test_time = [
            int(test_time_list[1]),
            int(test_time_list[2]),
            int(test_time_list[3]),
            int(test_time_list[4])
        ]
        for backup_tag in self.backup_tag_list:
            backup_tag_name = os.path.basename(backup_tag)
            # backup file names carry an "(a-b-c-d)" time stamp
            tag_time_str = backup_tag_name.split('(', -1)[1].split(')', -1)[0]
            tag_time = tag_time_str.split('-', -1)
            print(test_time, tag_time)
            # weighted difference of the four fields; the 43200/1440/60/1
            # weights suggest day/hour/minute/second folded into a single
            # scalar gap -- TODO confirm the units of each field
            tag_gap = (test_time[0]-int(tag_time[0]))*43200\
                +(test_time[1]-int(tag_time[1]))*1440\
                +(test_time[2] - int(tag_time[2])) * 60\
                +(test_time[3] - int(tag_time[3]))
            print(tag_gap)
            # a gap below 4 units is considered "the same recording session"
            if abs(tag_gap) < 4:
                tag_data = loadTag(self.input_data_path, backup_tag_name)
                print(backup_tag_name)
                return tag_data
        return None
    except Exception as e:
        # any parse failure (unexpected name format, missing fields) is
        # treated as "no backup tag"; falls through returning None
        print("backup tag not found")
def renameCaseTaggingTag(self, dir_path):
    """Normalize screen_cast/case_finder.json under *dir_path*.

    Placeholder obstacle ids ([""] / ["0"]) become [], tag lists are refined
    via self.refineTag, and the first entry is rewritten unwrapped.

    :return: the refined ego tags; None when the file was empty/missing
    """
    case_output_path = os.path.join(dir_path, "screen_cast")
    case_finder = loadTag(case_output_path, 'case_finder.json')
    saved_ego_tags = []
    if case_finder == [] or case_finder is None:
        # nothing tagged yet: persist an empty dict so readers get valid JSON
        case_finder = {}
        saveTag(case_output_path, case_finder, 'case_finder.json')
        return
    # [""] and ["0"] are placeholder ids, not real obstacles;
    # the original tested each placeholder in a separate duplicate branch
    if case_finder[0].get("obstacle_id") in ([""], ["0"]):
        case_finder[0]["obstacle_id"] = []
    if "ego_tags" in case_finder[0]:
        case_finder[0]["ego_tags"] = self.refineTag(case_finder[0]["ego_tags"])
        saved_ego_tags = case_finder[0]["ego_tags"]
    for key in ("obstacle_vehicle_tags", "obstacle_vru_tags"):
        if key in case_finder[0]:
            case_finder[0][key] = self.refineTag(case_finder[0][key])
    saveTag(case_output_path, case_finder[0], 'case_finder.json')
    return saved_ego_tags
def getLocalizationResult(self, dir_path):
    """Collect the localization evaluation summary from
    logs/localization_eval/evaluation_result.json and update
    self.tag_info["test_mileage"] from the odometer when plausible.

    :param dir_path: data directory (no trailing slash expected)
    :return: defaultdict with Grade/Integrity/Odometer(km)/Setting/Mileage(km)
    """
    "get the localization result"
    localization_result = defaultdict(lambda: {})
    result_file_path = ''.join([dir_path, '/logs/localization_eval/'])
    localization_json = loadTag(result_file_path,
                                tag_file_name='evaluation_result.json')
    if localization_json is not None:
        try:
            localization_result["Grade"] = localization_json["Grade"]
            localization_result["Integrity"] = localization_json[
                "Integrity"]
            localization_result["Odometer(km)"] = localization_json[
                "Odometer(km)"]
            localization_result["Setting"] = localization_json["Setting"]
            localization_result["Mileage(km)"] = localization_json.get(
                "Mileage(km)")
            # only trust the odometer when it falls in a sane (1, 100) km range
            if localization_json.get(
                    "Odometer(km)") > 1.0 and localization_json.get(
                        "Odometer(km)") < 100.0:
                self.tag_info["test_mileage"] = localization_json.get(
                    "Odometer(km)")
        except Exception as e:
            # missing keys: keep whatever was copied so far
            print("localization key is not here")
    # out-of-range mileage falls back to 18.0 km -- presumably a typical
    # route length; TODO confirm where this default comes from
    if self.tag_info["test_mileage"] < 1.0 or self.tag_info[
            "test_mileage"] > 100.0:
        self.tag_info["test_mileage"] = 18.0
    return localization_result
def segmentPreprationCollection(self, dir_name):
    """Prepare segmentation for one data directory.

    Loads <input_data_path><dir_name>/finder_case.json and, when it holds
    usable case data, hands it to self.dataSegment.
    """
    finder_path = self.input_data_path + dir_name + '/finder_case.json'
    if not os.path.exists(finder_path):
        return
    finder_case_json = loadTag(finder_path, '')
    # Guard None (unreadable/invalid file) as well as the original []
    # check, so dataSegment never receives an empty or missing case list.
    if not finder_case_json:
        return
    self.dataSegment(dir_name, finder_case_json)
def JudgeInvalid(self, eval_result_path, id_list):
    """Judge whether the case should be attributed to an invalid module.

    The per-id static-obstacle check is currently disabled, so every call
    reports no invalid module ("") regardless of the loaded tag.

    :param eval_result_path: directory containing moved_id.json
    :param id_list: obstacle ids involved in the case (currently unused)
    :return: "" always
    """
    moved_tag = loadTag(eval_result_path, 'moved_id.json')
    if moved_tag is None or "moved_id" not in moved_tag:
        return ""
    return ""
def brake_main(self, raw_dir_path, record_tag):
    """Dispatch case tagging for a brake-related record.

    tagging_module selects the strategy:
      3 -> take-over case; 0 -> whole-scenario log; 2 -> brake case driven
      by the /control/control_error bag topic; anything else -> generic
      timestamp-based tagging.

    :param raw_dir_path: raw data directory (unused here -- kept for
        interface parity with main(); TODO confirm)
    :param record_tag: dict with input_dir/tagging_module/input_timestamp
    """
    dir_path = record_tag["input_dir"]
    tagging_module = record_tag["tagging_module"]
    tag_save_path = ''.join([dir_path, 'screen_cast/'])
    dpc_file_path = ''.join([dir_path, 'dmppcl.bag'])
    bin_path = ''.join([dir_path, 'simulator_scenario/0/logger.bin'])
    print(record_tag)
    if tagging_module == 3:
        self.case_finder(bin_path, tag_save_path, 3)
        tag = loadTag(tag_save_path, 'case_finder.json')
    elif tagging_module == 0:
        # whole-run scenario log lives in a different file for this mode
        bin_path = ''.join(
            [dir_path, '/simulator_scenario/simulator_scenario_log.bin'])
        self.case_finder(bin_path, dir_path, 0)
    elif tagging_module == 2:
        topic_names = ['/control/control_error']
        tag_info = loadTag(dir_path)
        # record start is in milliseconds; bag timestamps are in seconds
        record_timestamp = int(
            tag_info["origin_record_tag"][0]["start"]) / 1000
        print(record_timestamp)
        bag_data = self.getDataFromDpcbag(dpc_file_path, topic_names,
                                          record_timestamp)
        brake_timestamp, ego_label = self.parseBrakeTimestampFromBag(
            bag_data)
        # Beijing wall-clock strings for the brake moment and 0.5 s before
        brake_timestamp_bj = self.unixToBjclock(brake_timestamp)
        brake_timestamp_bj_1 = self.unixToBjclock(brake_timestamp - 0.5)
        # fractional second -> milliseconds
        nsec = math.modf(brake_timestamp)[0] * 1000
        nsec_1 = math.modf(brake_timestamp - 0.5)[0] * 1000
        print('brake case timestamp: ', brake_timestamp, brake_timestamp_bj)
        id_json_path = ''.join(
            [record_tag["input_dir"], 'screen_cast/obstacle.json'])
        self.case_finder(bin_path, tag_save_path, 2, id_json_path,
                         int(brake_timestamp), int(nsec), 5, 1)
    else:
        # generic tagging around the record's own timestamp
        self.case_finder(bin_path, tag_save_path, 1, 0,
                         int(record_tag["input_timestamp"]), int(000), 5, 1)
def mainUpload(dir_path_without):
    '''
    upload and then archive data
    :param dir_path_without: data path end without /
    '''
    tag_info = loadTag(dir_path_without + '/')
    data_upload(dir_path_without, tag_info, slice=False)
    # A sibling "<dir>_slice/" directory means segmented data exists too;
    # presumably data_upload derives the slice path from the slice flag --
    # TODO confirm (the base path, not the slice path, is passed here).
    # The original also computed an unused dir_name local; removed.
    if os.path.exists(dir_path_without + "_slice/"):
        data_upload(dir_path_without, tag_info, slice=True)
def JudgeInvalid(eval_result_path, id_list):
    """Return "3DPerception" when any id in *id_list* appears in the
    moved_id tag's "static" list, else "".

    :param eval_result_path: directory containing moved_id.json
    :param id_list: obstacle ids involved in the case
    """
    moved_tag = loadTag(eval_result_path, 'moved_id.json')
    # The original guarded only on "invalid" and then indexed
    # moved_tag["static"], raising KeyError whenever "static" was absent;
    # require both keys before touching them.
    if moved_tag is None or "invalid" not in moved_tag or "static" not in moved_tag:
        return ""
    if moved_tag["static"] == []:
        return ""
    for obstacle_id in id_list:  # avoid shadowing builtin `id`
        if obstacle_id in moved_tag["static"]:
            return "3DPerception"
    return ""
def tlProcess(self, dir_path, record_tag):
    """Annotate the case tag with supply-label attributes and merge the
    auto-detected modules into record_tag["modules"].

    :param dir_path: case data directory
    :param record_tag: tagging record with "labels" and "modules" lists
    :return: (record_tag, status) -- status True when auto modules were found
    """
    status = False
    case_toss_file = os.path.join(dir_path, 'screen_cast/case_tag.json')
    case_toss_tag = loadTag(case_toss_file, '')
    if case_toss_tag is None or case_toss_tag == {}:
        return record_tag, status
    # reset attribute/object-check sections before re-deriving them
    case_toss_tag["Attributes"] = {}
    case_toss_tag["Object_check"] = {}
    case_toss_tag["Attributes"]["object_problem"] = 0
    case_toss_tag["Attributes"]["traffic_light_problem"] = 0
    case_toss_tag["Object_check"]["object_pos"] = 0
    if record_tag['labels'] != []:
        for label in record_tag['labels']:
            # map human label -> problem attribute via config table
            if label in self.auto_module_["supply_label_contrast"]:
                labeled_module = self.auto_module_[
                    "supply_label_contrast"][label]
                case_toss_tag["Attributes"][labeled_module] = 1
                if labeled_module == "traffic_light_problem":
                    try:
                        # record the numeric ground-truth TL label;
                        # "Pr_check" may be absent -- tolerated
                        case_toss_tag["Pr_check"][
                            "true_tl_label"] = self.auto_module_[
                                "label_to_number"][label]
                    except:
                        print("tl toss error")
    saveTag(case_toss_file, case_toss_tag, '')
    self.autoModuleDsitrib(dir_path)
    if not os.path.isfile(
            os.path.join(dir_path, 'screen_cast/auto_module.json')):
        return record_tag, status
    auto_module_tag = loadTag(
        os.path.join(dir_path, 'screen_cast/auto_module.json'), '')
    if auto_module_tag["Module"] != []:
        status = True
        # C-TL plus a "traffic-light scene change" label is re-attributed
        # to the Road module
        if auto_module_tag["Module"] == [
                "C-TL"
        ] and "红绿灯场景变化" in record_tag["labels"]:
            auto_module_tag["Module"] = ["Road"]
        for module in auto_module_tag["Module"]:
            record_tag["modules"].append(module)
        # de-duplicate (order is not preserved)
        record_tag["modules"] = list(set(record_tag["modules"]))
    return record_tag, status
def addEgoTagToLabel(self, record_tag, ego_tags):
    """Append ego tags to the labels of the first origin_record_tag entry
    in data-tag.json, creating the labels list when missing.

    :param record_tag: dict with "input_dir" pointing at the data directory
    :param ego_tags: iterable of tag strings, or None for nothing to add
    """
    case_tag = loadTag(record_tag["input_dir"], 'data-tag.json')
    # loadTag may yield None/[] for a missing or unreadable file; the
    # original crashed on .keys() in that case
    if not case_tag or "origin_record_tag" not in case_tag:
        return
    first_record = case_tag["origin_record_tag"][0]
    if "labels" not in first_record:
        first_record["labels"] = []
    if ego_tags is not None:
        first_record["labels"].extend(ego_tags)
    saveTag(record_tag["input_dir"], case_tag, 'data-tag.json')
def __init__(self, path):
    """Initialize the data-check/upload pipeline rooted at *path*.

    :param path: input data directory containing the per-run folders and
        backup data-tag*.json files
    """
    self.input_data_path = path
    self.file_list = deque()
    # backup tags live next to the data dirs as data-tag*.json
    self.backup_tag_list = self.getMatchedFilePaths(path,
                                                    "data-tag*",
                                                    formats=[".json"],
                                                    recursive=False)
    self.tag_info = defaultdict(lambda: {})
    self.getAllDataDir()
    self.tag_file_name = '/data-tag.json'
    # HTTP header used when posting tags
    self.headerdata = {"Data-tag-type": "application/json"}
    # expected-file checklist and tag->module mapping from bundled configs
    self.check_file_name_list = loadTag(tag_file_name='data_check.json')
    self.tag_module_list = loadTag(tag_file_name='tag_module.json')
    self.post = True
    # use ~60% of the cores for the upload/check worker pool
    self.pool = ThreadPool(int(multiprocessing.cpu_count() * 0.6))
    self.check_true_file_list = []
    self.check_false_file_list = []
    self.false_check_reasion = []
    self.case_tagging = BrakeCaseTagging()
    # thresholds for tprofile evaluation
    self.tprofile_thresh = loadTag('tprofile_thresh.json', '')
    self.readShellFile('download_logs.sh')
def moduleProfileParse(dir_path, tprofile_name, tprofile_case, result):
    """Merge one tprofile case file into the running collections.

    Appends unseen cases to *tprofile_case* and counts per-module
    occurrences into *result* (both are mutated in place).

    :return: (tprofile_case, result) -- the original returned the tuple only
        on the empty-input early exit and fell off the end (None) after a
        successful parse; now every path returns the tuple.
    """
    case_json = loadTag(os.path.join(dir_path, 'tprofile_result'),
                        tprofile_name)
    if case_json is None or case_json == []:
        return tprofile_case, result
    for case in case_json:
        if case not in tprofile_case:
            tprofile_case.append(case)
        for module in case.get("modules", []):
            result[module] = result.get(module, 0) + 1
    return tprofile_case, result
def getControlEvalResult(dir_name):
    """Collect control evaluation tags, normalizing labels/data_type.

    :param dir_name: data directory containing logs/control_eval/
    :return: {} when the result file is missing, None when it holds no
        tags, otherwise the list of tag dicts
    """
    eval_tag_path = os.path.join(dir_name, 'logs/control_eval/')
    eval_tag = loadTag(eval_tag_path, 'control_eval_results.json')
    if eval_tag is None:
        return {}
    eval_tag = eval_tag.get("Tag")
    # .get returns None when "Tag" is absent; the original then crashed on
    # len(None). Guard like the sibling getLocalEvalResult does.
    if eval_tag is None or len(eval_tag) < 1:
        return None
    for case_tag in eval_tag:
        if "labels" not in case_tag:
            case_tag["labels"] = []
        # the English tag name doubles as a label
        case_tag["labels"].append(case_tag["tag_en"])
        case_tag["data_type"] = "eval"
    return eval_tag
def parsingJsonForModuleTimeConsuming(tprofile_json, function_name):
    """Extract mean/stddev timing for *function_name* from a tprofile dump.

    Entries are [name, mean, stddev, call_count, ...]; when the function
    appears several times, the entry with the highest call count wins.

    :return: {"mean": ..., "stddev": ...} or {} when the name never appears
    """
    timing = {}
    best_calls = 0
    tprofile_tag = loadTag(tprofile_json, tag_file_name='')
    for entry in tprofile_tag["tprofile_result"]:
        if entry[0] != function_name:
            continue
        calls = entry[3]
        if best_calls == 0 or best_calls < calls:
            best_calls = calls
            timing["mean"] = entry[1]
            timing["stddev"] = entry[2]
    return timing
def CutSimulatorScenario(data_dir, point_list):
    '''
    Cut simulator scenario logs around the requested time points using the
    scenario_log_razor tool from the configured senseauto checkout.

    input:
        data_dir: directory containing simulator_scenario/simulator_scenario_log.bin
        point_list: [{"time_point": 121545, "front_duration": 15,
                      "behind_duration": 5, "output_dir": "/path/to/output"}]
    '''
    config_ = loadTag('config/data_pipeline_config.json', '')
    try:
        # the tool may live in the build tree or the source tree
        build_razor = os.path.join(
            config_["senseauto_path"],
            "senseauto-simulation/node/build/module/simulator/tools/scenario_log_tools/scenario_log_razor"
        )
        src_razor = os.path.join(
            config_["senseauto_path"],
            "senseauto-simulation/node/module/simulator/tools/scenario_log_tools/scenario_log_razor"
        )
        if os.path.isfile(build_razor):
            razor = build_razor
        elif os.path.isfile(src_razor):
            razor = src_razor
        else:
            print("checkpoint53")
            print("cannot find the simulator_scenario_log_razor, exit")
            # the original fell through and hit a NameError on `razor`
            # below (masked by the broad except); bail out explicitly
            return
        for seg_point in point_list:
            time_point = seg_point["time_point"]
            front_duration = seg_point["front_duration"]
            behind_duration = seg_point["behind_duration"]
            output_dir = seg_point["output_dir"]
            simulator_file = os.path.join(
                data_dir, 'simulator_scenario/simulator_scenario_log.bin')
            outout_dir_path = os.path.join(output_dir, 'simulator_scenario')
            print("checkpoint54")
            # time_point is in microseconds -> seconds; durations are padded
            # (15 s before / 20 s after) -- TODO confirm padding rationale
            razor_cmd = "{} 1 {} {} {} {} {}".format(
                razor, simulator_file, outout_dir_path,
                str(int(time_point // 1000000)), str(front_duration + 15),
                str(behind_duration + 20))
            print("checkpoint55")
            print(razor_cmd)
            os.system(razor_cmd)
            print("checkpoint56")
    except Exception as e:
        print("checkpoint57")
        print(getTime() +
              "\033[1;31m [ERROR]\033[0m cut simulator.bin failed ")
def prePredictionEval(self, dir_path, prediction_tagging_list):
    """Fan prediction-evaluation tagging jobs out over the thread pool.

    Loads the shared bad-cases result once, then schedules
    prediction_evaluation_iteration.main for every record tag.

    :param dir_path: data directory containing prediction_evaluation/result/
    :param prediction_tagging_list: list of record-tag dicts to process
    """
    # NOTE: Python 2 print statement -- this module targets py2
    print "\033[1;32m [INFO]\033[0m! prediction tagging ing .........\n"
    eval_bad_result = loadTag(
        dir_path, 'prediction_evaluation/result/bad_cases.json')
    input_list = []
    for record_tag in prediction_tagging_list:
        # threadpool expects (args, kwargs) tuples
        input_list.append(([dir_path, eval_bad_result, record_tag], None))
    try:
        requests = makeRequests(prediction_evaluation_iteration.main,
                                input_list)
        [self.pool.putRequest(req) for req in requests]
        self.pool.wait()
    except Exception as e:
        # best-effort: a failed batch is silently skipped -- NOTE(review):
        # consider logging e
        return
    print "\033[1;32m [INFO]\033[0m! prediction tagging successfully\n"
def getLocalEvalResult(dir_name):
    """Collect localization evaluation tags, normalizing labels/data_type.

    :param dir_name: data directory containing logs/localization_eval/
    :return: {} when the result file is missing, None when it holds no
        tags, otherwise the list of tag dicts
    """
    result_dir = os.path.join(dir_name, 'logs/localization_eval/')
    local_tag = loadTag(result_dir, 'evaluation_result.json')
    if local_tag is None:
        return {}
    tags = local_tag.get("Tags")
    if tags is None or len(tags) < 1:
        return None
    for tag in tags:
        tag.setdefault("labels", [])
        # the English tag name doubles as a label
        tag["labels"].append(tag["tag_en"])
        tag["data_type"] = "eval"
    return tags
def getPredictionResult(self, dir_path):
    """Read the prediction evaluation quality for *dir_path*.

    Falls back to {"quality": {"level": "bad"}} when the result file exists
    but lacks the expected key.
    """
    prediction_result = defaultdict(lambda: {})
    result_dir = ''.join([dir_path, '/prediction_evaluation/result/'])
    result_json = loadTag(result_dir, tag_file_name='result.json')
    if result_json is None:
        return prediction_result
    try:
        prediction_result["quality"] = result_json["quality"]
    except Exception as err:
        logger(1, str(err), LOG_FILE="../upload_list/error.log")
        print("ap key is not here")
        prediction_result["quality"] = {}
        prediction_result["quality"]['level'] = "bad"
    return prediction_result
def deterDirProperty(self, dir_path):
    """Decide which tag describes *dir_path*.

    Uses the directory's own data tag when it carries all required keys;
    otherwise falls back to a matched backup tag, and finally to a
    freshly generated tag.
    """
    tag_data = loadTag(dir_path)
    required_keys = ("origin_record_tag", "task_id", "test_car_id",
                     "issue_id")
    usable = tag_data is not None and all(
        key in tag_data for key in required_keys)
    if usable:
        return tag_data
    backup = self.matchBackUpTag(dir_path)
    if backup is None:
        return self.generateTag(dir_path)
    return backup
def sliceDataCheck(self, dir_path):
    """Check every segmented slice under *dir_path* and post the tags of
    the slices that pass the rec check to senseFT."""
    tag_paths = self.getMatchedFilePaths(dir_path,
                                         pattern="data-ta*",
                                         formats=[".json"],
                                         recursive=True)
    for tag_path in tag_paths:
        slice_dir = os.path.split(tag_path)[0]
        ok, _reason = self.checkRec(slice_dir + '/', slice=True)
        if not ok:
            continue
        # post data tag to senseFT
        slice_tag = loadTag(tag_path, tag_file_name='')
        self.TransferPost(slice_tag)
def getControlResult(self, dir_path):
    """Read the control evaluation result from
    logs/control_eval/control_eval_results.json.

    :param dir_path: data directory (no trailing slash expected)
    :return: the "control_result" dict, or an empty defaultdict when the
        file or keys are missing
    """
    "add the control evaluation result to tag"
    control_result = defaultdict(lambda: {})
    result_file_path = ''.join([dir_path, '/logs/control_eval/'])
    control_json = loadTag(result_file_path,
                           tag_file_name='control_eval_results.json')
    if control_json is not None:
        try:
            control_result = control_json["control_result"]
            if 'stop_error' in control_result['control_precision'].keys():
                # NOTE(review): hard-coded override of the reported
                # stop_error std deviation -- confirm whether this patch
                # over the evaluator's output is still needed
                control_result['control_precision']['stop_error'][
                    'std'] = 0.901
        except Exception as e:
            logger(1, str(e), LOG_FILE="../upload_list/error.log")
            print("control key is not here")
    return control_result
def falseDataUpload(self, dir_path_without):
    """Upload a failed-check data directory to the false_data S3 area and
    post its tag (unless it is a repo_master-only recording).

    :param dir_path_without: data path without trailing /
    """
    false_data_tag = loadTag(dir_path_without)
    dir_name = os.path.split(dir_path_without)[1]
    upload_path = ''.join([dir_path_without, '/ '])
    dst_path = ''.join([
        "s3://sh40_fieldtest_dataset/", data_month, '/false_data/',
        dir_name
    ])
    arg2 = ''.join(
        ["s3 cp ", upload_path, dst_path, upload_recursive + end_point])
    AWS_DRIVER.main(arg2.split())
    if false_data_tag is not None and false_data_tag["topic_name"] != [
            "repo_master"
    ]:
        false_data_tag["data_link"] = dst_path
        false_data_tag["data_type"] = "raw"
        # BUG FIX: original called self.TransferPost(self, false_data_tag),
        # passing self twice through the bound method -> TypeError
        self.TransferPost(false_data_tag)
def main(dir_path):
    """Run case tagging for one downloaded data directory and re-upload
    the sliced result.

    :param dir_path: data directory containing data-tag.json
    """
    data_tag = loadTag(dir_path)
    upload_path = data_tag["data_link"]
    record_tag = data_tag["origin_record_tag"][0]
    # segmentation window around the record (times in ms in the tag)
    seg_point = {}
    seg_point["time_point"] = record_tag["start"] * 1000
    if "end" in record_tag:
        seg_point["front_duration"] = 2
        seg_point["behind_duration"] = (record_tag["end"] -
                                        record_tag["start"]) / 1000
    else:
        # no explicit end: use a fixed 25 s before / 15 s after window
        seg_point["front_duration"] = 25
        seg_point["behind_duration"] = 15
    seg_point["output_dir"] = dir_path
    #CutSimulatorScenario(dir_path,seg_point)
    # tagging timestamp: midpoint of [start, end], else start (seconds)
    if "end" in record_tag.keys():
        input_timestamp = (record_tag["start"] + record_tag["end"]) / 2000
    else:
        input_timestamp = record_tag["start"] / 1000
    module_name = record_tag["tag_en"]
    # brake-like tags use module 2, take-over 3, everything else 1
    if module_name == "false_brake" or module_name == "Emergency_brake" or module_name == "sharp_slowdown":
        tagging_module = 2
    elif module_name == "take_over":
        tagging_module = 3
    else:
        tagging_module = 1
    tagging_tag = {
        "input_dir": dir_path + '/',
        "module_name": record_tag["tag_en"],
        "input_timestamp": input_timestamp,
        "tagging_module": tagging_module
    }
    try:
        os.makedirs(dir_path + '/screen_cast')
    except Exception as e:
        # directory already exists; marker print kept from original
        print(111)
    # NOTE(review): download_path is not defined in this function --
    # presumably a module-level global; confirm it matches dir_path
    case_tagging.main(download_path, tagging_tag)
    renameCaseTaggingTag(dir_path + '/screen_cast/')
    data_upload(dir_path, upload_path, slice=True)
def addWhetherInfo(self, global_tag, dir_path):
    """Map lighting/time-of-day labels in *global_tag* onto
    case_tag.json's Global_label.day_time code.

    Codes: 0 unset, 1 direct/back sunlight, 2 dusk, 3 dawn or night.
    The last matching label in global_tag wins.
    """
    tag_dir = os.path.join(dir_path, "screen_cast")
    case_toss = loadTag(tag_dir, "case_tag.json")
    if case_toss is None or global_tag == []:
        return
    day_time = 0
    for label in global_tag:
        if label in ("阳光直射", "阳光背射"):
            day_time = 1
        elif label == "黄昏":
            day_time = 2
        elif label in ("凌晨", "夜晚有路灯", "夜晚无路灯"):
            day_time = 3
    case_toss.setdefault("Global_label", {})["day_time"] = day_time
    saveTag(tag_dir, case_toss, 'case_tag.json')
def calculatePrecision(dir_path, file_list):
    """Compare auto-tagged obstacle ids against ground-truth *.id files and
    print the resulting tagging precision.

    :param dir_path: root directory containing the brake case folders
    :param file_list: brake case directory names to evaluate
    """
    true_tagging = 0
    false_tagging = 0
    for brake_dir_name in file_list:
        case_tagging_path = dir_path + brake_dir_name + '/screen_cast/'
        # ground truth: a single "<id>.id" file dropped in screen_cast/
        id_file_path = GetMatchedFilePaths(case_tagging_path, "*", ".id",
                                           False)
        print(brake_dir_name, id_file_path)
        id_file_name = os.path.basename(id_file_path[0])
        true_id = int(id_file_name.split('.', -1)[0])
        tagging_json = loadTag(case_tagging_path, 'case_finder.json')
        # first tagged obstacle id is compared against the ground truth
        tagging_id = int(tagging_json[0]["obstacle_id"][0])
        if true_id == tagging_id:
            true_tagging += 1
        else:
            false_tagging += 1
            # print the mismatching case for manual inspection
            print(brake_dir_name)
    # NOTE: Python 2 print statement -- this module targets py2
    print "\n====tagging precision====: ", float(true_tagging) / float(
        true_tagging + false_tagging), '\n'
def mainUpload(self, dir_path_without):
    '''
    upload and then archive data
    :param dir_path_without: data path end without /
    :return: 0 on success, 1 on any failure (logged)
    '''
    tag_info = loadTag(dir_path_without)
    dir_name = os.path.split(dir_path_without)[1]
    try:
        # raw upload currently disabled; only archiving runs
        #self.data_upload(dir_path_without, tag_info, slice=False)
        archive_path = self.dirArchive(dir_path_without, tag_info)
        self.check_true_file_list.append(archive_path + '/' + dir_name)
        # a sibling "<dir>_slice/" directory holds segmented data
        if os.path.exists(dir_path_without + "_slice/"):
            self.data_upload(dir_path_without, tag_info, slice=True)
            self.dirArchive(dir_path_without + "_slice", tag_info)
        generate_data_report.getTrueList(archive_path + '/' + dir_name,
                                         True)
        generate_data_report.main(archive_path + '/' + dir_name, True)
        return 0
    except Exception as e:
        logger(1, str(e), LOG_FILE="../upload_list/error.log")
        logging.exception(e)
        return 1
def main(self, raw_dir_path, record_tag):
    """Dispatch case tagging for one record.

    tagging_module selects the strategy: 3 -> take-over case; 2 -> brake
    case driven by the /control/control_error bag topic plus decision-
    planning logs; anything else -> generic timestamp-based tagging.

    :param raw_dir_path: raw data directory holding logs/
    :param record_tag: dict with input_dir/tagging_module/input_timestamp
    """
    dir_path = record_tag["input_dir"]
    tagging_module = record_tag["tagging_module"]
    tag_save_path = ''.join([dir_path, 'screen_cast/'])
    dpc_file_path = ''.join([dir_path, 'dmppcl.bag'])
    logs_path = ''.join([raw_dir_path, '/logs/'])
    bin_path = ''.join([dir_path, 'simulator_scenario/0/logger.bin'])
    if tagging_module == 3:
        self.case_finder(bin_path, tag_save_path, 3)
    elif tagging_module == 2:
        topic_names = ['/control/control_error']
        tag_info = loadTag(dir_path)
        # record start is in milliseconds; bag timestamps are in seconds
        record_timestamp = int(
            tag_info["origin_record_tag"][0]["start"]) / 1000
        print(record_timestamp)
        bag_data = self.getDataFromDpcbag(dpc_file_path, topic_names,
                                          record_timestamp)
        brake_timestamp, ego_label = self.parseBrakeTimestampFromBag(
            bag_data)
        # Beijing wall-clock strings for the brake moment and 1 s before
        brake_timestamp_bj = self.unixToBjclock(brake_timestamp)
        brake_timestamp_bj_1 = self.unixToBjclock(brake_timestamp - 1)
        # fractional second -> milliseconds
        nsec = math.modf(brake_timestamp)[0] * 1000
        # NOTE: Python 2 print statement -- this module targets py2
        print 'brake case timestamp: ', brake_timestamp, brake_timestamp_bj
        decision_planning_log_file = self.getMatchedFilePaths(
            logs_path, 'ros_decision_planning_node.*')
        if decision_planning_log_file == []:
            # no planning logs: fall back to a dummy obstacle id
            obstacle_id = [0]
            id_list = []
        else:
            log_file = self.filterLogFile(decision_planning_log_file)
            obstacle_id, id_list = self.parseDecisionPannningLog(
                log_file, brake_timestamp_bj, brake_timestamp_bj_1,
                int(nsec))
        print 'obstacle id: ', obstacle_id
        # persist the obstacle id plus per-id occurrence counts
        tag = {}
        tag["obstacle_id"] = obstacle_id
        tag["id_list"] = {}
        if id_list != []:
            for id in id_list:
                if id not in tag["id_list"].keys():
                    tag["id_list"][id] = 0
                tag["id_list"][id] += 1
        saveTag(record_tag["input_dir"] + 'screen_cast/', tag,
                'obstacle.json')
        id_json_path = ''.join(
            [record_tag["input_dir"], 'screen_cast/obstacle.json'])
        self.case_finder(bin_path, tag_save_path, 2, id_json_path,
                         int(brake_timestamp), int(nsec), 5, 1)
    else:
        # generic tagging around the record's own timestamp
        self.case_finder(bin_path, tag_save_path, 1, 0,
                         int(record_tag["input_timestamp"]), int(000), 5, 1)