def vis(result_sha, data_root, result_root):
    """Visualize 3D tracking results for each KITTI sequence.

    For every sequence in the module-level ``seq_list``, loads images from
    ``data_root/image_02/<seq>``, per-frame tracking results from
    ``result_root/<result_sha>/trk_withid/<seq>``, projects each tracked
    object's 3D box into the image and saves the annotated frame as a JPEG
    under ``trk_image_vis/<seq>``.

    Relies on module-level globals: seq_list, colors, max_color, width,
    height, type_whitelist, score_threshold.

    :param result_sha: name of the result run (sub-directory of result_root)
    :param data_root: KITTI data root (contains image_02/ and calib/)
    :param result_root: root directory holding tracking results
    """
    def show_image_with_boxes(img, objects_res, object_gt, calib, save_path, height_threshold=0):
        """Draw projected 3D boxes and track IDs on `img`, then save to `save_path`.

        `object_gt` and `height_threshold` are accepted for interface
        compatibility but are not used here.
        """
        img2 = np.copy(img)
        for obj in objects_res:
            box3d_pts_2d, _ = compute_box_3d(obj, calib.P)
            # Stable per-track color: track ID modulo the palette size.
            color_tmp = tuple([int(tmp * 255) for tmp in colors[obj.id % max_color]])
            img2 = draw_projected_box3d(img2, box3d_pts_2d, color=color_tmp)
            text = 'ID: %d' % obj.id
            if box3d_pts_2d is not None:
                # Anchor the label near corner 4 of the projected box.
                img2 = cv2.putText(img2, text,
                                   (int(box3d_pts_2d[4, 0]), int(box3d_pts_2d[4, 1]) - 8),
                                   cv2.FONT_HERSHEY_TRIPLEX, 0.5, color=color_tmp)
        img = Image.fromarray(img2)
        img = img.resize((width, height))
        img.save(save_path)

    for seq in seq_list:
        image_dir = os.path.join(data_root, 'image_02/%s' % seq)
        calib_file = os.path.join(data_root, 'calib/%s.txt' % seq)
        result_dir = os.path.join(result_root, '%s/trk_withid/%s' % (result_sha, seq))
        save_3d_bbox_dir = os.path.join(result_dir, '../../trk_image_vis/%s' % seq)
        mkdir_if_missing(save_3d_bbox_dir)

        # load the list of frames for this sequence
        images_list, num_images = load_list_from_folder(image_dir)
        print('number of images to visualize is %d' % num_images)
        start_count = 0
        for count in range(start_count, num_images):
            image_tmp = images_list[count]
            if not is_path_exists(image_tmp):
                # NOTE: original code did `count += 1` here, which is a no-op
                # on a for-loop variable; the continue alone is sufficient.
                continue
            image_index = int(fileparts(image_tmp)[1])
            image_tmp = np.array(Image.open(image_tmp))
            result_tmp = os.path.join(result_dir, '%06d.txt' % image_index)

            # load the tracking result for this frame (empty if missing)
            if not is_path_exists(result_tmp):
                object_res = []
            else:
                object_res = read_label(result_tmp)
            print('processing index: %d, %d/%d, results from %s' %
                  (image_index, count + 1, num_images, result_tmp))
            calib_tmp = Calibration(calib_file)  # load the calibration

            # Keep only whitelisted types above the score threshold.
            object_res_filtered = []
            for object_tmp in object_res:
                if object_tmp.type not in type_whitelist:
                    continue
                if hasattr(object_tmp, 'score'):
                    if object_tmp.score < score_threshold:
                        continue
                object_res_filtered.append(object_tmp)

            num_instances = len(object_res_filtered)
            save_image_with_3dbbox_gt_path = os.path.join(save_3d_bbox_dir, '%06d.jpg' % (image_index))
            show_image_with_boxes(image_tmp, object_res_filtered, [], calib_tmp,
                                  save_path=save_image_with_3dbbox_gt_path)
            print('number of objects to plot is %d' % (num_instances))
def ask_overwrite_if_file_exists(filename: str, target: str) -> bool:
    """Ask the user whether an existing file in `target` may be overwritten.

    :param filename: name of the file to (potentially) write
    :param target: directory the file would be written into
    :return: True if the file does not exist yet, or the user answered 'y';
             False if the user answered 'n'.
    """
    file_url = utils.join_paths(target, filename)
    if not utils.is_path_exists(file_url):
        return True
    do_overwrite = None
    # Re-prompt until we get an explicit yes/no answer.
    while do_overwrite not in ('y', 'n'):
        # Fix: the prompt previously printed the literal text "(unknown)"
        # instead of interpolating the file name.
        Logger.logPrint(
            f'\nFile {filename} already exists, overwrite it ? (y/n)')
        do_overwrite = input().lower()
    return do_overwrite == 'y'
def login_weibo():
    """Log in to Weibo (m.weibo.cn).

    If a saved cookie file (COOKIES_JSON) exists and is still fresh, log in
    by replaying the stored cookies; otherwise perform a username/password
    login through Selenium and persist the resulting cookies.

    NOTE(review): the original source was whitespace-mangled; the nesting
    below (is_expiry_sub() called unconditionally, the final else pairing
    with `if is_expiry`) is a reconstruction — confirm against the upstream
    repository. In particular, is_expiry_sub()'s behavior when COOKIES_JSON
    is absent is assumed to report "expired" — TODO confirm.

    :return: None
    """
    is_exists_cookies_json = is_path_exists(COOKIES_JSON)
    if is_exists_cookies_json:
        print("cookies json 已存在")
    is_expiry = is_expiry_sub()
    if is_expiry:
        print("Cookies 即将过期 重新获取")
        print("帐号密码登录")
        driver_initial()
        driver = get_browser()
        try:
            print('准备登陆Weibo.cn网站...')
            driver.get(WEIBO_LOGIN_URL)
            # WebDriverWait(driver, 10).until(ec.presence_of_element_located((By.ID, "loginAction")))
            # The line above (presence_of_element_located) did not work here,
            # unlike the visibility_of_element_located wait below.
            WebDriverWait(driver, 10).until(
                ec.visibility_of_element_located((By.ID, "loginAction")))
            elem_user = driver.find_element_by_id("loginName")
            elem_user.send_keys(Mine().username)  # username
            elem_pwd = driver.find_element_by_id("loginPassword")
            elem_pwd.send_keys(Mine().password)  # password
            elem_sub = driver.find_element_by_id("loginAction")
            # Click login; repeated or remote logins may trigger a captcha.
            elem_sub.click()
            WebDriverWait(driver, 20).until(ec.url_contains('m.weibo.cn'))
            sina_cookies = driver.get_cookies()  # list of dicts, one per cookie
            # for cookie in sina_cookies:
            #     cookie['table'] = 'weibo_cookies'
            # with open(COOKIES_JSON, 'w', encoding="utf-8") as f:  # save cookies
            #     f.write(json.dumps(sina_cookies, indent=4))
            dump_dict_to_json(sina_cookies, COOKIES_JSON)
            print('<登陆成功>')
            driver.close()
        except Exception as e:
            print("Error: <登录失败> {}".format(e))
    else:
        # Cookies are still valid: restore the saved session instead of
        # logging in with credentials.
        print("Cookies 登录")
        weibo_cookies = loads_json(COOKIES_JSON)
        driver_initial()
        driver = get_browser()
        driver.delete_all_cookies()
        # Must visit the domain first so add_cookie() is accepted.
        driver.get("https://weibo.cn/")
        for wc in weibo_cookies:
            # Selenium rejects cookies carrying an explicit 'domain' key here.
            wc.pop('domain')
            driver.add_cookie(wc)
        driver.get("https://weibo.cn/")
def initial_configure() -> None:
    """First-run setup: trigger a Weibo login when no cookie file exists yet."""
    cookies_found = is_path_exists(COOKIES_JSON)
    if cookies_found:
        return
    login_weibo()
"title AstroSaveConverter 2.0 - Convert your Astroneer saves between Microsoft and Steam" ) except: pass args = get_args() conversion_type = Scenario.ask_conversion_type() try: if not args.savesPath: original_save_path = Scenario.ask_for_save_folder( conversion_type) else: original_save_path = args.savesPath if not utils.is_path_exists(original_save_path): raise FileNotFoundError except FileNotFoundError as e: Logger.logPrint( '\nSave folder or container not found, press any key to exit') Logger.logPrint(e, 'exception') utils.wait_and_exit(1) if conversion_type == AstroConvType.WIN2STEAM: windows_to_steam_conversion(original_save_path) elif conversion_type == AstroConvType.STEAM2WIN: steam_to_windows_conversion(original_save_path) Logger.logPrint(f'\nTask completed, press any key to exit') utils.wait_and_exit(0) except Exception as e:
def export_save_to_xbox(save: AstroSave, from_file: str, to_path: str) -> None:
    """Export an Astroneer save to the Xbox/Microsoft container format.

    Converts the save into one or more chunk files, writes each chunk into
    `to_path` under its UUID-derived file name, then appends matching entries
    to the first container file found in `to_path` and bumps its chunk count.

    :param save: the save to export (provides chunk names/UUIDs and metadata)
    :param from_file: path of the source (Steam-format) save file
    :param to_path: destination directory holding the Xbox container
    """
    chunk_uuids, converted_chunks = save.convert_to_xbox(from_file)
    chunk_count = len(chunk_uuids)
    # NOTE(review): this only warns — execution continues past the 9-chunk
    # limit; confirm whether it should abort instead.
    if chunk_count >= 10:
        Logger.logPrint(
            f'The selected save contains {chunk_count} which is over the 9 chunks limit AstroSaveconverter can handle yet'
        )
        Logger.logPrint(
            f'Congrats for having such a huge save, please open an issue on the GitHub :D'
        )
    for i in range(chunk_count):
        # The file name is the HEX upper form of the uuid
        chunk_name = save.chunks_names[i]
        Logger.logPrint(f'UUID as file name: {chunk_name}', "debug")
        target_full_path = utils.join_paths(to_path, chunk_name)
        Logger.logPrint(f'Chunk file written to: {target_full_path}', "debug")
        # Regenerating chunk name if it already exists. Very, very unlikely
        while utils.is_path_exists(target_full_path):
            Logger.logPrint(f'UUID: {chunk_name} already exists ! (omg)', "debug")
            chunk_uuids[i] = save.regenerate_uuid(i)
            chunk_name = save.chunks_names[i]
            Logger.logPrint(f'Regenerated UUID: {chunk_name}', "debug")
            target_full_path = utils.join_paths(to_path, chunk_name)
        # TODO [enhance] raise exception if can't write, catch it then delete all the chunks already written and exit
        utils.write_buffer_to_file(target_full_path, converted_chunks[i])
    # Container is updated only after all the chunks of the save have been written successfully
    container_file_name = Container.get_containers_list(to_path)[0]
    container_full_path = utils.join_paths(to_path, container_file_name)
    with open(container_full_path, "r+b") as container:
        # Skip the first 4 bytes, then read the little-endian u32 chunk count
        # that immediately follows (presumably a header/version field — TODO
        # confirm against the container format spec).
        container.read(4)
        current_container_chunk_count = int.from_bytes(container.read(4), byteorder='little')
        new_container_chunk_count = current_container_chunk_count + chunk_count
        # Seek back over the 4 count bytes just read and overwrite in place.
        container.seek(-4, 1)
        container.write(
            new_container_chunk_count.to_bytes(4, byteorder='little'))
    # Build the new container entries in memory, one per chunk:
    # UTF-16LE name (+ optional multi-chunk metadata), zero-padded to 144
    # bytes, followed by the chunk UUID in little-endian byte order.
    chunks_buffer = BytesIO()
    for i in range(chunk_count):
        total_written_len = 0
        encoded_save_name = save.name.encode('utf-16le', errors='ignore')
        total_written_len += chunks_buffer.write(encoded_save_name)
        if chunk_count > 1:
            # Multi-chunks save. Adding metadata, format: '$${i}${chunk_count}$1'
            chunk_metadata = f'$${i}${chunk_count}$1'
            encoded_metadata = chunk_metadata.encode('utf-16le', errors='ignore')
            total_written_len += chunks_buffer.write(encoded_metadata)
        chunks_buffer.write(b"\00" * (144 - total_written_len))
        chunks_buffer.write(chunk_uuids[i].bytes_le)
    Logger.logPrint(f'Editing container: {container_full_path}', "debug")
    utils.append_buffer_to_file(container_full_path, chunks_buffer)
def vis(result_sha, data_root, result_root):
    """Visualize and compare tracking, detection, raw and ground-truth 3D boxes.

    Combined variant of the visualizer: `result_sha` is indexed with [0..2],
    so it must be a sequence of (at least) three result-run names whose
    per-frame outputs ('%06d.txt', 'det%06d.txt', 'raw%06d.txt') are merged.
    Draws tracked boxes in red (BGR (0,0,255)), detections in green and
    ground-truth labels in blue on each image, optionally renders the lidar
    scene, shows the frame in an OpenCV window, and writes frames to an AVI
    per sequence.

    Relies on module-level globals: seq_list, colors, max_color, width,
    height, type_whitelist, score_threshold.

    :param result_sha: sequence of three result-run names
    :param data_root: KITTI data root (image_02/, calib/, label_02/, velodyne/)
    :param result_root: root directory holding tracking results
    """
    def show_image_with_boxes(img, velo, objects_res, objects_res_det, objects_res_raw, labeldata, object_gt, calib, save_path, height_threshold=0, show_lidar=True, save_image=False):
        """Draw tracked/detected/GT boxes on `img`; optionally show lidar and save.

        Returns the annotated image array (BGR order assumed by cv2 calls).
        `objects_res_raw`, `object_gt` and `height_threshold` are currently
        unused (the raw-box drawing loop is commented out below).
        """
        img2 = np.copy(img)
        # Tracked objects: red boxes labeled with their track ID.
        for obj in objects_res:
            box3d_pts_2d, _ = compute_box_3d(obj, calib.P)
            color_tmp = tuple(
                [int(tmp * 255) for tmp in colors[obj.id % max_color]])
            img2 = draw_projected_box3d(img2, box3d_pts_2d, color=(0, 0, 255))
            text = 'Tracked ID: %d, Type: %s' % (obj.id, obj.type)
            if box3d_pts_2d is not None:
                img2 = cv2.putText(
                    img2, text,
                    (int(box3d_pts_2d[4, 0]), int(box3d_pts_2d[4, 1]) - 8),
                    cv2.FONT_HERSHEY_TRIPLEX, 0.5, color=(0, 0, 255))
        # Detections: green boxes, label anchored at a different box corner
        # so tracked and detected labels don't overlap.
        for obj in objects_res_det:
            box3d_pts_2d, _ = compute_box_3d(obj, calib.P)
            color_tmp = tuple(
                [int(tmp * 255) for tmp in colors[obj.id % max_color]])
            img2 = draw_projected_box3d(img2, box3d_pts_2d, color=(0, 255, 0))
            text = 'Detection ID: %d, Type: %s' % (obj.id, obj.type)
            if box3d_pts_2d is not None:
                img2 = cv2.putText(
                    img2, text,
                    (int(box3d_pts_2d[3, 0]), int(box3d_pts_2d[3, 1]) - 8),
                    cv2.FONT_HERSHEY_TRIPLEX, 0.5, color=(0, 255, 0))
        import itertools
        # `labeldata` is a generator and is consumed twice (image overlay and
        # lidar view), so duplicate it with tee before iterating.
        labeldata, labeldata2 = itertools.tee(labeldata)
        # Ground-truth labels: blue boxes.
        for obj in labeldata:
            # print("here")
            box3d_pts_2d, _ = compute_box_3d(obj, calib.P)
            img2 = draw_projected_box3d(img2, box3d_pts_2d, color=(255, 0, 0))
            text = 'GT, Type: %s' % (obj.type)
            if box3d_pts_2d is not None:
                # print("also")
                print(text)
                img2 = cv2.putText(
                    img2, text,
                    (int(box3d_pts_2d[4, 0]), int(box3d_pts_2d[4, 1]) - 8),
                    cv2.FONT_HERSHEY_TRIPLEX, 0.5, color=(255, 0, 0))
        # for obj in objects_res_raw:
        #     box3d_pts_2d, _ = compute_box_3d(obj, calib.P)
        #     color_tmp = tuple([int(tmp * 255)
        #                        for tmp in colors[obj.id % max_color]])
        #     img2 = draw_projected_box3d(img2, box3d_pts_2d, color=(255,0,0))
        #     text = 'Estimate ID: %d' % obj.id
        #     if box3d_pts_2d is not None:
        #         img2 = cv2.putText(img2, text, (int(box3d_pts_2d[2, 0]), int(
        #             box3d_pts_2d[2, 1]) - 8), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color=(255,0,0))
        if show_lidar:
            show_lidar_with_boxes(velo, objects=labeldata2, calib=calib, objects_pred=objects_res)
        img = Image.fromarray(img2)
        img = img.resize((width, height))
        # NOTE(review): cv2.waitKey() with no timeout blocks until a key is
        # pressed — interactive/debug behavior, left unchanged.
        cv2.imshow("Image", img2)
        cv2.waitKey()
        if save_image:
            print("Saving Image at", save_path)
            img.save(save_path)
        return img2

    for seq in seq_list:
        image_dir = os.path.join(data_root, 'image_02/%s' % seq)
        calib_file = os.path.join(data_root, 'calib/%s.txt' % seq)
        label_file = os.path.join(data_root, 'label_02/%s.txt' % seq)
        velo_dir = os.path.join(data_root, 'velodyne/%s' % seq)
        # One result directory per run being compared.
        result_dir = [
            os.path.join(result_root, '%s/trk_withid/%s' % (result_sha[0], seq)),
            os.path.join(result_root, '%s/trk_withid/%s' % (result_sha[1], seq)),
            os.path.join(result_root, '%s/trk_withid/%s' % (result_sha[2], seq))
        ]
        save_3d_bbox_dir = os.path.join(
            result_root, '%s/trk_image_vis/%s' % ("Combined_Final_WithLabel", seq))
        mkdir_if_missing(save_3d_bbox_dir)
        # load the list of frames / lidar scans for this sequence
        images_list, num_images = load_list_from_folder(image_dir)
        velo_list, num_velo = load_list_from_folder(velo_dir)
        print('number of images to visualize is %d' % num_images)
        start_count = 0
        # Whole KITTI label file, one whitespace-split row per line;
        # column 0 is the frame index, column 2 the object type.
        filecontent = np.array([f.split() for f in open(label_file, 'r')])
        # alllabels = np.unique(filecontent[:,2])
        # labels = ['Car', 'Pedestrian', 'Cyclist']
        # finallabelset = [x for x in alllabels if x not in labels]
        # print(alllabels)
        # print(finallabelset)
        # for val in finallabelset:
        #     filecontent = filecontent[filecontent[:,2]!=val,:]
        # print(np.unique(filecontent[:,2]))
        size = (width, height)
        out = cv2.VideoWriter(f'{result_root}/{seq}.avi',
                              cv2.VideoWriter_fourcc(*'DIVX'), 15, size)
        # NOTE(review): debug leftover — caps processing at a single frame.
        # Remove to process the whole sequence.
        num_images = 1
        for count in range(start_count, num_images):
            image_tmp = images_list[count]
            velo_tmp = velo_list[count]
            if not is_path_exists(image_tmp):
                count += 1  # no-op on a for-loop variable; continue suffices
                continue
            image_index = int(fileparts(image_tmp)[1])
            image_tmp = np.array(Image.open(image_tmp))
            img_height, img_width, img_channel = image_tmp.shape
            # Ground-truth rows belonging to this frame.
            filecontentframe = filecontent[filecontent[:, 0] == str(image_index), :]
            print(len(filecontentframe))
            print(f"Labels for frame {image_index}",
                  np.unique(filecontentframe[:, 2]))
            # Lazily parse label rows into Object3d instances.
            labeldata = (Object3d(getstringfromarray(line[2:]))
                         for line in filecontentframe)
            object_res = []
            object_res_det = []
            object_res_raw = []
            # Merge tracked / detection / raw results from the three runs.
            for dirt in result_dir:
                result_tmp = os.path.join(dirt, '%06d.txt' % image_index)
                # load the tracking result
                if is_path_exists(result_tmp):
                    object_res = object_res + read_label(result_tmp)
                result_tmp_det = os.path.join(dirt, 'det%06d.txt' % image_index)
                # load the detection result
                if is_path_exists(result_tmp_det):
                    object_res_det = object_res_det + \
                        read_label(result_tmp_det)
                result_tmp_raw = os.path.join(dirt, 'raw%06d.txt' % image_index)
                # load the raw result
                if is_path_exists(result_tmp_raw):
                    object_res_raw = object_res_raw + \
                        read_label(result_tmp_raw)
            print('processing index: %d, %d/%d, results from %s' %
                  (image_index, count + 1, num_images, result_tmp))
            calib_tmp = Calibration(calib_file)  # load the calibration
            # Filter tracked objects by type whitelist and score threshold.
            object_res_filtered = []
            for object_tmp in object_res:
                if object_tmp.type not in type_whitelist:
                    continue
                if hasattr(object_tmp, 'score'):
                    if object_tmp.score < score_threshold:
                        continue
                center = object_tmp.t
                object_res_filtered.append(object_tmp)
            # Same filtering for detections.
            object_res_filtered_det = []
            for object_tmp in object_res_det:
                if object_tmp.type not in type_whitelist:
                    continue
                if hasattr(object_tmp, 'score'):
                    if object_tmp.score < score_threshold:
                        continue
                center = object_tmp.t
                object_res_filtered_det.append(object_tmp)
            # Raw results: type filter only (score filter intentionally off).
            object_res_filtered_raw = []
            for object_tmp in object_res_raw:
                if object_tmp.type not in type_whitelist:
                    continue
                # if hasattr(object_tmp, 'score'):
                #     if object_tmp.score < score_threshold:
                #         continue
                center = object_tmp.t
                object_res_filtered_raw.append(object_tmp)
            num_instances = len(object_res_filtered)
            save_image_with_3dbbox_gt_path = os.path.join(
                save_3d_bbox_dir, '%06d.jpg' % (image_index))
            # First 4 columns of the velodyne scan: x, y, z, reflectance
            # (presumably — TODO confirm against load_velo_scan).
            velodyne_scan = load_velo_scan(velo_tmp, np.float32, n_vec=4)[:, 0:4]
            img = show_image_with_boxes(
                image_tmp, velodyne_scan,
                object_res_filtered, object_res_filtered_det,
                object_res_filtered_raw, labeldata, [], calib_tmp,
                save_path=save_image_with_3dbbox_gt_path)
            print('number of objects to plot is %d, %d, %d' %
                  (num_instances, len(object_res_filtered_det),
                   len(object_res_filtered_raw)))
            count += 1  # no-op on a for-loop variable
            out.write(img)
        out.release()