import paramparse
import pyperclip
from ast import literal_eval
from tkinter import Tk


def main():
    _params = {
        'break_lines': 1,
        'field_sep': ' ',
    }
    paramparse.process_dict(_params)
    break_lines = _params['break_lines']
    field_sep = _params['field_sep']

    try:
        in_txt = Tk().clipboard_get()
    except BaseException as e:
        print('Tk().clipboard_get() failed: {}'.format(e))
        return

    in_dict = literal_eval(in_txt)

    out_txt = ''
    # in_lines = in_txt.splitlines()
    # in_line_id = -1

    """collapse double spaces left over from the dict formatting"""
    out_txt2 = in_txt.strip().lstrip('{').rstrip('}').replace('  ', ' ')
    for _var_name in in_dict:
        # out_line = '{}'.format(in_dict[_var_name])
        new_line = ':ivar {}: {}'.format(_var_name, in_dict[_var_name])
        new_line_broken = new_line
        out_txt2 = out_txt2.replace("'{}': ".format(_var_name), ':ivar {}: '.format(_var_name))
        if break_lines:
            # new_line_broken = re.sub("(.{70})", "\\1\n", new_line, 0, re.DOTALL)
            # new_line_broken = insert_newlines_by_matching(in_line_id, in_lines, out_line)
            new_line_broken = insert_newlines(new_line, 70)
        out_txt += '    ' + new_line_broken + '\n\n'

    print('out_txt:\n{}'.format(out_txt))
    print('out_txt2:\n{}'.format(out_txt2))

    # out_txt_final = out_txt + '\n\n' + out_txt2
    try:
        pyperclip.copy(out_txt2)
        spam = pyperclip.paste()
    except BaseException as e:
        print('Copying to clipboard failed: {}'.format(e))
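
# insert_newlines is not defined in this file; a minimal sketch under the
# assumption that it re-wraps one long ':ivar' line at word boundaries so
# that no output line exceeds the given width:
import textwrap


def insert_newlines(text, width=70):
    """wrap at word boundaries; returns one string with embedded newlines"""
    return '\n'.join(textwrap.wrap(text, width=width))
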
import time

import paramparse


def main():
    params = {
        'mode': 0,
        'sleep_t': 0,
        'switch_t': 3,
        'max_t': 120,
    }
    paramparse.process_dict(params)
    sleep_t = params['sleep_t']
    max_t = float(params['max_t'])
    switch_t = params['switch_t']
    mode = params['mode']

    print(f'Waiting for {switch_t} seconds to allow switching to the target window')
    time.sleep(switch_t)

    if mode == 0:
        key = b'key Page_Down '
    else:
        key = b'key Return '

    start_t = time.time()
    while True:
        try:
            keypress(key)
            # pyautogui.press("pagedown")
        except BaseException as e:
            print('BaseException: {}'.format(e))
            break
        end_t = time.time()
        time_elapsed = end_t - start_t
        if time_elapsed > max_t:
            break
        print(time_elapsed)
        if sleep_t:
            time.sleep(sleep_t)
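
# keypress is not defined in this file; a plausible minimal sketch, assuming
# the byte strings above (b'key Page_Down ', b'key Return ') are xdotool
# commands fed to its script mode ('xdotool -' reads commands from stdin):
import subprocess


def keypress(cmd):
    """send one xdotool command such as b'key Page_Down '"""
    subprocess.run(['xdotool', '-'], input=cmd, check=True)
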
import paramparse
import pyperclip
from tkinter import Tk


def main():
    _params = {
        'field_id': -1,
        'field_sep': ' ',
    }
    paramparse.process_dict(_params)
    field_id = _params['field_id']
    field_sep = _params['field_sep']

    try:
        in_txt = Tk().clipboard_get()
    except BaseException as e:
        print('Tk().clipboard_get() failed: {}'.format(e))
        return

    in_txt = in_txt.lstrip('#')
    tokens = in_txt.strip().split(field_sep)
    if field_id < 0:
        if tokens[0].startswith('python'):
            field_id = 2
        elif tokens[0].endswith('.py'):
            field_id = 1
        else:
            field_id = 0
    print('tokens: {}'.format(tokens))
    out_tokens = tokens[field_id:]
    print('out_tokens: {}'.format(out_tokens))
    out_txt = field_sep.join(out_tokens)
    print('out_txt: {}'.format(out_txt))

    try:
        pyperclip.copy(out_txt)
        spam = pyperclip.paste()
    except BaseException as e:
        print('Copying to clipboard failed: {}'.format(e))
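
# Example (hypothetical clipboard content): copying the line
#   'python3 vid2img.py src_path=. n_frames=10'
# and running this script leaves just the arguments
#   'src_path=. n_frames=10'
# on the clipboard, since the leading 'python3 <script>.py' tokens are
# skipped via field_id=2.
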
import os
import sys
import time
from datetime import datetime
from pprint import pformat

import cv2
import skvideo.io
from tqdm import tqdm

import paramparse

# read_image, sizeAR, resizeAR, sortKey, VideoWriterGPU and move_or_del_files
# are assumed to be provided by this repo's utility modules


def main():
    params = {
        'src_path': '.',
        'save_path': '',
        'save_root_dir': '',
        'img_ext': 'jpg',
        'show_img': 1,
        'del_src': 0,
        'start_id': 0,
        'n_frames': 0,
        'width': 0,
        'height': 0,
        'fps': 30,
        # 'codec': 'FFV1',
        # 'ext': 'avi',
        'codec': 'H264',
        'ext': 'mkv',
        'out_postfix': '',
        'reverse': 0,
        'move_src': 0,
        'use_skv': 0,
        'disable_suffix': 0,
        'read_in_batch': 1,
        'placement_type': 1,
        'recursive': 0,
    }
    paramparse.process_dict(params)
    _src_path = params['src_path']
    save_path = params['save_path']
    img_ext = params['img_ext']
    show_img = params['show_img']
    del_src = params['del_src']
    start_id = params['start_id']
    n_frames = params['n_frames']
    __width = params['width']
    __height = params['height']
    fps = params['fps']
    use_skv = params['use_skv']
    codec = params['codec']
    ext = params['ext']
    out_postfix = params['out_postfix']
    reverse = params['reverse']
    save_root_dir = params['save_root_dir']
    move_src = params['move_src']
    disable_suffix = params['disable_suffix']
    read_in_batch = params['read_in_batch']
    placement_type = params['placement_type']
    recursive = params['recursive']

    img_exts = ['.jpg', '.jpeg', '.png', '.bmp', '.tif']

    src_root_dir = ''
    if os.path.isdir(_src_path):
        if recursive:
            src_paths = [_src_path]
        else:
            src_files = [k for k in os.listdir(_src_path)
                         for _ext in img_exts if k.endswith(_ext)]
            if not src_files:
                # src_paths = [os.path.join(_src_path, k) for k in os.listdir(_src_path) if
                #              os.path.isdir(os.path.join(_src_path, k))]
                src_paths_gen = [
                    [os.path.join(dirpath, d) for d in dirnames
                     if any([os.path.splitext(f.lower())[1] in img_exts
                             for f in os.listdir(os.path.join(dirpath, d))])]
                    for (dirpath, dirnames, filenames) in os.walk(_src_path, followlinks=True)]
                src_paths = [item for sublist in src_paths_gen for item in sublist]
                src_root_dir = os.path.abspath(_src_path)
            else:
                src_paths = [_src_path]
        print('Found {} image sequence(s):\n{}'.format(len(src_paths), pformat(src_paths)))
    elif os.path.isfile(_src_path):
        print('Reading source image sequences from: {}'.format(_src_path))
        src_paths = [x.strip() for x in open(_src_path).readlines() if x.strip()]
        n_seq = len(src_paths)
        if n_seq <= 0:
            raise SystemError('No input sequences found in {}'.format(_src_path))
        print('n_seq: {}'.format(n_seq))
    else:
        raise IOError('Invalid src_path: {}'.format(_src_path))

    if recursive:
        print('searching for images recursively')
    if reverse == 1:
        print('Writing the reverse sequence')
    elif reverse == 2:
        print('Appending the reverse sequence')
    print('placement_type: {}'.format(placement_type))

    exit_prog = 0
    n_src_paths = len(src_paths)
    cwd = os.getcwd()
    for src_id, src_path in enumerate(src_paths):
        seq_name = os.path.basename(src_path)
        print('\n{}/{} Reading source images from: {}'.format(src_id + 1, n_src_paths, src_path))
        src_path = os.path.abspath(src_path)
        if move_src:
            rel_src_path = os.path.relpath(src_path, os.getcwd())
            dst_path = os.path.join(cwd, 'i2v', rel_src_path)
        else:
            dst_path = ''

        if recursive:
            src_file_gen = [
                [os.path.join(dirpath, f) for f in filenames
                 if os.path.splitext(f.lower())[1] in img_exts]
                for (dirpath, dirnames, filenames) in os.walk(src_path, followlinks=True)]
            src_files = [item for sublist in src_file_gen for item in sublist]
        else:
            src_files = [k for k in os.listdir(src_path)
                         for _ext in img_exts if k.endswith(_ext)]
        n_src_files = len(src_files)
        if n_src_files <= 0:
            raise SystemError('No input frames found')
        src_files.sort(key=sortKey)
        print('n_src_files: {}'.format(n_src_files))

        if reverse == 1:
            src_files = src_files[::-1]
        elif reverse == 2:
            src_files += src_files[::-1]
            n_src_files *= 2

        _width, _height = __width, __height

        if os.path.exists(save_path):
            dst_mtime = os.path.getmtime(save_path)
            src_mtime = os.path.getmtime(src_path)
            dst_mtime_fmt = datetime.fromtimestamp(dst_mtime).strftime('%Y-%m-%d %H:%M:%S')
            src_mtime_fmt = datetime.fromtimestamp(src_mtime).strftime('%Y-%m-%d %H:%M:%S')
            print('Output video file already exists: {}'.format(save_path))
            if dst_mtime >= src_mtime:
                print('Last modified time: {} is not older than the source: {} so skipping it'.format(
                    dst_mtime_fmt, src_mtime_fmt))
                save_path = ''
                continue
            else:
                print('Last modified time: {} is older than the source: {} so overwriting it'.format(
                    dst_mtime_fmt, src_mtime_fmt))

        save_dir = os.path.dirname(save_path)
        if save_dir and not os.path.isdir(save_dir):
            os.makedirs(save_dir)

        src_images = []
        print('orig: {} x {}'.format(_width, _height))
        if read_in_batch:
            print('reading all images in batch')
            src_images = [read_image(src_path, k) for k in tqdm(src_files)]
            if _height <= 0 and _width <= 0:
                heights, widths = zip(*[k.shape[:2] for k in src_images])
                _height, _width = max(heights), max(widths)
            elif _height <= 0:
                _height, _width = sizeAR(src_images[0], width=_width)
            elif _width <= 0:
                _height, _width = sizeAR(src_images[0], height=_height)
        else:
            if _height <= 0 or _width <= 0:
                temp_img = cv2.imread(os.path.join(src_path, src_files[0]))
                _height, _width, _ = temp_img.shape

        print('inferred: {} x {}'.format(_width, _height))

        if not save_path:
            save_fname = os.path.basename(src_path)
            if not disable_suffix:
                save_fname = '{}_{}'.format(save_fname, fps)
                if _height > 0 and _width > 0:
                    save_fname = '{}_{}x{}'.format(save_fname, _width, _height)
                if out_postfix:
                    save_fname = '{}_{}'.format(save_fname, out_postfix)
                if reverse:
                    save_fname = '{}_r{}'.format(save_fname, reverse)
            save_path = os.path.join(os.path.dirname(src_path), '{}.{}'.format(save_fname, ext))
            if src_root_dir and save_root_dir:
                save_path = save_path.replace(src_root_dir, save_root_dir)
                print('save_path: {}'.format(save_path))
                print('src_root_dir: {}'.format(src_root_dir))
                print('save_root_dir: {}'.format(save_root_dir))

        print('save_path: {}'.format(save_path))
        # sys.exit()

        if use_skv:
            video_out = skvideo.io.FFmpegWriter(
                save_path, outputdict={
                    '-vcodec': 'libx264',  # use the h.264 codec
                    '-crf': '0',  # set the constant rate factor to 0, which is lossless
                    '-preset': 'veryslow'  # the slower the better compression, in principle;
                    # for other options see https://trac.ffmpeg.org/wiki/Encode/H.264
                })
        elif codec == 'H265':
            video_out = VideoWriterGPU(save_path, fps, (_width, _height))
        else:
            fourcc = cv2.VideoWriter_fourcc(*codec)
            video_out = cv2.VideoWriter(save_path, fourcc, fps, (_width, _height))

        if video_out is None:
            raise IOError('Output video file could not be opened: {}'.format(save_path))

        print('Saving {}x{} output video to {}'.format(_width, _height, save_path))

        frame_id = start_id
        pause_after_frame = 0
        print_diff = max(1, int(n_src_files / 100))
        start_t = time.time()
        while True:
            if read_in_batch:
                image = src_images[frame_id]
            else:
                filename = src_files[frame_id]
                file_path = os.path.join(src_path, filename)
                if not os.path.exists(file_path):
                    raise SystemError('Image file {} does not exist'.format(file_path))
                image = cv2.imread(file_path)

            image = resizeAR(image, _width, _height, placement_type=placement_type)

            if show_img:
                cv2.imshow(seq_name, image)
                k = cv2.waitKey(1 - pause_after_frame) & 0xFF
                if k == 27:
                    exit_prog = 1
                    break
                elif k == ord('q'):
                    break
                elif k == 32:
                    pause_after_frame = 1 - pause_after_frame

            if use_skv:
                video_out.writeFrame(image[:, :, ::-1])  # write the frame as RGB not BGR
            else:
                video_out.write(image)

            frame_id += 1
            if frame_id % print_diff == 0:
                end_t = time.time()
                try:
                    proc_fps = float(print_diff) / (end_t - start_t)
                except:
                    proc_fps = 0
                sys.stdout.write('\rDone {:d}/{:d} frames at {:.4f} fps'.format(
                    frame_id - start_id, n_src_files - start_id, proc_fps))
                sys.stdout.flush()
                start_t = end_t
            if (frame_id - start_id) >= n_frames > 0:
                break
            if frame_id >= n_src_files:
                break

        sys.stdout.write('\n')
        sys.stdout.flush()

        if use_skv:
            video_out.close()  # close the writer
        else:
            video_out.release()

        if show_img:
            cv2.destroyWindow(seq_name)

        if move_src or del_src:
            move_or_del_files(src_path, src_files, dst_path)

        save_path = ''
        if exit_prog:
            break
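
# sortKey is assumed to be a natural-sort key so that frame files order as
# image2.jpg < image10.jpg rather than lexicographically; a minimal sketch
# (works when file names share a common naming pattern):
import re


def sortKey(fname):
    """split into digit and non-digit runs so numeric parts compare as ints"""
    return [int(tok) if tok.isdigit() else tok for tok in re.split(r'(\d+)', fname)]
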
import os
import shutil
import sys
from collections import Counter
from pprint import pformat

import paramparse

# sortKey and strip_from_first_num are assumed to be provided by this repo's
# utility modules


def main():
    params = {
        'root_dir': '.',
        'folder_prefix': '',
        'start_id': 0,
        'end_id': -1,
        'folder_start_id': 1,
        'folder_end_id': 100,
        'search_str': '',
        'find_unique_names': 1,
        'recursive': 0,
        'collage': 0,
        'move_into_subdirs': 0,
        'excluded': [],
        'src_folders': [],
    }
    argv = sys.argv
    paramparse.process_dict(params)
    root_dir = params['root_dir']
    folder_prefix = params['folder_prefix']
    folder_start_id = params['folder_start_id']
    folder_end_id = params['folder_end_id']
    search_str = params['search_str']
    find_unique_names = params['find_unique_names']
    recursive = params['recursive']
    collage = params['collage']
    excluded = params['excluded']
    start_id = params['start_id']
    end_id = params['end_id']
    move_into_subdirs = params['move_into_subdirs']
    src_folders = params['src_folders']

    exclusions = ['Thumbs.db', 'sync.ffs_db']
    img_exts = ('.jpg', '.bmp', '.jpeg', '.png', '.tif', '.tiff', '.webp', '.gif')

    excluded = [str(k) for k in excluded]

    if not src_folders:
        if folder_prefix:
            print('Looking for {:s} in folders beginning with {:s} and IDs going from {:d} to {:d}'.format(
                search_str, folder_prefix, folder_start_id, folder_end_id))
            src_folders = ['{:s} {:d}'.format(folder_prefix, folder_id)
                           for folder_id in range(folder_start_id, folder_end_id + 1)]
        else:
            src_folders = [os.path.join(root_dir, k) for k in os.listdir(root_dir)
                           if k not in excluded and os.path.isdir(os.path.join(root_dir, k))]
    else:
        src_folders = [str(k) for k in src_folders]
        if root_dir:
            src_folders = [os.path.join(root_dir, k) for k in src_folders]

    src_folders = sorted(src_folders, key=sortKey)

    if end_id <= start_id:
        end_id = len(src_folders) - 1
    src_folders = src_folders[start_id:end_id + 1]

    print('excluded:\n{}'.format(pformat(excluded)))
    print('src_folders:\n{}'.format(pformat(src_folders)))
    # exit()

    total_files_found = 0
    total_files_searched = 0
    total_unique_names = 0
    all_unique_names = []
    all_collage_images = []
    matching_files = {}
    n_src_files = {}

    if src_folders:
        collage_path = os.path.join(os.path.dirname(src_folders[0]), 'collage')
        collage_path = os.path.abspath(collage_path)
        counts_collage_path = os.path.join(collage_path, 'counts')
        if not os.path.isdir(counts_collage_path):
            os.makedirs(counts_collage_path)

    move_txt_file = 'ffif_move.txt'

    for src_folder in src_folders:
        src_folder = os.path.abspath(src_folder)
        if src_folder == collage_path:
            continue
        src_folder_name = os.path.basename(src_folder)
        if not os.path.exists(src_folder):
            print('src_folder does not exist: {}'.format(src_folder))
            break

        if recursive:
            src_file_gen = [[os.path.join(dirpath, f) for f in filenames
                             if os.path.splitext(f.lower())[1] in img_exts]
                            for (dirpath, dirnames, filenames) in os.walk(src_folder, followlinks=True)]
            src_paths = [item for sublist in src_file_gen for item in sublist]
        else:
            src_paths = [os.path.join(src_folder, f) for f in os.listdir(src_folder)
                         if f not in exclusions and os.path.isfile(os.path.join(src_folder, f))
                         and os.path.splitext(f.lower())[1] in img_exts]
        src_files = [os.path.basename(f) for f in src_paths]
        n_src_files[src_folder] = len(src_files)

        search_results = [f for f in src_files if search_str in f]
        if len(search_results) > 0:
            if search_str:
                print('Found {:d} matching files in {:s}'.format(len(search_results), src_folder))
                print('\n'.join(search_results) + '\n')
            total_files_found += len(search_results)
            matching_files[src_folder] = search_results
        else:
            print('Done searching {:s} with {:d} files'.format(src_folder, n_src_files[src_folder]))

        unique_names = []
        # collage_images = []
        if find_unique_names:
            src_files_no_ext = [os.path.splitext(f)[0] for f in src_files]
            if src_files_no_ext:
                stripped_names = [strip_from_first_num(k) for k in src_files_no_ext
                                  if strip_from_first_num(k)]
                # unique_names = list(set(stripped_names))
                unique_names = Counter(stripped_names)
                if collage:
                    for unique_name in unique_names:
                        # unique_name_matching_files = [
                        #     (i, k) for i, k in enumerate(src_files_no_ext) if
                        #     longest_common_substring(k, unique_name).size > 0.7 * len(unique_name)]
                        # assert unique_name_matching_files, "No matching files found for {} in {}".format(
                        #     unique_name, src_folder)
                        # unique_name_count = len(unique_name_matching_files)
                        # first_idx = unique_name_matching_files[0][0]
                        unique_name_count = unique_names[unique_name]
                        first_idx = stripped_names.index(unique_name)
                        _name, _ext = os.path.splitext(src_files[first_idx])
                        dst_file = '{} {} {}{}'.format(unique_name, src_folder_name, unique_name_count, _ext)
                        shutil.copy(src_paths[first_idx], os.path.join(collage_path, dst_file))
                        counts_dst_file = '{:06d} {} {}{}'.format(unique_name_count, unique_name,
                                                                  src_folder_name, _ext)
                        shutil.copy(src_paths[first_idx], os.path.join(counts_collage_path, counts_dst_file))
                        # collage_images.append(cv2.imread(src_paths[first_idx]))
                if move_into_subdirs:
                    with open(move_txt_file, 'a') as fid:
                        for unique_name in unique_names:
                            matching_src_files = [(f, src_paths[i]) for i, f in enumerate(src_files)
                                                  if f.startswith(unique_name)]
                            for matching_src_file, matching_src_path in matching_src_files:
                                dst_dir = os.path.join(src_folder, unique_name)
                                os.makedirs(dst_dir, exist_ok=True)
                                dst_path = os.path.join(dst_dir, matching_src_file)
                                txt = '{}\t{}'.format(matching_src_path, dst_path)
                                fid.write(txt + '\n')  # one move per line
                                # print(txt)
                                shutil.move(matching_src_path, dst_path)

                # unique_names.append(src_files_no_ext[0])
                # if collage:
                #     # collage_images.append(cv2.imread(src_paths[0]))
                #     _name, _ext = os.path.splitext(src_files[0])
                #     _name = remove_num(_name)
                #     unique_name_matching_files = [
                #         k for k in src_files_no_ext if
                #         longest_common_substring(k, unique_names[-1]).size > 0.7 * len(unique_names[-1])]
                #     dst_file = '{} {} {}{}'.format(_name, src_folder_name, len(unique_name_matching_files), _ext)
                #     shutil.copy(src_paths[0], os.path.join(collage_path, dst_file))
                #     counts_dst_file = '{:06d} {} {}{}'.format(len(unique_name_matching_files), _name,
                #                                               src_folder_name, _ext)
                #     shutil.copy(src_paths[0], os.path.join(counts_collage_path, counts_dst_file))
                # for i in range(1, len(src_files_no_ext)):
                #     commonprefix = os.path.commonprefix(src_files_no_ext[i - 1:i + 1])
                #     if not commonprefix:
                #         unique_names.append(src_files_no_ext[i])
                #         if collage:
                #             # collage_images.append(cv2.imread(src_paths[i]))
                #             _name, _ext = os.path.splitext(src_files[i])
                #             _name = remove_num(_name)
                #             unique_name_matching_files = [
                #                 k for k in src_files_no_ext if
                #                 longest_common_substring(k, unique_names[-1]).size > 0.7 * len(unique_names[-1])]
                #             dst_file = '{} {} {}{}'.format(_name, src_folder_name, len(unique_name_matching_files),
                #                                            _ext)
                #             shutil.copy(src_paths[i], os.path.join(collage_path, dst_file))
                #             counts_dst_file = '{:06d} {} {}{}'.format(len(unique_name_matching_files), _name,
                #                                                       src_folder_name, _ext)
                #             shutil.copy(src_paths[i], os.path.join(counts_collage_path, counts_dst_file))
                #         continue
                #     non_prefix = src_files_no_ext[i].replace(commonprefix, '')
                #     found_digit = 0
                #     for _c in non_prefix:
                #         if str(_c).isdigit():
                #             found_digit = 1
                #             break
                #         if str(_c).isalpha():
                #             unique_names.append(src_files_no_ext[i])
                #             if collage:
                #                 # collage_images.append(cv2.imread(src_paths[i]))
                #                 _name, _ext = os.path.splitext(src_files[i])
                #                 _name = remove_num(_name)
                #                 unique_name_matching_files = [
                #                     k for k in src_files_no_ext if
                #                     longest_common_substring(k, unique_names[-1]).size > 0.7 * len(unique_names[-1])]
                #                 dst_file = '{} {} {}{}'.format(_name, src_folder_name,
                #                                                len(unique_name_matching_files),
                #                                                _ext)
                #                 shutil.copy(src_paths[i], os.path.join(collage_path, dst_file))
                #                 counts_dst_file = '{:06d} {} {}{}'.format(len(unique_name_matching_files), _name,
                #                                                           src_folder_name, _ext)
                #                 shutil.copy(src_paths[i], os.path.join(counts_collage_path, counts_dst_file))
                #             break

        if unique_names:
            print('Found {} unique names:\n{}\n'.format(len(unique_names), unique_names))
            total_unique_names += len(unique_names)
            all_unique_names += unique_names
            # if collage:
            #     all_collage_images += collage_images

        total_files_searched += len(src_files)

    def extract_name(_str):
        _str_list = _str.split('_')
        _names = []
        for i, _substr in enumerate(_str_list):
            if not all(k.isalpha() for k in _substr):
                break
            _names.append(_substr)
        _name = ' '.join(_names)
        return _name

    print('{:d} files searched'.format(total_files_searched))
    all_unique_names.sort()
    # all_unique_names_proc = list(map(extract_name, all_unique_names))
    all_unique_names_proc = [x for x in map(extract_name, all_unique_names) if x.strip()]
    # all_unique_names_cmb = list(map(lambda x: '\t'.join(x), zip(all_unique_names, all_unique_names_proc)))
    if find_unique_names:
        print('{:d} unique names found:\n{}'.format(total_unique_names, '\n'.join(all_unique_names_proc)))
    if total_files_found > 0:
        print('\n{} matching files found in folders {}'.format(
            total_files_found, [os.path.relpath(k, os.getcwd()) for k in matching_files.keys()]))
    else:
        print('\nNo matching files found')

    n_src_files = [(os.path.relpath(k, os.getcwd()), v)
                   for k, v in sorted(n_src_files.items(), key=lambda item: item[1])]
    print('\nn_src_files:\n{}'.format(pformat(n_src_files)))
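
# strip_from_first_num is assumed to truncate a file name at its first digit,
# so that 'holiday 001' and 'holiday 002' collapse to the same base name;
# a minimal sketch:
def strip_from_first_num(s):
    """return the part of s before its first digit, stripped of whitespace"""
    for i, c in enumerate(s):
        if c.isdigit():
            return s[:i].strip()
    return s.strip()
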
import glob
import os
import xml.etree.ElementTree as ET
from pprint import pprint

import cv2
import numpy as np

import paramparse

# ParamDict, drawBox, imshow and resizeAR are assumed to be provided by this
# repo's utility modules


def main():
    _params = {
        'root_dir': '/data',
        'actor_id': 5,
        'start_id': 1,
        'end_id': -1,
        'ignored_region_only': 0,
        'speed': 0.5,
        'show_img': 0,
        'quality': 3,
        'resize': 0,
        'mode': 0,
        'auto_progress': 0,
    }
    bkg_occl_seq = [24, 28, 47, 48, 49, 54]

    paramparse.process_dict(_params)
    root_dir = _params['root_dir']
    actor_id = _params['actor_id']
    start_id = _params['start_id']
    ignored_region_only = _params['ignored_region_only']
    end_id = _params['end_id']
    show_img = _params['show_img']

    params = ParamDict().__dict__
    actors = params['mot_actors']
    sequences = params['mot_sequences']

    actor = actors[actor_id]
    actor_sequences = sequences[actor]

    if end_id <= start_id:
        end_id = len(actor_sequences) - 1

    print('root_dir: {}'.format(root_dir))
    print('actor_id: {}'.format(actor_id))
    print('start_id: {}'.format(start_id))
    print('end_id: {}'.format(end_id))
    print('actor: {}'.format(actor))
    print('actor_sequences: {}'.format(actor_sequences))

    n_frames_list = []
    _pause = 1
    __pause = 1
    for seq_id in range(start_id, end_id + 1):
        seq_name = actor_sequences[seq_id]
        fname = '{:s}/{:s}/Annotations/xml/{:s}.xml'.format(root_dir, actor, seq_name)
        tree = ET.parse(fname)
        root = tree.getroot()

        out_seq_name = 'detrac_{}_{}'.format(seq_id + 1, seq_name)
        out_fname = '{:s}/{:s}/Annotations/{:s}.txt'.format(root_dir, actor, out_seq_name)
        out_fid = open(out_fname, 'w')

        ignored_region_obj = tree.find('ignored_region')
        n_ignored_regions = 0
        for bndbox in ignored_region_obj.iter('box'):
            if bndbox is None:
                continue
            xmin = float(bndbox.attrib['left'])
            ymin = float(bndbox.attrib['top'])
            width = float(bndbox.attrib['width'])
            height = float(bndbox.attrib['height'])
            out_fid.write('-1,-1,{:f},{:f},{:f},{:f},-1,-1,-1,-1,-1\n'.format(
                xmin, ymin, width, height))
            n_ignored_regions += 1

        if ignored_region_only:
            out_fid.close()
            continue

        img_dir = '{:s}/{:s}/Images/{:s}'.format(root_dir, actor, out_seq_name)
        frames = glob.glob('{:s}/*.jpg'.format(img_dir))
        n_frames = len(frames)
        n_frames_list.append(n_frames)

        seq_occluded_dict = {}
        skip_seq = 0
        print('Processing sequence {:d} :: {:s} n_ignored_regions: {}'.format(
            seq_id, seq_name, n_ignored_regions))

        for frame_obj in tree.iter('frame'):
            target_list = frame_obj.find('target_list')
            frame_id = int(frame_obj.attrib['num'])
            if show_img:
                frame_path = os.path.join(img_dir, 'image{:06d}.jpg'.format(frame_id))
                frame = cv2.imread(frame_path)
                if frame is None:
                    raise IOError('Failed to read frame: {}'.format(frame_path))
                obj_frame = np.copy(frame)
                cv2.putText(frame, str(frame_id), (10, 25), cv2.FONT_HERSHEY_SIMPLEX,
                            0.6, (0, 255, 0), 1, cv2.LINE_AA)

            occluded = []
            occluded_dict = {}
            obj_dict = {}
            for obj in target_list.iter('target'):
                bndbox = obj.find('box')
                obj_id = int(obj.attrib['id'])
                xmin = float(bndbox.attrib['left'])
                ymin = float(bndbox.attrib['top'])
                width = float(bndbox.attrib['width'])
                height = float(bndbox.attrib['height'])

                assert obj_id not in obj_dict, "duplicate object found"
                obj_dict[obj_id] = (xmin, ymin, width, height)

                for occ_idx, occ_obj in enumerate(obj.iter('occlusion')):
                    occlusion = occ_obj.find('region_overlap')
                    occ_status = int(occlusion.attrib['occlusion_status'])
                    occ_id = int(occlusion.attrib['occlusion_id'])
                    occ_xmin = float(occlusion.attrib['left'])
                    occ_ymin = float(occlusion.attrib['top'])
                    occ_width = float(occlusion.attrib['width'])
                    occ_height = float(occlusion.attrib['height'])

                    if occ_status == 0:
                        """occluded by another obj"""
                        _obj_id = obj_id
                        _occ_id = occ_id
                    elif occ_status == 1:
                        """occluding another obj"""
                        _obj_id = occ_id
                        _occ_id = obj_id
                    elif occ_status == -1:
                        """occluded by background;
                        seems extremely unreliable so ignoring"""
                        # _obj_id = obj_id
                        # _occ_id = occ_id
                        continue
                    else:
                        raise AssertionError('Invalid occ_status: {}'.format(occ_status))

                    # assert _obj_id not in occluded_dict, "duplicate occlusion found"
                    if _obj_id not in occluded_dict:
                        occluded_dict[_obj_id] = []
                    occluded_dict[_obj_id].append(
                        (_occ_id, occ_status, occ_xmin, occ_ymin, occ_width, occ_height))
                    occluded.append((obj_id, occ_status, occ_id, occ_xmin, occ_ymin,
                                     occ_width, occ_height))
                    if occ_idx > 0:
                        raise AssertionError('Multiple occluding objects found')

            for obj_id in obj_dict:
                xmin, ymin, width, height = obj_dict[obj_id]
                xmax, ymax = xmin + width, ymin + height
                obj_img = np.zeros((int(height), int(width), 1), dtype=np.uint8)
                obj_img.fill(64)
                if obj_id in occluded_dict:
                    if show_img:
                        _obj_frame = np.copy(obj_frame)
                        drawBox(_obj_frame, xmin, ymin, xmax, ymax, label=str(obj_id),
                                box_color=(255, 255, 255))
                        # __pause = imshow('_obj_frame', _obj_frame, __pause)
                    for _occluded in occluded_dict[obj_id]:
                        occ_id, occ_status, occ_xmin, occ_ymin, occ_width, occ_height = _occluded
                        occ_xmax, occ_ymax = occ_xmin + occ_width, occ_ymin + occ_height
                        start_x, end_x = int(occ_xmin - xmin), int(occ_xmax - xmin)
                        start_y, end_y = int(occ_ymin - ymin), int(occ_ymax - ymin)
                        # assert start_x >= 0 and start_y >= 0, "Invalid occlusion region start: {}".format(_occluded)
                        # assert end_x <= width and end_y <= height, \
                        #     "Invalid occlusion region end: {} for obj: {}\n{}, {}".format(
                        #         _occluded, obj_dict[obj_id], (end_x, end_y), (width, height))
                        start_x, start_y = max(start_x, 0), max(start_y, 0)
                        end_x, end_y = min(end_x, width), min(end_y, height)
                        obj_img[int(start_y):int(end_y), int(start_x):int(end_x)] = 192

                    n_occluded_pix = np.count_nonzero(obj_img == 192)
                    occluded_ratio = float(n_occluded_pix) / float(obj_img.size)

                    if show_img:
                        _obj_img = resizeAR(obj_img, 500)
                        _obj_frame = resizeAR(_obj_frame, 1920, 1080)
                        cv2.putText(_obj_img, str('{}-{} : {:.2f}'.format(obj_id, occ_id, occluded_ratio)),
                                    (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1, cv2.LINE_AA)
                        __pause, k = imshow(('obj_img', '_obj_frame'), (_obj_img, _obj_frame), __pause)
                        if k == ord('q'):
                            print("skip_seq")
                            skip_seq = 1
                            break
                else:
                    occ_id = occ_xmin = occ_ymin = occ_width = occ_height = -1
                    occluded_ratio = 0

                out_str = '{:d},{:d},{:f},{:f},{:f},{:f},1,-1,-1,-1,{:f}\n'.format(
                    frame_id, obj_id, xmin, ymin, width, height, occluded_ratio)
                out_fid.write(out_str)
                seq_occluded_dict[frame_id] = occluded_dict
                if show_img:
                    drawBox(frame, xmin, ymin, xmax, ymax, label=str(obj_id),
                            box_color=(255, 255, 255), is_dotted=(occluded_ratio != 0))

            if skip_seq:
                break

            if show_img:
                for _occ in occluded:
                    obj_id, occ_status, occ_id, occ_xmin, occ_ymin, occ_width, occ_height = _occ
                    if occ_status == 1:
                        box_color = (0, 0, 255)
                    elif occ_status == 0:
                        box_color = (255, 0, 0)
                    elif occ_status == -1:
                        box_color = (0, 255, 0)
                    occ_xmax, occ_ymax = occ_xmin + occ_width, occ_ymin + occ_height
                    drawBox(frame, occ_xmin, occ_ymin, occ_xmax, occ_ymax, box_color=box_color,
                            thickness=1, label='{}-{}'.format(str(obj_id), str(occ_id)))
                frame = resizeAR(frame, 1920, 1080)
                _pause, k = imshow('frame', frame, _pause)
                if k == ord('q'):
                    print("skip_seq")
                    skip_seq = 1
                    break

            if frame_id % 100 == 0:
                print('\t Done {:d}/{:d} frames'.format(frame_id, n_frames))

        if skip_seq:
            out_fid.close()  # close the partially written file before skipping
            continue

        meta_file_path = out_fname.replace('.txt', '.meta')
        with open(meta_file_path, 'w') as meta_fid:
            pprint(seq_occluded_dict, meta_fid)
        out_fid.close()

    print(n_frames_list)
import os
import shutil
import time
from datetime import datetime

import cv2
import pandas as pd

import paramparse

# show_filtered_detections is assumed to be provided by this repo's utility
# modules


def main():
    params = {
        'src_path': '.',
        'save_path': '',
        'save_root_dir': '',
        'img_ext': 'jpg',
        'show_img': 1,
        'del_src': 0,
        'start_id': 0,
        'n_frames': 0,
        'width': 0,
        'height': 0,
        'fps': 30,
        # 'codec': 'FFV1',
        # 'ext': 'avi',
        'sleep': 0.,
        'codec': 'H264',
        'ext': 'mkv',
        'csv_ext': 'txt',
        'out_postfix': '',
        'reverse': 0,
        'min_free_space': 30,
    }
    paramparse.process_dict(params)
    src_path = params['src_path']
    min_free_space = params['min_free_space']
    sleep = params['sleep']
    csv_ext = params['csv_ext']

    img_exts = ('.jpg', '.bmp', '.jpeg', '.png', '.tif', '.tiff', '.webp')

    src_path = os.path.abspath(src_path)

    # script_dir = os.path.dirname(os.path.realpath(__file__))
    # log_path = os.path.join(script_dir, 'siif_log.txt')
    # with open(log_path, 'w') as fid:
    #     fid.write(src_path)
    # os.environ["MIIF_DUMP_IMAGE_PATH"] = src_path

    read_img_path = os.path.join(src_path, "read")
    if not os.path.exists(read_img_path):
        os.makedirs(read_img_path)

    print('SDIF started in {}'.format(src_path))

    max_thresh = 100
    min_thresh = 0
    thresh = 0
    show_all_classes = 0
    win_name = 'detections'
    img = None
    all_detections = []  # so the trackbar callbacks have something to draw before the first image

    def update_thresh(x):
        nonlocal thresh
        thresh = float(x) / 100.
        show_filtered_detections(img, all_detections, thresh, show_all_classes, win_name)

    def update_show_all_classes(x):
        nonlocal show_all_classes
        show_all_classes = int(x)
        show_filtered_detections(img, all_detections, thresh, show_all_classes, win_name)

    cv2.namedWindow(win_name)  # the window must exist before trackbars can be attached to it
    cv2.createTrackbar('threshold', win_name, int(thresh * 100), 100, update_thresh)
    cv2.createTrackbar('show_all_classes', win_name, show_all_classes, 2, update_show_all_classes)

    exit_program = 0
    while not exit_program:
        if sleep > 0:
            time.sleep(sleep)

        _src_files = [k for k in os.listdir(src_path)
                      if os.path.splitext(k.lower())[1] in img_exts]
        for _src_file in _src_files:
            img_src_path = os.path.join(src_path, _src_file)
            _src_file_no_ext, _src_file_ext = os.path.splitext(_src_file)
            time_stamp = datetime.now().strftime("_%y%m%d_%H%M%S_%f")

            img = cv2.imread(img_src_path)
            height, width = img.shape[:2]

            csv_src_path = os.path.join(src_path, '{}.{}'.format(_src_file_no_ext, csv_ext))
            df = pd.read_csv(csv_src_path)
            n_detections = len(df.index)
            all_detections = []
            for i in range(n_detections):
                df_bbox = df.iloc[i]
                xmin = df_bbox.loc['x1']
                ymin = df_bbox.loc['y1']
                xmax = df_bbox.loc['x2']
                ymax = df_bbox.loc['y2']
                class_id = df_bbox.loc['class_id']
                score = df_bbox.loc['score']
                all_detections.append([xmin, ymin, xmax, ymax, class_id, score])

                show_filtered_detections(img, all_detections, thresh, show_all_classes, win_name)

                w = xmax - xmin
                h = ymax - ymin
                if w < 0 or h < 0:
                    print('\nInvalid box in image {} with dimensions {} x {}\n'.format(_src_file, w, h))
                    xmin, xmax = xmax, xmin
                    ymin, ymax = ymax, ymin
                    w = xmax - xmin
                    h = ymax - ymin
                    if w < 0 or h < 0:
                        raise IOError('\nSwapping corners still giving invalid dimensions {} x {}\n'.format(w, h))

                # if w < min_size or h < min_size:
                #     print('\nIgnoring image {} with too small {} x {} box '.format(image_name, w, h))
                #     return None, None, None
                #     # continue

                def clamp(x, min_value=0.0, max_value=1.0):
                    return max(min(x, max_value), min_value)

                xmin = clamp(xmin / width)
                xmax = clamp(xmax / width)
                ymin = clamp(ymin / height)
                ymax = clamp(ymax / height)

                # xmins.append(xmin)
                # xmaxs.append(xmax)
                # ymins.append(ymin)
                # ymaxs.append(ymax)
                # classes_text.append(class_name)
                # class_ids.append(class_id)

            img_dst_path = os.path.join(read_img_path, _src_file_no_ext + time_stamp + _src_file_ext)
            csv_dst_path = os.path.join(read_img_path, _src_file_no_ext + time_stamp + '.' + csv_ext)
            print(f'{img_src_path} -> {img_dst_path}')
            shutil.move(img_src_path, img_dst_path)
            shutil.move(csv_src_path, csv_dst_path)
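
# show_filtered_detections is not defined in this file; a minimal sketch,
# assuming it redraws the boxes whose score clears the threshold (the exact
# show_all_classes filtering behaviour is unknown and omitted here):
import cv2


def show_filtered_detections(img, detections, thresh, show_all_classes, win_name):
    """draw detections with score >= thresh and refresh the window"""
    if img is None:
        return
    vis = img.copy()
    for xmin, ymin, xmax, ymax, class_id, score in detections:
        if score < thresh:
            continue
        cv2.rectangle(vis, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (0, 255, 0), 2)
        cv2.putText(vis, '{}: {:.2f}'.format(class_id, score), (int(xmin), int(ymin) - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
    cv2.imshow(win_name, vis)
    cv2.waitKey(1)
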
import os
import re
import shlex
import subprocess
import time
from datetime import datetime

import numpy as np

import paramparse

# write and linux_path are assumed to be provided by this repo's utility
# modules


def main():
    params = {
        # 'in_fname': "C:/UofA/PhD/Code/deep_mdp/tracking_module/cmd/res_from_gt.md",
        'in_fname': "",
        'working_dir': '',
        'start_id': 0,
        'end_id': -1,
        'server': '',
        'pane_id': '12.0',
        'pane_id_sep': '>',
        'log_dir': 'log/rpip',
        'enable_logging': 1,
        'enable_tee': 0,
        'batch_size': 50,
    }
    paramparse.process_dict(params)
    # processArguments(sys.argv[1:], params)
    _in_fname = params['in_fname']
    working_dir = params['working_dir']
    server = params['server']
    log_dir = params['log_dir']
    enable_logging = params['enable_logging']
    enable_tee = params['enable_tee']
    batch_size = params['batch_size']

    if working_dir:
        os.chdir(working_dir)

    os.makedirs(log_dir, exist_ok=1)

    while True:
        src_dir = os.getcwd()
        src_file_gen = [[(f, os.path.join(dirpath, f)) for f in filenames]
                        for (dirpath, dirnames, filenames) in os.walk(src_dir, followlinks=True)]
        fname_to_path = dict([item for sublist in src_file_gen for item in sublist])

        lines = None
        if _in_fname:
            in_fname = _in_fname
            _in_fname = ''
        else:
            in_fname = input('\nEnter script path or command\n')
        write('\nprocessing input: {}'.format(in_fname))

        in_fname_no_ext, in_fname_ext = os.path.splitext(os.path.basename(in_fname))
        if in_fname_ext in ('.bsh', '.md'):
            in_fnames = [in_fname, ]
        elif in_fname_ext == '.bshm':
            try:
                in_fname_path = fname_to_path[in_fname]
                # in_fname_path = in_fname
            except KeyError:
                raise IOError('invalid file name: {}'.format(in_fname))
            in_fnames = open(in_fname_path, 'r').readlines()
            in_fnames = [__in_fname.strip() for __in_fname in in_fnames
                         if __in_fname.strip() and not __in_fname.startswith('#')]
        else:
            # guard against in_fnames carrying over from a previous iteration
            # when the extension is not recognized
            raise IOError('unsupported input type: {}'.format(in_fname))
        # write('lines:\n{}'.format(lines))

        commands = []
        time_stamp = datetime.now().strftime("%y%m%d_%H%M%S")
        for in_fname in in_fnames:
            if in_fname is not None:
                try:
                    in_fname_path = fname_to_path[in_fname]
                    # in_fname_path = in_fname
                except KeyError:
                    raise IOError('invalid file name: {}'.format(in_fname))
                write('\nReading from: {}'.format(in_fname_path))
                lines = open(in_fname_path, 'r').readlines()
            else:
                assert lines is not None, "Both lines and in_fname cannot be None"

            basename = os.path.basename(in_fname_path)
            basename_no_ext, _ = os.path.splitext(basename)

            # write('lines: {}'.format(pformat(lines)))
            # command_lines = [_line for _line in lines if not _line.startswith('## @ ') and not _line.startswith('# ')]

            valid_line_id = 0
            for line in lines:
                _line = line.strip()
                if not _line or not _line.startswith('python'):
                    continue

                list_start_indices = [m.start() for m in re.finditer("{", _line)]
                list_end_indices = [m.start() for m in re.finditer("}", _line)]
                assert len(list_start_indices) == len(list_end_indices), \
                    "mismatch between number of list start and end markers"

                _multi_token_lines = [_line, ]
                for _start_id, _end_id in zip(list_start_indices, list_end_indices):
                    substr = _line[_start_id:_end_id + 1]
                    replacement_vals = paramparse.str_to_tuple(substr[1:-1])
                    temp_line_list = []
                    for __line in _multi_token_lines:
                        for _val in replacement_vals:
                            new_line = __line.replace(substr, str(_val), 1)
                            temp_line_list.append(new_line)
                    _multi_token_lines = temp_line_list

                for __line_id, __line in enumerate(_multi_token_lines):
                    tee_log_id = '{}_{}_{}_{}'.format(basename_no_ext, valid_line_id, __line_id, time_stamp)
                    if server:
                        tee_log_id = '{}_{}'.format(tee_log_id, server)
                    if enable_logging:
                        if enable_tee:
                            __line = '{} @ tee_log={}'.format(__line, tee_log_id)
                        """disable python output buffering to ensure in-order output in the log file"""
                        if __line.startswith('python '):
                            __line = __line.replace('python ', 'python -u ', 1)
                        elif __line.startswith('python3 '):
                            __line = __line.replace('python3 ', 'python3 -u ', 1)
                        elif __line.startswith('python36 '):
                            __line = __line.replace('python36 ', 'python36 -u ', 1)
                        elif __line.startswith('python2 '):
                            __line = __line.replace('python2 ', 'python2 -u ', 1)
                    commands.append((__line, tee_log_id))
                valid_line_id += 1

        n_commands = len(commands)
        n_batches = int(np.ceil(n_commands / batch_size))
        avg_batch_time = 0
        for batch_id in range(n_batches):
            start_batch_id = int(batch_id * batch_size)
            end_batch_id = min(start_batch_id + batch_size, n_commands)
            actual_batch_size = end_batch_id - start_batch_id
            batch_commands = commands[start_batch_id:end_batch_id]
            batch_start_t = time.time()
            batch_start_time_stamp = datetime.now().strftime("%y%m%d_%H%M%S")

            processes = []
            for _cmd_id, _cmd_data in enumerate(batch_commands):
                _cmd, tee_log_id = _cmd_data
                txt = '{}: {}'.format(_cmd_id + start_batch_id, _cmd)
                write(txt)
                # subprocess.Popen(_cmd.split(' '))
                args = shlex.split(_cmd)
                if enable_logging:
                    out_fname = tee_log_id + '.ansi'
                    zip_fname = out_fname.replace('.ansi', '.zip')
                    out_path = linux_path(log_dir, out_fname)
                    zip_path = os.path.join(log_dir, zip_fname)
                    write('{}\n'.format(out_path))
                    write('{}\n'.format(zip_path))
                    f = open(out_path, 'w')
                    p = subprocess.Popen(args, stdout=f, stderr=f)
                else:
                    write('\n')
                    f = out_fname = zip_fname = None
                    p = subprocess.Popen(args)
                processes.append((p, f, out_fname, zip_fname))

            write('{} :: running batch {} / {} with {} commands ...'.format(
                batch_start_time_stamp, batch_id + 1, n_batches, actual_batch_size))

            for p, f, f_name, zip_fname in processes:
                p.wait()
                if f is None:
                    continue
                f.close()
                zip_cmd = 'cd {} && zip -rm {} {}'.format(log_dir, zip_fname, f_name)
                os.system(zip_cmd)

            batch_end_t = time.time()
            batch_time = batch_end_t - batch_start_t
            avg_batch_time += (batch_time - avg_batch_time) / (batch_id + 1)
            batch_end_time_stamp = datetime.now().strftime("%y%m%d_%H%M%S")
            write('{} :: Batch {} completed. Time taken: {} sec (avg: {} sec)'.format(
                batch_end_time_stamp, batch_id + 1, batch_time, avg_batch_time))
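
# write and linux_path are repo-local helpers; minimal sketches inferred from
# their call sites, assuming write is a flushed print (so progress lines show
# up immediately under redirection) and linux_path joins with forward slashes
# regardless of OS:
import sys


def write(txt):
    sys.stdout.write(txt + '\n')
    sys.stdout.flush()


def linux_path(*args):
    return '/'.join(args)
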
import os
import shutil
import time
from datetime import datetime

import paramparse


def main():
    params = {
        'src_path': '.',
        'save_path': '',
        'save_root_dir': '',
        'img_ext': 'jpg',
        'show_img': 1,
        'del_src': 0,
        'start_id': 0,
        'n_frames': 0,
        'width': 0,
        'height': 0,
        'fps': 30,
        # 'codec': 'FFV1',
        # 'ext': 'avi',
        'sleep': 0.,
        'codec': 'H264',
        'ext': 'mkv',
        'out_postfix': '',
        'reverse': 0,
        'min_free_space': 30,
    }
    paramparse.process_dict(params)
    src_path = params['src_path']
    min_free_space = params['min_free_space']
    sleep = params['sleep']

    img_exts = ('.jpg', '.bmp', '.jpeg', '.png', '.tif', '.tiff', '.webp')

    src_path = os.path.abspath(src_path)

    # script_dir = os.path.dirname(os.path.realpath(__file__))
    # log_path = os.path.join(script_dir, 'siif_log.txt')
    # with open(log_path, 'w') as fid:
    #     fid.write(src_path)
    # os.environ["MIIF_DUMP_IMAGE_PATH"] = src_path

    read_img_path = os.path.join(src_path, "read")
    if not os.path.exists(read_img_path):
        os.makedirs(read_img_path)

    print('MIIF started in {}'.format(src_path))

    exit_program = 0
    while not exit_program:
        if sleep > 0:
            time.sleep(sleep)
        _src_files = [k for k in os.listdir(src_path)
                      if os.path.splitext(k.lower())[1] in img_exts]
        for _src_file in _src_files:
            _src_path = os.path.join(src_path, _src_file)
            _src_file_no_ext, _src_file_ext = os.path.splitext(_src_file)
            time_stamp = datetime.now().strftime("_%y%m%d_%H%M%S_%f")
            _dst_path = os.path.join(read_img_path, _src_file_no_ext + time_stamp + _src_file_ext)
            print(f'{_src_path} -> {_dst_path}')
            shutil.move(_src_path, _dst_path)
from datetime import datetime, timedelta
from tkinter import Tk

import paramparse

# is_time is assumed to be provided by this repo's utility modules


def main():
    _params = {
        'horz': 1,
        'categories_out': 1,
        'categories': 1,
        'category_sep': ' :: ',
        'date_sep': ' – ',
        'pairwise': 1,
        'first_and_last': 0,
        'add_date': 1,
        'add_diff': 1,
        'add_comment': 1,
        'min_start_time': '03:00:00',
    }
    paramparse.process_dict(_params)
    horz = _params['horz']
    categories_out = _params['categories_out']
    categories = _params['categories']
    category_sep = _params['category_sep']
    date_sep = _params['date_sep']
    first_and_last = _params['first_and_last']
    pairwise = _params['pairwise']
    add_date = _params['add_date']
    add_diff = _params['add_diff']
    add_comment = _params['add_comment']
    min_start_time = _params['min_start_time']

    try:
        in_txt = Tk().clipboard_get()  # type: str
    except BaseException as e:
        print('Tk().clipboard_get() failed: {}'.format(e))
        return

    lines = [k.strip() for k in in_txt.split('\n') if k.strip()]

    min_start_time_obj = datetime.strptime(min_start_time, '%H:%M:%S').time()
    midnight_time_obj = datetime.strptime('00:00:00', '%H:%M:%S').time()

    out_lines = []
    out_times = []
    out_date_times = []
    # out_date_time_str = []
    out_categories = []
    date_str = datetime.now().strftime("%Y-%m-%d")
    curr_comments = []
    out_comments = []
    for line in lines:
        category = None
        if categories:
            if category_sep in line:
                temp = line.split(category_sep)
                # print('line: {}'.format(line))
                # print('temp: {}'.format(temp))
                if len(temp) == 2:
                    line, category = temp
                    line = line.strip()
                    category = category.strip()
        if date_sep in line:
            temp = line.split(date_sep)
            _, time_found, _ = is_time(temp[0])
            if time_found:
                print('date_sep line: {}'.format(line))
                print('date_sep temp: {}'.format(temp))
                if len(temp) == 3:
                    line, _, date_str = temp
                    line = line.strip()
                elif len(temp) == 2:
                    line, possible_date = temp
                    line = line.strip()
                    _, time_found, _ = is_time(possible_date)
                    if not time_found:
                        date_str = possible_date

        line, time_found, time_obj = is_time(line)
        if time_found:
            curr_comments_str = ''
            if curr_comments:
                curr_comments_str = ';'.join(curr_comments)
                curr_comments = []
                # if not out_lines:
                #     print('dangling comment found: {}'.format(curr_comments_str))
            # else:
            #     if out_lines:
            #         print('no comment found for: {}'.format(line))
            out_comments.append(curr_comments_str)

            date_obj = datetime.strptime(date_str, '%Y-%m-%d').date()
            if midnight_time_obj <= time_obj < min_start_time_obj:
                date_obj = date_obj + timedelta(days=1)
            date_time_obj = datetime.combine(date_obj, time_obj)
            # date_time_str = date_time_obj.strftime("%Y%m%d_%H%M%S")
            # out_date_time_str.append(date_time_str)
            out_date_times.append(date_time_obj)
            out_times.append(time_obj)
            out_lines.append(line)
            if categories:
                if category is None:
                    category = -1
                out_categories.append(category)
        else:
            curr_comments.append(line)

    if curr_comments:
        curr_comments_str = ';'.join(curr_comments)
    else:
        curr_comments_str = ''
    out_comments.append(curr_comments_str)

    sort_idx = [i[0] for i in sorted(enumerate(out_date_times), key=lambda x: x[1])]

    # out_date_times = [out_date_times[i] for i in sort_idx]
    # out_date_time_str = [out_date_time_str[i] for i in sort_idx]
    # out_times = [out_times[i] for i in sort_idx]
    # out_lines = [out_lines[i] for i in sort_idx]
    # out_comments = [out_comments[i] for i in sort_idx]

    if first_and_last and len(out_lines) > 2:
        out_date_times = [out_date_times[sort_idx[0]], out_date_times[sort_idx[-1]]]
        out_lines = [out_lines[sort_idx[0]], out_lines[sort_idx[-1]]]

    n_out_lines = len(out_lines)

    if pairwise:
        out_txt0 = ''
        out_txt = ''
        out_txt2 = ''
        out_txt3 = ''
        for _line_id in range(n_out_lines - 1):
            if horz:
                _out_txt = '{}\t{}'.format(out_lines[sort_idx[_line_id]], out_lines[sort_idx[_line_id + 1]])
                if add_date:
                    _out_txt = '{}\t{}'.format(date_str, _out_txt)
                if categories_out:
                    _out_txt += '\t{}'.format(out_categories[sort_idx[_line_id + 1]])
                if add_diff:
                    time_diff = out_date_times[sort_idx[_line_id + 1]] - out_date_times[sort_idx[_line_id]]
                    time_diff_str = str(time_diff)
                    if ',' in time_diff_str:
                        """times from different days across midnight"""
                        print('times from different days across midnight found')
                        input()
                        exit()
                        # time_diff_str = time_diff_str.split(',')[-1].strip()
                    _out_txt += '\t{}'.format(time_diff_str)
                if add_comment:
                    """out_comments has an annoying extra entry at top"""
                    _out_txt = '{}\t{}'.format(_out_txt, out_comments[sort_idx[_line_id + 1] + 1])
                out_txt += _out_txt + '\n'
            else:
                out_txt += '{}\t'.format(out_lines[sort_idx[_line_id]])
                out_txt2 += '{}\t'.format(out_lines[sort_idx[_line_id + 1]])
                if add_date:
                    out_txt0 = '{}\t'.format(date_str)
                if categories_out:
                    out_txt3 += '{}\t'.format(out_categories[sort_idx[_line_id + 1]])
        if not horz:
            out_txt += '\n' + out_txt2
            if add_date:
                out_txt = '{}\n{}'.format(out_txt0, out_txt)
            if categories:
                out_txt += '\n' + out_txt3
    else:
        if horz:
            field_sep = '\t'
        else:
            field_sep = '\n'
        out_txt = field_sep.join(out_lines)

    out_txt = out_txt.rstrip()
    print('out_txt:\n{}'.format(out_txt))

    try:
        import pyperclip
        pyperclip.copy(out_txt)
        spam = pyperclip.paste()
    except BaseException as e:
        print('Copying to clipboard failed: {}'.format(e))
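
# is_time is not defined in this file; a minimal sketch inferred from its
# call sites, assuming it tests whether a line is an HH:MM:SS (or HH:MM)
# timestamp and returns the cleaned line, a found flag and the parsed time:
from datetime import datetime


def is_time(line):
    """return (line, time_found, time_obj); time_obj is None when not found"""
    for fmt in ('%H:%M:%S', '%H:%M'):
        try:
            time_obj = datetime.strptime(line.strip(), fmt).time()
        except ValueError:
            continue
        return line.strip(), True, time_obj
    return line, False, None
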
import win32clipboard

import paramparse
import pyperclip

# is_time is assumed to be provided by this repo's utility modules


def main():
    _params = {
        'horz': 1,
        'category': 2,
        'category_sep': ' :: ',
        'date_sep': ' – ',
        'pairwise': 1,
        'first_and_last': 0,
        'add_date': 1,
        'add_diff': 1,
    }
    paramparse.process_dict(_params)
    category = _params['category']
    category_sep = _params['category_sep']

    try:
        win32clipboard.OpenClipboard()
        in_txt = win32clipboard.GetClipboardData()  # type: str
    except BaseException as e:
        print('win32clipboard.GetClipboardData() failed: {}'.format(e))
        win32clipboard.CloseClipboard()
        return
    else:
        win32clipboard.CloseClipboard()

    lines = in_txt.split('\n')
    out_lines = []
    for line in lines:
        in_category = None
        if category_sep in line:
            temp = line.split(category_sep)
            # print('line: {}'.format(line))
            # print('temp: {}'.format(temp))
            if len(temp) == 2:
                line, in_category = temp
                line = line.strip()
                in_category = int(in_category.strip())
        line, time_found, time_obj = is_time(line)
        if time_found:
            if in_category is not None and in_category != category:
                print('replacing category {} with {} in {}'.format(in_category, category, line))
            line = '{} :: {}'.format(line, category)
            out_lines.append(line)

    field_sep = '\n'
    out_txt = field_sep.join(out_lines)
    out_txt = out_txt.rstrip()
    print('out_txt:\n{}'.format(out_txt))

    try:
        pyperclip.copy(out_txt)
        spam = pyperclip.paste()
    except BaseException as e:
        print('Copying to clipboard failed: {}'.format(e))
import paramparse


def main():
    params = {
        # the leading entries of this dict were truncated in the source; the
        # keys are recovered from the parameter unpacking below and the
        # defaults are assumed to match the sibling scripts in this repo
        'src_path': '.',
        'save_path': '',
        'img_ext': 'jpg',
        'show_img': 1,
        'del_src': 0,
        'start_id': 0,
        'n_frames': 0,
        'reverse': 0,
        'combine': 0,
        'res': '',
        'fps': 30,
        'codec': 'H264',
        'ext': 'mkv',
        'use_skv': 0,
        'recursive': 1,
        'disable_suffix': 0,
        'out_postfix': '',
        'add_headers': 0.0,
        'remove_border': 0,
        'rotate': 0,
        'vid_exts': ['.mkv', '.mp4', '.avi', '.mjpg', '.wmv'],
        'img_exts': ['.jpg', '.png', '.jpeg', '.tif', '.bmp'],
    }
    paramparse.process_dict(params)
    _src_path = params['src_path']
    save_path = params['save_path']
    img_ext = params['img_ext']
    show_img = params['show_img']
    del_src = params['del_src']
    start_id = params['start_id']
    n_frames = params['n_frames']
    res = params['res']
    fps = params['fps']
    codec = params['codec']
    ext = params['ext']
    reverse = params['reverse']
    combine = params['combine']
    out_postfix = params['out_postfix']
import os
import re
from datetime import datetime

import paramparse

# write is assumed to be provided by this repo's utility modules


def main():
    params = {
        'in_fname': '',
        'start_id': 0,
        'end_id': -1,
        'server': '',
        'pane_id': '12.0',
        'pane_id_sep': '>',
        'log_dir': 'log/tee',
        'enable_logging': 0,
    }
    paramparse.process_dict(params)
    # processArguments(sys.argv[1:], params)
    _in_fname = params['in_fname']
    start_id = params['start_id']
    end_id = params['end_id']
    server = params['server']
    pane_id_sep = params['pane_id_sep']
    pane_id_default = params['pane_id']
    log_dir = params['log_dir']
    enable_logging = params['enable_logging']

    prev_in_fname = None
    while True:
        lines = None
        if _in_fname:
            in_fname = _in_fname
            _in_fname = ''
        else:
            in_fname = input('\nEnter script path or command\n')
        if not in_fname.strip():
            if prev_in_fname is not None:
                in_fname = prev_in_fname
            else:
                continue
        write('\nprocessing input: {}'.format(in_fname))

        src_dir = os.getcwd()
        src_file_gen = [[(f, os.path.join(dirpath, f)) for f in filenames]
                        for (dirpath, dirnames, filenames) in os.walk(src_dir, followlinks=True)]
        fname_to_path = dict([item for sublist in src_file_gen for item in sublist])

        prev_in_fname = in_fname

        in_fname_no_ext, in_fname_ext = os.path.splitext(os.path.basename(in_fname))
        if in_fname_ext == '.bsh':
            in_fnames = [in_fname, ]
        elif in_fname_ext == '.bshm':
            try:
                in_fname_path = fname_to_path[in_fname]
            except KeyError:
                raise IOError('invalid file name: {}'.format(in_fname))
            in_fnames = open(in_fname_path, 'r').readlines()
            in_fnames = [__in_fname.strip() for __in_fname in in_fnames
                         if __in_fname.strip() and not __in_fname.startswith('#')]
        else:
            tokens = in_fname.split(' ')
            try:
                _pane_id = int(tokens[0])
            except ValueError:
                try:
                    _pane_id = float(tokens[0])
                except ValueError as e:
                    print('float pane id failed: {}'.format(e))
                    _pane_id = pane_id_default
                    _line = in_fname
                else:
                    _pane_id = tokens[0]
                    _line = in_fname[len(tokens[0]) + 1:]
            else:
                _pane_id = '{}.0'.format(_pane_id)
                _line = in_fname[len(tokens[0]) + 1:]
            lines = ['## @ {}:{}'.format(server, _pane_id), _line]
            in_fnames = [None, ]
        # write('lines:\n{}'.format(lines))

        all_pane_ids = []
        for in_fname in in_fnames:
            if in_fname is not None:
                try:
                    in_fname_path = fname_to_path[in_fname]
                except KeyError:
                    raise IOError('invalid file name: {}'.format(in_fname))
                write('\nReading from: {}'.format(in_fname_path))
                lines = open(in_fname_path, 'r').readlines()
            else:
                assert lines is not None, "Both lines and in_fname cannot be None"

            # write('lines: {}'.format(pformat(lines)))

            pane_to_commands = {}
            pane_to_log = {}
            # pprint(lines)
            cmd_id = 0
            pane_id = ''
            # command_lines = [_line for _line in lines if not _line.startswith('## @ ') and not _line.startswith('# ')]
            for line in lines:
                _line = line.strip()
                if not _line:
                    continue
                if _line.startswith('## @ '):
                    pane_id = _line.replace('## @ ', '')
                    if pane_id not in pane_to_commands:
                        cmd_id += 1
                        if cmd_id < start_id:
                            write('skipping {} with too small cmd_id'.format(pane_id))
                            continue
                        if cmd_id > end_id > start_id:
                            write('skipping {} with too large cmd_id'.format(pane_id))
                            break
                        pane_to_commands[pane_id] = []
                        pane_to_log[pane_id] = []
                        # pane_to_commands[pane_id].append('tmux send-keys -t {}'.format(pane_id))
                    continue
                elif _line.startswith('# '):
                    continue

                if server and pane_id and not pane_id.startswith(server):
                    # write('skipping {} with invalid server'.format(pane_id))
                    if pane_id in pane_to_commands:
                        del pane_to_commands[pane_id]
                        del pane_to_log[pane_id]
                    continue

                list_start_indices = [m.start() for m in re.finditer("{", _line)]
                list_end_indices = [m.start() for m in re.finditer("}", _line)]
                assert len(list_start_indices) == len(list_end_indices), \
                    "mismatch between number of list start and end markers"

                _multi_token_lines = [_line, ]
                for _start_id, _end_id in zip(list_start_indices, list_end_indices):
                    substr = _line[_start_id:_end_id + 1]
                    replacement_vals = paramparse.str_to_tuple(substr[1:-1])
                    temp_line_list = []
                    for __line in _multi_token_lines:
                        for _val in replacement_vals:
                            new_line = __line.replace(substr, str(_val), 1)
                            temp_line_list.append(new_line)
                    _multi_token_lines = temp_line_list

                # _line_tokens = _line.split(' ')
                # _multi_tokens = [(__id, token) for __id, token in enumerate(_line_tokens)
                #                  if token.startswith('__rpit_multi__')]
                # if _multi_tokens:
                #     assert len(_multi_tokens) == 1, "only singular multi_tokens per line supported for now"
                #     __id, _multi_token = _multi_tokens[0]
                #     _multi_token_lines = []
                #     _arg_name, _file_name, _start_id, _end_id = _multi_token.split(':')
                #     _multi_token_arg_vals = open(_file_name, 'r').readlines()[_start_id:_end_id + 1]
                #     for _arg_val in _multi_token_arg_vals:
                #         _multi_tokens_copy = _multi_tokens[:]
                #         _multi_tokens_copy[__id] = '{}={}'.format(_arg_name, _arg_val)
                #         _multi_token_line = ' '.join(_multi_tokens_copy)
                #         _multi_token_lines.append(_multi_token_line)
                # else:
                #     _multi_token_lines = [_line, ]
                # print(_multi_token_lines)

                for __line in _multi_token_lines:
                    if enable_logging:
                        time_stamp = datetime.now().strftime("%y%m%d_%H%M%S_%f")
                        log_fname = '{}.ansi'.format(time_stamp)
                        log_path = os.path.join(log_dir, log_fname)
                        tee_log_id = '{}:{}'.format(pane_id, time_stamp)
                        __line = '{} @ tee_log={} 2>&1 | tee {}'.format(__line, tee_log_id, log_path)
                        """disable python output buffering to ensure in-order output in the log file"""
                        if __line.startswith('python '):
                            __line = __line.replace('python ', 'python -u ', 1)
                        elif __line.startswith('python3 '):
                            __line = __line.replace('python3 ', 'python3 -u ', 1)
                        elif __line.startswith('python36 '):
                            __line = __line.replace('python36 ', 'python36 -u ', 1)
                        elif __line.startswith('python2 '):
                            __line = __line.replace('python2 ', 'python2 -u ', 1)
                        pane_to_log[pane_id].append(log_fname)
                    pane_to_commands[pane_id].append('tmux send-keys -t {} "{}" Enter Enter'.format(
                        pane_id,
                        # pane_to_commands[pane_id][-1],
                        __line)
                    )

            # write('pane_to_commands: {}'.format(pformat(pane_to_commands)))
            lines = None
            for pane_id in pane_to_commands:
                for _cmd_id, _cmd in enumerate(pane_to_commands[pane_id]):
                    txt = 'running command {} in {}'.format(_cmd_id, pane_id)
                    # txt += '\n' + _cmd
                    if enable_logging:
                        mkdir_cmd = 'mkdir -p {}'.format(log_dir)
                        os.system('tmux send-keys -t {} "{}" Enter'.format(pane_id, mkdir_cmd))
                    os.system(_cmd)
                    if enable_logging:
                        log_fname = pane_to_log[pane_id][_cmd_id]
                        zip_fname = log_fname.replace('.ansi', '.zip')
                        zip_path = os.path.join(log_dir, zip_fname)
                        zip_cmd = '(cd {} && zip -rm {} {})'.format(log_dir, zip_fname, log_fname)
                        os.system('tmux send-keys -t {} "{}" Enter'.format(pane_id, zip_cmd))
                        txt += ' with logging in {}'.format(zip_path)
                    write(txt)
                all_pane_ids.append(pane_id)
            # all_pane_ids += list(pane_to_commands.keys())

        all_pane_ids_str = '__'.join(all_pane_ids).replace(':', '_')
        write('{}'.format(all_pane_ids_str))
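
# For illustration (hypothetical input): a script line
#   python3 train.py lr={0.1,0.01}
# under the header '## @ grs:12.0' expands into two commands, dispatched as
#   tmux send-keys -t grs:12.0 "python3 train.py lr=0.1" Enter Enter
#   tmux send-keys -t grs:12.0 "python3 train.py lr=0.01" Enter Enter
# (with 'python3 -u' and the tee suffix substituted in when logging is enabled)
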
import os

import numpy as np

import paramparse
import pyperclip
from tkinter import Tk


def main():
    _params = {
        'field_id': 1,
        'field_sep': '\t',
        'token_sep': '/',
        'inverted': 1,
        'remove_duplicates': 1,
        'max_cols': 7,
        'id_col': 1,
        'data_cols': [0, 3, 6],
        'extract_unique_id': 1,
        # 'mismatch_replace': [],
        'mismatch_replace': ['abs', 'rand'],
    }
    paramparse.process_dict(_params)
    field_id = _params['field_id']
    field_sep = _params['field_sep']
    token_sep = _params['token_sep']
    inverted = _params['inverted']
    remove_duplicates = _params['remove_duplicates']
    max_cols = _params['max_cols']
    id_col = _params['id_col']
    extract_unique_id = _params['extract_unique_id']
    data_cols = _params['data_cols']

    # pyautogui.hotkey('ctrl', 'c')
    # while True:
    #     _ = input('Press enter to continue\n')
    try:
        in_txt = Tk().clipboard_get()
    except BaseException as e:
        print('Tk().clipboard_get() failed: {}'.format(e))
        return
    else:
        lines = in_txt.split('\n')

    lines_list = [line.strip().split(field_sep) for line in lines if line.strip()]
    n_lines = len(lines_list)
    assert n_lines > 1, "too few lines to analyse"

    numerical_column_ids = [i for i, val in enumerate(lines_list[0]) if is_number(val)]
    if data_cols:
        numerical_column_ids = [numerical_column_ids[i] for i in data_cols]
    n_cols = len(numerical_column_ids)
    if max_cols < n_cols:
        numerical_column_ids = numerical_column_ids[:max_cols]
        n_cols = max_cols

    numerical_data = [[float(line[i]) for i in numerical_column_ids] for line in lines_list]
    numerical_data = np.array(numerical_data)

    all_ids = [line[id_col] for line in lines_list]
    all_ids_unique = all_ids
    if extract_unique_id:
        all_ids_commonprefix = os.path.commonprefix(all_ids)
        if all_ids_commonprefix:
            all_ids_unique = [k.replace(all_ids_commonprefix, '') for k in all_ids_unique]
        all_ids_inv = [_id[::-1] for _id in all_ids]
        all_ids_inv_commonprefix = os.path.commonprefix(all_ids_inv)
        if all_ids_inv_commonprefix:
            all_ids_inv_commonprefix_inv = all_ids_inv_commonprefix[::-1]
            all_ids_unique = [k.replace(all_ids_inv_commonprefix_inv, '') for k in all_ids_unique]

    max_row_ids = np.argmax(numerical_data, axis=0)
    min_row_ids = np.argmin(numerical_data, axis=0)
    max_vals = np.amax(numerical_data, axis=0)
    min_vals = np.amin(numerical_data, axis=0)
    # mean_vals = np.mean(numerical_data, axis=0)
    # median_vals = np.median(numerical_data, axis=0)

    max_lines = [lines[i] for i in max_row_ids]
    min_lines = [lines[i] for i in min_row_ids]
    max_line_ids = [all_ids_unique[i] for i in max_row_ids]
    min_line_ids = [all_ids_unique[i] for i in min_row_ids]

    max_vals_str = '\t'.join('{}\t{}'.format(k1, k2) for k1, k2 in zip(max_line_ids, max_vals))
    min_vals_str = '\t'.join('{}\t{}'.format(k1, k2) for k1, k2 in zip(min_line_ids, min_vals))
    # mean_vals_str = '\t'.join(str(k) for k in mean_vals)
    # median_vals_str = '\t'.join(str(k) for k in median_vals)

    out_txt = '\n'.join([
        max_vals_str,
        min_vals_str,
        # mean_vals_str,
        # median_vals_str,
    ])
    out_txt += '\n\n' + '\n'.join(max_lines) + '\n\n' + '\n'.join(min_lines)

    try:
        pyperclip.copy(out_txt)
        spam = pyperclip.paste()
    except BaseException as e:
        print('Copying to clipboard failed: {}'.format(e))
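
# is_number is not defined in this file; a minimal sketch, assuming it simply
# tests whether a table cell parses as a float:
def is_number(s):
    try:
        float(s)
    except (TypeError, ValueError):
        return False
    return True
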
import ctypes
import os

import win32api
import win32gui
from pywinauto import application, mouse

import paramparse
import encryption

# linux_path, run_scp and the encryption module are assumed to be provided by
# this repo's utility modules


def main():
    params = {
        'win_title': 'The Journal 8',
        'use_ahk': 1,
        'mode': 0,
        'wait_t': 10,
        'scp_dst': '',
        'key_root': '',
        'key_dir': '',
        'auth_root': '',
        'auth_dir': '',
        'auth_file': '',
        'auth_path': '',
        'dst_path': '.',
        'scp_path': '.',
        'scp_name': 'grs',
    }
    paramparse.process_dict(params)
    win_title = params['win_title']
    use_ahk = params['use_ahk']
    mode = params['mode']
    wait_t = params['wait_t']
    scp_dst = params['scp_dst']
    dst_path = params['dst_path']
    scp_path = params['scp_path']
    scp_name = params['scp_name']
    key_root = params['key_root']
    key_dir = params['key_dir']
    auth_root = params['auth_root']
    auth_dir = params['auth_dir']
    auth_file = params['auth_file']

    # Window.get_all_windows()

    if mode == -1 or mode == -2:
        pwd0 = auth_file
        port = None
    else:
        auth_path = linux_path(auth_root, auth_dir, auth_file)
        auth_data = open(auth_path, 'r').readlines()
        auth_data = [k.strip() for k in auth_data]
        dst_info = auth_data[0].split(' ')
        name00, name01, ecr0, key0 = dst_info[:4]
        port = None  # ensure port is defined even when the auth entry omits it
        if len(dst_info) > 4:
            port = dst_info[4]
        encryption_params = encryption.Params()
        encryption_params.mode = 1
        encryption_params.root_dir = key_root
        encryption_params.parent_dir = key_dir
        encryption_params.in_file = ecr0
        encryption_params.key_file = key0
        encryption_params.process()
        pwd0 = encryption.run(encryption_params)

    # Form1.SetFocus()
    default_fmy_key = '0'
    if mode == 0 or mode == -1:
        data_type = 'filename (from)'
        highlight_key = '2'
    elif mode == 1 or mode == -2:
        data_type = 'filename (to)'
        highlight_key = '3'
    elif mode == 2:
        data_type = 'log'
        highlight_key = '4'

    while True:
        k = input('\nEnter {}\n'.format(data_type))
        if not k:
            continue

        x, y = win32api.GetCursorPos()
        # EnumWindows(EnumWindowsProc(foreach_window), 0)

        if use_ahk:
            if mode == 0 or mode == -1:
                clip_txt = '{} from {}'.format(k, scp_name)
            elif mode == 1 or mode == -2:
                clip_txt = '{} to {}'.format(k, scp_name)
            try:
                import pyperclip
                pyperclip.copy(clip_txt)
                _ = pyperclip.paste()
            except BaseException as e:
                print('Copying to clipboard failed: {}'.format(e))
            os.system('paste_with_cat_1')
            run_scp(dst_path, pwd0, scp_dst, scp_path, k, mode, port)
            continue

        GetWindowText = ctypes.windll.user32.GetWindowTextW
        GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
        IsWindowVisible = ctypes.windll.user32.IsWindowVisible

        titles = []

        def foreach_window(hwnd, lParam):
            if IsWindowVisible(hwnd):
                length = GetWindowTextLength(hwnd)
                buff = ctypes.create_unicode_buffer(length + 1)
                GetWindowText(hwnd, buff, length + 1)
                titles.append((hwnd, buff.value))
            return True

        win32gui.EnumWindows(foreach_window, None)

        # for i in range(len(titles)):
        #     print(titles[i])

        target_title = [k[1] for k in titles if k[1].startswith(win_title)]
        # print('target_title: {}'.format(target_title))
        if not target_title:
            print('Window with win_title: {} not found'.format(win_title))
            run_scp(dst_path, pwd0, scp_dst, scp_path, k, mode, port)
            continue
        target_title = target_title[0]
        # print('target_title: {}'.format(target_title))

        try:
            app = application.Application().connect(title=target_title, found_index=0)
        except BaseException as e:
            print('Failed to connect to app for window {}: {}'.format(target_title, e))
            run_scp(dst_path, pwd0, scp_dst, scp_path, k, mode, port)
            continue
        try:
            app_win = app.window(title=target_title)
        except BaseException as e:
            print('Failed to access app window for {}: {}'.format(target_title, e))
            run_scp(dst_path, pwd0, scp_dst, scp_path, k, mode, port)
            continue

        try:
            # if mode == 2:
            #     enable_highlight = k.strip()
            #     app_win.type_keys("^t~")
            #     app_win.type_keys("^v")
            #     app_win.type_keys("^+a")
            #     if enable_highlight:
            #         app_win.type_keys("^+%a")
            #         # time.sleep(1)
            #         app_win.type_keys("^+z")
            #         app_win.type_keys("{RIGHT}{VK_SPACE}~")
            #     else:
            #         app_win.type_keys("{VK_SPACE}~")
            #     app_win.type_keys("^s")
            #     continue

            app_win.type_keys("^t{VK_SPACE}::{VK_SPACE}1")
            app_win.type_keys("^+a")
            app_win.type_keys("^2")
            # app_win.type_keys("^+1")
            app_win.type_keys("{RIGHT}{LEFT}~")
            app_win.type_keys("^v")
            if mode == 1:
                app_win.type_keys("{LEFT}{RIGHT}{VK_SPACE}to{VK_SPACE}%s" % scp_name)
            # app_win.type_keys("^+a")
            # app_win.type_keys("^{}".format(highlight_key))
            # app_win.type_keys("{LEFT}{RIGHT}~")
            # app_win.type_keys("^{}".format(default_fmy_key))
            app_win.type_keys("~")
            app_win.type_keys("^s")
            mouse.move(coords=(x, y))
        except BaseException as e:
            print('Failed to type entry in app : {}'.format(e))

        run_scp(dst_path, pwd0, scp_dst, scp_path, k, mode, port)