def main():
    """Run text-detection inference on a folder of images and save results.

    Reads settings from the module-level ``config``, restores a pretrained
    detection model, runs it over every image under ``Global.infer_img``,
    and writes one ``<path>\\t<json boxes>`` line per image to
    ``Global.save_res_path``, plus per-image visualisations via
    ``draw_det_res``.
    """
    global_config = config['Global']
    # Fall back to CPU when CUDA is unavailable even if the config asks
    # for a GPU.
    device = torch.device('cpu')
    if global_config['use_gpu'] and torch.cuda.is_available():
        device = torch.device('cuda')
    logger.info('使用设备:{}'.format(device))
    logger.info('模型信息:{}'.format(config['Architecture']))
    model = build_model(config['Architecture'])
    model.to(device)
    logger.info('加载预训练模型:{}'.format(global_config['pretrained_model']))
    # map_location keeps CUDA-saved checkpoints loadable on CPU-only hosts.
    state_dict = torch.load(global_config['pretrained_model'],
                            map_location=device)
    model.load_state_dict(state_dict)

    post_process_class = build_post_process(config['PostProcess'])

    # Build the eval transform pipeline: drop label ops (no ground truth at
    # inference time) and keep only the image and its shape metadata.
    ops = []
    for op in config['Eval']['dataset']['transforms']:
        op_name = list(op)[0]
        if 'Label' in op_name:
            continue
        elif op_name == "KeepKeys":
            op[op_name]['keep_keys'] = ['image', 'shape']
        ops.append(op)
    transforms = create_transformers(ops)

    save_res_path = global_config['save_res_path']
    save_dir = os.path.dirname(save_res_path)
    # exist_ok avoids a crash/race when the directory already exists; the
    # truthiness guard handles a bare filename (empty dirname).
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)

    model.eval()
    # no_grad: inference only — skip autograd graph construction.
    with open(save_res_path, 'wb') as fout, torch.no_grad():
        for file in get_img_list(global_config['infer_img']):
            logger.info(f"测试图像:{file}")
            data = {'image': file}
            batch = transforms(data)
            images = np.expand_dims(batch[0], axis=0)
            shape_list = np.expand_dims(batch[1], axis=0)
            images = torch.from_numpy(images).to(device)
            preds = model(images)
            post_result = post_process_class(preds, shape_list)
            boxes = post_result[0]['points']
            # Transcription is unknown at the detection stage, so it stays
            # empty in the serialised result.
            dt_boxes_json = [{"transcription": "", "points": box.tolist()}
                             for box in boxes]
            otstr = file + "\t" + json.dumps(dt_boxes_json) + '\n'
            fout.write(otstr.encode())
            src_img = cv.imread(file)
            draw_det_res(boxes, save_dir, src_img, file)
    logger.info("结果已保存!")
def console_main():
    """Command-line entry point: scrape and download images from a URL.

    Parses CLI arguments, fetches the page, collects matching image URLs,
    then downloads up to ``no_to_download`` of them into ``download_path``
    while rendering a console progress bar. Exits via ``sys.exit`` when no
    images are found.
    """
    # NOTE(review): converted the Python-2 ``print`` statements to the
    # Python-3 function form — the rest of this file uses f-strings and is
    # therefore Python-3-only, so the old statements were a SyntaxError.
    # Console output is byte-identical.
    (URL, no_to_download, format_list, download_path, max_filesize,
     dump_urls, use_ghost) = get_arguments()
    print("\n ImageScraper\n ============\n Requesting page....\n")
    page_html, page_url = get_html(URL, use_ghost)
    images = get_img_list(page_html, page_url, format_list)
    if len(images) == 0:
        sys.exit("Sorry, no images found.")
    if no_to_download == 0:
        # 0 means "download everything that was found".
        no_to_download = len(images)
    print("Found %s images: " % len(images))
    process_download_path(download_path)
    for img_url in images:
        if dump_urls:
            print(img_url)
    count = 0
    percent = 0.0
    failed = 0
    over_max_filesize = 0
    widgets = ['Progress: ', Percentage(), ' ',
               Bar(marker=RotatingMarker()), ' ', ETA(), ' ',
               FileTransferSpeed()]
    pbar = ProgressBar(widgets=widgets, maxval=100).start()
    for img_url in images:
        flag, size_flag = download_image(img_url, download_path, max_filesize)
        if not flag:
            # size_flag distinguishes "too large" from a genuine failure.
            if not size_flag:
                failed += 1
            else:
                over_max_filesize += 1
        count += 1
        percent = percent + 100.0 / no_to_download
        # ``% 100`` clamps float-accumulation overshoot into the bar's
        # 0-100 range.
        pbar.update(percent % 100)
        if count == no_to_download:
            break
    pbar.finish()
    print("\nDone!\nDownloaded %s images" % (count - failed - over_max_filesize))
    return
def main():
    """Run text-recognition inference on every image under ``Global.infer_img``.

    Builds the post-process first (its character dictionary fixes the head's
    output channel count), restores the pretrained model, then logs the
    decoded result for each image.
    """
    global_config = config['Global']
    device = torch.device('cpu')
    if global_config['use_gpu'] is True and torch.cuda.is_available():
        device = torch.device('cuda')
    logger.info('使用设备:{}'.format(device))

    post_process_class = build_post_process(config['PostProcess'],
                                            global_config)
    # The recognition head's width must match the decoder's character set.
    if hasattr(post_process_class, 'character'):
        config['Architecture']["Head"]['out_channels'] = len(
            getattr(post_process_class, 'character'))
        logger.info('构建模型,字典包含{}个字'.format(
            config['Architecture']["Head"]['out_channels']))
    logger.info('模型结构:{}'.format(config['Architecture']))
    model = build_model(config['Architecture'])
    model.to(device)
    logger.info('加载预训练模型 {}...'.format(global_config['pretrained_model']))
    # map_location keeps CUDA-saved checkpoints loadable on CPU-only hosts.
    state_dict = torch.load(global_config['pretrained_model'],
                            map_location=device)
    model.load_state_dict(state_dict)

    # Build the eval transform pipeline in inference mode: drop label ops
    # and keep only the image tensor.
    ops = []
    for op in config['Eval']['dataset']['transforms']:
        op_name = list(op)[0]
        if 'Label' in op_name:
            continue
        elif op_name in ['RecResizeImg']:
            op[op_name]['infer_mode'] = True
        elif op_name == 'KeepKeys':
            op[op_name]['keep_keys'] = ['image']
        ops.append(op)
    global_config['infer_mode'] = True
    transforms = create_transformers(ops, global_config)

    model.eval()
    # no_grad: inference only — skip autograd graph construction.
    with torch.no_grad():
        for file in get_img_list(config['Global']['infer_img']):
            logger.info('输入图像:{}'.format(file))
            data = {'image': file}
            batch = transforms(data)
            images = torch.from_numpy(batch[0]).to(device)
            images = images.unsqueeze(0)
            preds = model(images)
            post_result = post_process_class(preds)
            logger.info("result: {}".format(post_result))
def run():
    """Run the KCF tracker over every sequence directory under ``dataset/``.

    Each sub-directory is expected to hold its frames under ``img/`` and
    ground-truth boxes readable by ``get_ground_truthes``. A failure on one
    sequence is reported and the remaining sequences still run — previously
    the ``try`` wrapped the whole loop, so a single error aborted the batch.
    """
    data_dir = 'dataset'
    data_names = sorted(os.listdir(data_dir))
    for data_name in data_names:
        try:
            tracker = KernelizedCorrelationFilter(
                correlation_type='gaussian', feature='deep')
            data_path = join(data_dir, data_name)
            gts = get_ground_truthes(data_path)
            img_dir = os.path.join(data_path, 'img')
            frame_list = get_img_list(img_dir)
            frame_list.sort()
            # Initialise from the first ground-truth box, track the rest.
            poses = tracker.start(init_gt=gts[0], show=True,
                                  frame_list=frame_list)
            print(poses)
        except Exception as e:
            # Best-effort batch run: report the error, continue with the
            # next sequence.
            print(e)
def __init__(self, path, transform=None, mode='train', val_path=None):
    """Initialise the dataset split loader.

    Args:
        path: Root directory; images live under ``path + 'case/'`` and
            masks under ``path + 'mask/'`` (assumes ``path`` ends with a
            separator — TODO confirm against callers).
        transform: Optional transform stored for the dataset consumer.
        mode: One of ``'train'``, ``'validation'`` or ``'test'``.
        val_path: Root containing ``val.txt``; used only when
            ``mode == 'validation'``.

    Raises:
        ValueError: If ``mode`` is not a supported split. Previously an
            unknown mode fell through and crashed later with an unrelated
            ``NameError`` on ``log``.
    """
    self.img_path = path + 'case/'
    self.mask_path = path + 'mask/'
    self.transform = transform
    self.mode = mode
    if self.mode == 'train':
        # Lazily generate the split listing file on first use.
        if not (os.path.isfile(path + 'train.txt')):
            utils.get_img_list(path, 'train')
        with open(path + 'train.txt', 'r') as f_train:
            log = f_train.readlines()
    elif self.mode == 'validation':
        self.val_path = val_path
        # NOTE(review): the existence check uses ``path + 'val_case.txt'``
        # while the read uses ``val_path + 'val.txt'`` — kept as-is; verify
        # the mismatch is intentional.
        if not (os.path.isfile(path + 'val_case.txt')):
            utils.get_img_list(path, 'val', val_path=val_path)
        with open(val_path + 'val.txt', 'r') as f_val:
            log = f_val.readlines()
    elif self.mode == 'test':
        if not (os.path.isfile(path + 'test.txt')):
            utils.get_img_list(path, 'test')
        with open(path + 'test.txt', 'r') as f_test:
            log = f_test.readlines()
    else:
        raise ValueError(
            "mode must be 'train', 'validation' or 'test', got {!r}".format(
                mode))
    self.log = log
def gen_img():
    """Endlessly yield MJPEG frames as multipart/x-mixed-replace chunks.

    Each chunk wraps the current frame from ``get_img_list`` between the
    ``--frame`` boundary header and a trailing CRLF, the framing expected
    by streaming HTTP responses.
    """
    boundary = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield boundary + get_img_list() + b'\r\n'