def main(data_root='', seqs=('', ), args=""):
    logger.setLevel(logging.INFO)
    data_type = 'mot'
    result_root = os.path.join(Path(data_root), "mot_results")
    mkdir_if_missing(result_root)
    cfg = get_config()
    cfg.merge_from_file(args.config_detection)
    cfg.merge_from_file(args.config_deepsort)

    # run tracking
    accs = []
    for seq in seqs:
        logger.info('start seq: {}'.format(seq))
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        video_path = os.path.join(data_root, seq, 'video', 'video.mp4')
        with VideoTracker(cfg, args, video_path, result_filename) as vdo_trk:
            vdo_trk.run()

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary, formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(summary, os.path.join(result_root, 'summary_global.xlsx'))
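# Hypothetical driver for the main() above: it assumes only the two
# attributes main() actually reads (config_detection / config_deepsort);
# a real VideoTracker will likely need more options, and all paths and
# sequence names below are placeholders, not from the original source.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_detection', default='configs/yolov3.yaml')
    parser.add_argument('--config_deepsort', default='configs/deep_sort.yaml')
    args = parser.parse_args()
    main(data_root='datasets/MOT16/train', seqs=('MOT16-05',), args=args)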
def main(data_root='/mnt/EEC2C12EC2C0FBB9/Users/mirap/CMP/MOTDT/datasets', det_root=None,
         seqs=('MOT17-01-DPM', ), exp_name='demo', save_image=True, show_image=False):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdirs(result_root)
    data_type = 'mot'

    # run tracking
    accs = []
    for seq in seqs:
        output_dir = os.path.join(data_root, 'outputs', seq) if save_image else None
        logger.info('start seq: {}'.format(seq))
        loader = get_loader(data_root, det_root, seq)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        eval_seq(loader, data_type, result_filename,
                 save_dir=output_dir, show_image=show_image)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
def main(data_root='/home/SharedData/swapnil/tracker/MOTDT/data/MOT16/train', det_root=None,
         seqs=('MOT16-05', ), exp_name='demo', save_image=True, show_image=True):
# def main(data_root='/home/SharedData/swapnil/tracker/MOTDT/data/MOT16/train', det_root=None,
#          seqs=('MOT16-02', 'MOT16-04', 'MOT16-05', 'MOT16-09', 'MOT16-10', 'MOT16-11', 'MOT16-13'),
#          exp_name='demo', save_image=False, show_image=False):
# def main(data_root='/home/SharedData/swapnil/tracker/MOTDT/data/MOT16/test', det_root=None,
#          seqs=('MOT16-01', 'MOT16-03', 'MOT16-06', 'MOT16-07', 'MOT16-08', 'MOT16-12', 'MOT16-14'),
#          exp_name='demo', save_image=False, show_image=False):
    # logger.setLevel(logging.DEBUG)
    logger.setLevel(logging.INFO)

    # run tracking
    for seq in seqs:
        result_root = os.path.join('RESULT/mot16')
        mkdirs(result_root)
        output_dir = os.path.join(result_root, seq, 'img') if save_image else None
        # logger.info('start seq: {}'.format(seq))
        loader = get_loader(data_root, det_root, seq)
        run(loader, os.path.join(result_root, '{}.txt'.format(seq)),
            save_dir=output_dir, show_image=show_image)
def track(opt):
    logger.setLevel(logging.INFO)
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    # run tracking
    timer = Timer()
    accs = []
    n_frame = 0
    logger.info('start tracking...')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(result_root, 'frame')
    try:
        eval_seq(opt, dataloader, 'mot', result_filename,
                 save_dir=frame_dir, show_image=False, frame_rate=frame_rate)
    except Exception as e:
        logger.info(e)

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, 'result.mp4')
        cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
            osp.join(result_root, 'frame'), output_video_path)
        os.system(cmd_str)
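# The os.system call above interpolates paths into a shell string and
# stream-copies the JPEGs (so the .mp4 ends up holding MJPEG); a hedged
# alternative sketch using subprocess with an argument list and an explicit
# H.264 re-encode -- frame_dir and fps are placeholders for the caller's values.
import subprocess
import os.path as osp

def frames_to_video(frame_dir, output_video_path, fps=30):
    subprocess.run(
        ['ffmpeg', '-y', '-framerate', str(fps), '-f', 'image2',
         '-i', osp.join(frame_dir, '%05d.jpg'),
         '-c:v', 'libx264', '-pix_fmt', 'yuv420p', output_video_path],
        check=True)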
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',),
         exp_name='demo', save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # Read config
    cfg_dict = parse_model_cfg(opt.cfg)
    opt.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) \
            if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)

    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    sys.stdout.flush()
    Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
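# The frameRate slice above depends on exact key spelling and on the
# '\nseqLength' key following it; a minimal, more robust sketch using the
# stdlib, assuming the standard MOTChallenge seqinfo.ini layout with a
# [Sequence] section:
import configparser

def read_frame_rate(seqinfo_path):
    cp = configparser.ConfigParser()
    cp.read(seqinfo_path)
    # option names are case-insensitive in configparser
    return int(cp['Sequence']['frameRate'])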
def check_args(self):
    for required_argument in self.required_arguments_list:
        if not hasattr(self.args, required_argument) or not getattr(self.args, required_argument):
            self.parser_group_plugin.print_help()
            logger.error("[INIT][Plugin] Plugin {} argument {} is required.".format(
                self.plugin_name, required_argument))
            exit()

    if hasattr(self.args, "debug") and self.args.debug:
        logger.setLevel(logging.DEBUG)
        logger.debug('[INIT] set logging level: debug')

    return True
def main(data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',),
         exp_name='demo', save_image=False, show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdirs(result_root)
    data_type = 'mot'

    # run tracking
    for seq in seqs:
        output_dir = os.path.join(data_root, 'outputs', seq) if save_image else None
        logger.info('start seq: {}'.format(seq))
        loader = get_loader(data_root, det_root, seq)
        eval_seq(loader, data_type, os.path.join(result_root, '{}.txt'.format(seq)),
                 save_dir=output_dir, show_image=show_image)
def main(data_root=os.path.expanduser('~/Data/MOT16/train'), det_root=None, seqs=('MOT16-05', ),
         exp_name='demo', save_image=False, show_image=True, args=None):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdirs(result_root)
    data_type = 'mot'

    # run tracking
    accs = []
    for seq in seqs:
        output_dir = os.path.join(data_root, 'outputs', seq) if save_image else None
        logger.info('start seq: {}'.format(seq))
        loader = get_loader(data_root, det_root, seq)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        eval_seq(loader, data_type, result_filename,
                 save_dir=output_dir, show_image=show_image, args=args)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))

    # get summary
    # metrics = ['mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall']
    metrics = mm.metrics.motchallenge_metrics
    # metrics = None
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary, formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(
        summary, os.path.join(result_root, f'summary_{exp_name}.xlsx'))
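# Self-contained toy run of the motmetrics pipeline that Evaluator wraps
# above; the boxes and ids are invented for illustration only.
import numpy as np
import motmetrics as mm

acc = mm.MOTAccumulator(auto_id=True)
gt_boxes = np.array([[10, 10, 20, 40], [100, 50, 20, 40]])    # x, y, w, h
hyp_boxes = np.array([[12, 11, 20, 40], [98, 52, 20, 40]])
dists = mm.distances.iou_matrix(gt_boxes, hyp_boxes, max_iou=0.5)
acc.update([1, 2], [1, 2], dists)  # gt ids, hypothesis ids, distance matrix

mh = mm.metrics.create()
summary = mh.compute(acc, metrics=mm.metrics.motchallenge_metrics, name='toy')
print(mm.io.render_summary(summary, formatters=mh.formatters,
                           namemap=mm.io.motchallenge_metric_names))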
def main(data_root='/home/SharedData/swapnil/tracker/MOTDT/data/MOT16/train', det_root=None,
         seqs=('MOT16-02', 'MOT16-04', 'MOT16-05', 'MOT16-09', 'MOT16-10', 'MOT16-11', 'MOT16-13'),
         exp_name='demo', save_image=False, show_image=True,
         from_video=True, live_demo=False, write_video=True):
    # logger.setLevel(logging.DEBUG)
    logger.setLevel(logging.INFO)

    # give path of video
    if from_video:
        video_path = sys.argv[1]
    else:
        video_path = None

    # run tracking
    for seq in seqs:
        result_root = os.path.join('RESULT/MOT16/dets_yolo')
        mkdirs(result_root)
        output_dir = os.path.join(result_root, seq, 'img') if save_image else None
        # logger.info('start seq: {}'.format(seq))
        tracking(data_root, seq, result_root, save_dir=output_dir, show_image=show_image,
                 from_video=from_video, video_path=video_path,
                 write_video=write_video, live_demo=live_demo)
Todo:
    * Add compatibility for non-GPU machines (would run slow)
    * More documentation
"""
import logging
import argparse

from utils.utils import *
from utils.log import logger
from utils.timer import Timer
from utils.parse_config import parse_model_cfg
import utils.datasets as datasets
from track import eval_seq

logger.setLevel(logging.INFO)


def track(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)
    cfg_dict = parse_model_cfg(opt.cfg)
    opt.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]

    # run tracking
    timer = Timer()
    accs = []
    n_frame = 0
    logger.info('Starting tracking...')
def main():
    try:
        # arg parse
        t1 = time.time()
        parser = argparse.ArgumentParser(prog=__title__,
                                         description=__introduction__.format(detail="Main Program"),
                                         epilog=__epilog__,
                                         formatter_class=argparse.RawDescriptionHelpFormatter,
                                         usage=argparse.SUPPRESS)

        subparsers = parser.add_subparsers()

        parser_group_init = subparsers.add_parser('init', help='Kunlun-M init before use.')
        parser_group_init.add_argument('-init', action='store_true', default=False)

        parser_group_core = subparsers.add_parser('config', help='config for rule&tamper',
                                                  description=__introduction__.format(detail='config for rule&tamper'),
                                                  formatter_class=argparse.RawDescriptionHelpFormatter,
                                                  usage=argparse.SUPPRESS, add_help=True)
        parser_group_core.add_argument('load', choices=['load', 'recover', 'loadtamper', 'retamper'],
                                       default=False, help='operate for rule&tamper')

        parser_group_scan = subparsers.add_parser('scan', help='scan target path',
                                                  description=__introduction__.format(detail='scan target path'),
                                                  epilog=__scan_epilog__,
                                                  formatter_class=argparse.RawDescriptionHelpFormatter,
                                                  add_help=True)
        parser_group_scan.add_argument('-t', '--target', dest='target', action='store', default='',
                                       metavar='<target>', help='file, folder')
        parser_group_scan.add_argument('-f', '--format', dest='format', action='store', default='csv',
                                       metavar='<format>', choices=['html', 'json', 'csv', 'xml'],
                                       help='vulnerability output format (formats: %(choices)s)')
        parser_group_scan.add_argument('-o', '--output', dest='output', action='store', default='',
                                       metavar='<output>', help='vulnerability output STREAM, FILE')
        parser_group_scan.add_argument('-r', '--rule', dest='special_rules', action='store', default=None,
                                       metavar='<rule_id>', help='specifies rules e.g: 1000, 1001')
        parser_group_scan.add_argument('-tp', '--tamper', dest='tamper_name', action='store', default=None,
                                       metavar='<tamper_name>', help='tamper repair function e.g: wordpress')
        parser_group_scan.add_argument('-l', '--log', dest='log', action='store', default=None,
                                       metavar='<log>', help='log name')
        parser_group_scan.add_argument('-lan', '--language', dest='language', action='store', default=None,
                                       help='set target language')
        parser_group_scan.add_argument('-b', '--blackpath', dest='black_path', action='store', default=None,
                                       help='black path list')
        parser_group_scan.add_argument('-d', '--debug', dest='debug', action='store_true', default=False,
                                       help='open debug mode')
        parser_group_scan.add_argument('-uc', '--unconfirm', dest='unconfirm', action='store_true',
                                       default=False, help='show unconfirmed vuls')
        parser_group_scan.add_argument('-upc', '--unprecom', dest='unprecom', action='store_true',
                                       default=False, help='without Precompiled')

        parser_group_show = subparsers.add_parser('show', help='show rule&tamper',
                                                  description=__introduction__.format(detail='show rule&tamper'),
                                                  formatter_class=argparse.RawDescriptionHelpFormatter,
                                                  usage=argparse.SUPPRESS, add_help=True)
        parser_group_show.add_argument('list', choices=['rule', "tamper"], action='store', default=None,
                                       help='show all rules & tampers')
        parser_group_show.add_argument('-k', '--key', dest='listkey', action='store', default="all",
                                       help='key for show rule & tamper. eg: 1001/wordpress')

        parser_group_console = subparsers.add_parser('console', help='enter console mode',
                                                     description=__introduction__.format(detail='enter console mode'),
                                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                                     usage=argparse.SUPPRESS, add_help=True)
        parser_group_console.add_argument('console', action='store_true', default=True,
                                          help='enter console mode')

        # load plugin argument list and help
        parser_group_plugin = subparsers.add_parser('plugin', help='Load plugins',
                                                    description=__introduction__.format(detail='Load plugins'),
                                                    formatter_class=argparse.RawDescriptionHelpFormatter,
                                                    usage=argparse.SUPPRESS, add_help=True)
        parser_group_plugin.add_argument('plugin_name', choices=plugins.PLUGIN_LIST, default=False,
                                         help='enter plugin name')

        # args = parser.parse_args()
        args = parser.parse_known_args()[0]

        # log
        log(logging.INFO)

        # plugins must be handled before full validation
        if hasattr(args, "plugin_name") and args.plugin_name:
            logger.info('[INIT] Load Plugin {}.'.format(args.plugin_name))
            plugins.PLUGIN_DICT[args.plugin_name](parser, parser_group_plugin)
            exit()

        # everything else requires full validation
        args = parser.parse_args()

        if hasattr(args, "debug") and args.debug:
            logger.setLevel(logging.DEBUG)
            logger.debug('[INIT] set logging level: debug')

        if hasattr(args, "init"):
            logger.info('Init Database for KunLun-M.')
            call_command('makemigrations')
            call_command('migrate')
            logger.info('Init Database Finished.')
            exit()

        if hasattr(args, "load"):
            if args.load == "load":
                logger.info("[INIT] RuleCheck start.")
                RuleCheck().load()
                logger.info("[INIT] RuleCheck finished.")
                exit()
            elif args.load == "recover":
                logger.info("[INIT] RuleRecover start.")
                RuleCheck().recover()
                logger.info("[INIT] RuleRecover finished.")
                exit()
            elif args.load == "loadtamper":
                logger.info("[INIT] TamperCheck start.")
                TamperCheck().load()
                logger.info("[INIT] TamperCheck finished.")
                exit()
            elif args.load == "retamper":
                logger.info("[INIT] TamperRecover start.")
                TamperCheck().recover()
                logger.info("[INIT] TamperRecover finished.")
                exit()
            else:
                parser_group_core.print_help()
                exit()

        if hasattr(args, "list"):
            if args.list:
                logger.info("Show {}:\n{}".format(args.list, show_info(args.list, args.listkey.strip())))
                exit()
            else:
                parser_group_show.print_help()
                exit()

        if hasattr(args, "console"):
            # check rule and tamper
            logger.info("[INIT] RuleCheck start.")
            RuleCheck().load()
            logger.info("[INIT] RuleCheck finished.")
            logger.info("[INIT] TamperCheck start.")
            TamperCheck().load()
            logger.info("[INIT] TamperCheck finished.")

            logger.info("[INIT] Enter KunLun-M console mode.")
            shell = KunlunInterpreter()
            shell.start()
            exit()

        if not hasattr(args, "target") or args.target == '':
            parser.print_help()
            exit()

        logger.debug('[INIT] start Scan Task...')

        # new scan task
        task_name = get_mainstr_from_filename(args.target)
        s = cli.check_scantask(task_name=task_name, target_path=args.target, parameter_config=sys.argv)

        if s.is_finished:
            logger.info("[INIT] Finished Task.")
            exit()

        # task id
        sid = str(s.id)
        get_scan_id()

        if hasattr(args, "log") and args.log:
            logger.info("[INIT] New Log file {}.log .".format(args.log))
            log_add(logging.INFO, args.log)
        else:
            logger.info("[INIT] New Log file ScanTask_{}.log .".format(sid))
            log_add(logging.INFO, "ScanTask_{}".format(sid))

        if hasattr(args, "debug") and args.debug:
            logger.setLevel(logging.DEBUG)
            logger.debug('[INIT] set logging level: debug')

        data = {
            'status': 'running',
            'report': ''
        }
        Running(sid).status(data)

        cli.start(args.target, args.format, args.output, args.special_rules, sid, args.language,
                  args.tamper_name, args.black_path, args.unconfirm, args.unprecom)

        s.is_finished = True
        s.save()
        t2 = time.time()
        logger.info('[INIT] Done! Consume Time:{ct}s'.format(ct=t2 - t1))

    except KeyboardInterrupt:
        logger.warning("[KunLun-M] Stop KunLun-M.")
        sys.exit(0)
    except Exception as e:
        exc_msg = traceback.format_exc()
        logger.warning(exc_msg)
def main():
    try:
        # arg parse
        t1 = time.time()
        parser = argparse.ArgumentParser(
            prog=__title__,
            description=__introduction__.format(detail="Main Program"),
            epilog=__epilog__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS)

        subparsers = parser.add_subparsers()

        # init
        parser_group_init = subparsers.add_parser('init', help='Kunlun-M init before use.')
        parser_group_init.add_argument('init', choices=['initialize', 'checksql'], default='init',
                                       help='check and migrate SQL')
        parser_group_init.add_argument('appname', choices=['index', 'dashboard', 'backend', 'api'],
                                       nargs='?', default='index', help='Check App name')
        parser_group_init.add_argument('migrationname', default='migrationname', nargs='?',
                                       help='Check migration name')

        # load config into database
        parser_group_core = subparsers.add_parser(
            'config', help='config for rule&tamper',
            description=__introduction__.format(detail='config for rule&tamper'),
            epilog=__database_epilog__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS, add_help=True)
        parser_group_core.add_argument('load', choices=['load', 'recover', 'loadtamper', 'retamper'],
                                       default=False, help='operate for rule&tamper')

        parser_group_scan = subparsers.add_parser(
            'scan', help='scan target path',
            description=__introduction__.format(detail='scan target path'),
            epilog=__scan_epilog__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            add_help=True)
        parser_group_scan.add_argument('-t', '--target', dest='target', action='store', default='',
                                       metavar='<target>', help='file, folder')
        parser_group_scan.add_argument('-f', '--format', dest='format', action='store', default='csv',
                                       metavar='<format>', choices=['html', 'json', 'csv', 'xml'],
                                       help='vulnerability output format (formats: %(choices)s)')
        parser_group_scan.add_argument('-o', '--output', dest='output', action='store', default='',
                                       metavar='<output>', help='vulnerability output STREAM, FILE')
        parser_group_scan.add_argument('-r', '--rule', dest='special_rules', action='store', default=None,
                                       metavar='<rule_id>', help='specifies rules e.g: 1000, 1001')
        parser_group_scan.add_argument('-tp', '--tamper', dest='tamper_name', action='store', default=None,
                                       metavar='<tamper_name>', help='tamper repair function e.g: wordpress')
        parser_group_scan.add_argument('-l', '--log', dest='log', action='store', default=None,
                                       metavar='<log>', help='log name')
        parser_group_scan.add_argument('-lan', '--language', dest='language', action='store', default=None,
                                       help='set target language')
        parser_group_scan.add_argument('-b', '--blackpath', dest='black_path', action='store', default=None,
                                       help='black path list')

        # for api
        parser_group_scan.add_argument('-a', '--api', dest='api', action='store_true', default=False,
                                       help='without any output for shell')
        parser_group_scan.add_argument('-y', '--yes', dest='yes', action='store_true', default=False,
                                       help='without any output for shell')
        parser_group_scan.add_argument('--origin', dest='origin', action='store', default=None,
                                       metavar='<origin>', help='project origin')
        parser_group_scan.add_argument('-des', '--description', dest='description', action='store',
                                       default=None, metavar='<description>', help='project description')

        # for log
        parser_group_scan.add_argument('-d', '--debug', dest='debug', action='store_true', default=False,
                                       help='open debug mode')

        # for scan profile
        parser_group_scan.add_argument('-uc', '--unconfirm', dest='unconfirm', action='store_true',
                                       default=False, help='show unconfirmed vuls')
        parser_group_scan.add_argument('-upc', '--unprecom', dest='unprecom', action='store_true',
                                       default=False, help='without Precompiled')

        # for vendor vuln scan
        parser_group_scan.add_argument('--without-vendor', dest='without_vendor', action='store_true',
                                       default=False, help='without scan vendor vuln (default open)')

        # show for rule & tamper
        parser_group_show = subparsers.add_parser(
            'show', help='show rule&tamper',
            description=__introduction__.format(detail='show rule&tamper'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS, add_help=True)
        parser_group_show.add_argument('list', choices=['rule', "tamper"], action='store', default=None,
                                       help='show all rules & tampers')
        parser_group_show.add_argument('-k', '--key', dest='listkey', action='store', default="all",
                                       help='key for show rule & tamper. eg: 1001/wordpress')

        # for search vendor
        parser_group_search = subparsers.add_parser(
            'search', help='search project by vendor/path/...',
            description=__introduction__.format(detail='search project by vendor/path/...'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS, add_help=True)
        parser_group_search.add_argument('stype', choices=['vendor'], default='vendor', help='search type')
        parser_group_search.add_argument('keyword_name', default='flask', nargs='?',
                                         help='keyword name for search')
        parser_group_search.add_argument('keyword_value', default='1.0.0', nargs='?',
                                         help='keyword value for search')
        parser_group_search.add_argument('--with-vuls', dest='with_vuls', action='store_true',
                                         default=False, help='with vuls scan (default False)')

        # console
        parser_group_console = subparsers.add_parser(
            'console', help='enter console mode',
            description=__introduction__.format(detail='enter console mode'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS, add_help=True)
        parser_group_console.add_argument('console', action='store_true', default=True,
                                          help='enter console mode')

        # load plugin argument list and help
        parser_group_plugin = subparsers.add_parser(
            'plugin', help=plugins.PLUGIN_DESCS,
            description=__introduction__.format(detail=plugins.PLUGIN_DESCS),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS, add_help=True)
        parser_group_plugin.add_argument('plugin_name', choices=plugins.PLUGIN_LIST, default=False,
                                         help='enter plugin name')

        # web
        parser_group_web = subparsers.add_parser(
            'web', help='KunLun-m Web mode',
            description=__introduction__.format(detail='KunLun-m Web mode'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS, add_help=True)
        parser_group_web.add_argument('-p', '--port', dest='port', action='store', default='9999',
                                      help='port for web')

        # args = parser.parse_args()
        args = parser.parse_known_args()[0]

        # log
        log(logging.INFO)

        # plugins must be handled before full validation
        if hasattr(args, "plugin_name") and args.plugin_name:
            logger.info('[INIT] Load Plugin {}.'.format(args.plugin_name))
            plugins.PLUGIN_DICT[args.plugin_name](parser, parser_group_plugin)
            exit()

        # everything else requires full validation
        args = parser.parse_args()

        if hasattr(args, "debug") and args.debug:
            logger.setLevel(logging.DEBUG)

        if hasattr(args, "init"):
            if args.init == 'checksql':
                logger.info('Show migrate sql.')
                call_command('sqlmigrate', args.appname, args.migrationname)
            else:
                logger.info('Init Database for KunLun-M.')
                call_command('makemigrations')
                call_command('migrate')
                logger.info('Init Database Finished.')
            exit()

        if hasattr(args, "port"):
            logger.info('Start KunLun-M Web in Port: {}'.format(args.port))
            call_command('runserver', args.port)

        if hasattr(args, "load"):
            if args.load == "load":
                logger.info("[INIT] RuleCheck start.")
                RuleCheck().load()
                logger.info("[INIT] RuleCheck finished.")
                exit()
            elif args.load == "recover":
                logger.info("[INIT] RuleRecover start.")
                RuleCheck().recover()
                logger.info("[INIT] RuleRecover finished.")
                exit()
            elif args.load == "loadtamper":
                logger.info("[INIT] TamperCheck start.")
                TamperCheck().load()
                logger.info("[INIT] TamperCheck finished.")
                exit()
            elif args.load == "retamper":
                logger.info("[INIT] TamperRecover start.")
                TamperCheck().recover()
                logger.info("[INIT] TamperRecover finished.")
                exit()
            else:
                parser_group_core.print_help()
                exit()

        if hasattr(args, "list"):
            if args.list:
                logger.info("Show {}:\n{}".format(
                    args.list, show_info(args.list, args.listkey.strip())))
                exit()
            else:
                parser_group_show.print_help()
                exit()

        if hasattr(args, "stype"):
            # search and show vuls
            if args.stype:
                logger.info("[SEARCH] Search Project by {} in {} {}".format(
                    args.stype, args.keyword_name, args.keyword_value))
                cli.search_project(args.stype, args.keyword_name, args.keyword_value, args.with_vuls)
                exit()
            else:
                parser_group_show.print_help()
                exit()

        if hasattr(args, "console"):
            # check rule and tamper
            logger.info("[INIT] RuleCheck start.")
            RuleCheck().load()
            logger.info("[INIT] RuleCheck finished.")
            logger.info("[INIT] TamperCheck start.")
            TamperCheck().load()
            logger.info("[INIT] TamperCheck finished.")

            logger.info("[INIT] Enter KunLun-M console mode.")
            shell = KunlunInterpreter()
            shell.start()
            exit()

        if not hasattr(args, "target") or args.target == '':
            parser.print_help()
            exit()

        # for api close log
        if hasattr(args, "api") and args.api:
            log_rm()

        logger.debug('[INIT] start Scan Task...')
        logger.debug('[INIT] set logging level: {}'.format(logger.level))

        # check for project data
        if hasattr(args, "origin") and args.origin:
            origin = args.origin
        else:
            origin = "File in {}".format(args.target)

        # new scan task
        task_name = get_mainstr_from_filename(args.target)
        s = cli.check_scantask(task_name=task_name, target_path=args.target,
                               parameter_config=sys.argv, project_origin=origin,
                               project_des=args.description, auto_yes=args.yes)

        if s.is_finished:
            logger.info("[INIT] Finished Task.")
            exit()

        # task id
        sid = str(s.id)
        task_id = get_scan_id()

        # for api
        if hasattr(args, "api") and args.api:
            print("TaskID: {}".format(task_id))
        else:
            logger.info("TaskID: {}".format(task_id))

        if hasattr(args, "log") and args.log:
            logger.info("[INIT] New Log file {}.log .".format(args.log))
            log_name = args.log
        else:
            logger.info("[INIT] New Log file ScanTask_{}.log .".format(sid))
            log_name = "ScanTask_{}".format(sid)
        log_add(logging.DEBUG, log_name)

        if hasattr(args, "without_vendor"):
            # shared variable
            import Kunlun_M.settings as settings
            settings.WITH_VENDOR = False if args.without_vendor else settings.WITH_VENDOR
            logger.info("[INIT] Vendor Vuls Scan Status: {}".format(settings.WITH_VENDOR))

        data = {'status': 'running', 'report': ''}
        Running(sid).status(data)

        cli.start(args.target, args.format, args.output, args.special_rules, sid, args.language,
                  args.tamper_name, args.black_path, args.unconfirm, args.unprecom)

        s.is_finished = True
        s.save()
        t2 = time.time()

        # upload the log if remote log upload is enabled
        if IS_OPEN_REMOTE_SERVER:
            log_path = os.path.join(LOGS_PATH, "{}.log".format(log_name))
            upload_log(log_path)

        logger.info('[INIT] Done! Consume Time:{ct}s'.format(ct=t2 - t1))

    except KeyboardInterrupt:
        logger.warning("[KunLun-M] Stop KunLun-M.")
        sys.exit(0)
    except Exception as e:
        exc_msg = traceback.format_exc()
        logger.warning(exc_msg)
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05', ),
         exp_name='demo', save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # run tracking
    timer = Timer()
    accs = []
    n_frame = 0
    timer.tic()
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) \
            if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
        n_frame += eval_seq(opt, dataloader, data_type, result_filename,
                            save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
                output_dir, output_video_path)
            os.system(cmd_str)
    timer.toc()
    logger.info('Time elapsed: {}, FPS {}'.format(timer.average_time, n_frame / timer.average_time))

    # get summary
    # metrics = ['mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall']
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary, formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(
        summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
def main(opt, data_root='/media/dh/data/MOT16/train', det_root=None, seqs=('MOT16-05', ),
         exp_name='demo', save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # Read config
    cfg_dict = parse_model_cfg(opt.cfg)
    opt.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) \
            if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        # dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        print(osp.join(data_root, seq))
        dataloader = datasets.LoadVideo(osp.join(data_root, seq))
        # print("DATALOADER", dataloader.vw)
        result_filename = os.path.join(result_root, '{}.csv'.format(seq))
        # meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        # frame_rate = int(meta_info[meta_info.find('frameRate')+10:meta_info.find('\nseqLength')])
        frame_rate = 30
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image,
                              frame_rate=frame_rate, vw=dataloader.vw)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
                output_dir, output_video_path)
            os.system(cmd_str)

    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
def eval_video(**kwargs):
    logger.setLevel(logging.INFO)

    cap = cv2.VideoCapture(kwargs['video_source'])
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')  # int(cap.get(cv2.CAP_PROP_FOURCC))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_count = -1
    iter_count = 0
    each_frame = kwargs['each_frame']
    save_dir = kwargs['save_dir']
    frames_limit = kwargs['frames_limit']

    video_writer = None
    video_output = kwargs['video_output']
    if video_output is not None:
        logger.info(f'Write video to {video_output} ({width}x{height}, {fps/each_frame} fps) ...')
        video_writer = cv2.VideoWriter(video_output, fourcc, fps / each_frame,
                                       frameSize=(width, height))

    write_report_to = None
    data = {}
    if kwargs['report_output']:
        write_report_to = kwargs['report_output']

    tracker = OnlineTracker(**kwargs)
    timer = Timer()
    results = []
    wait_time = 1

    drv = driver.load_driver('tensorflow')
    logger.info(f'init person detection driver...')
    person_detect_driver = drv()
    person_detect_model = kwargs['person_detect_model']
    logger.info(f'loading person detection model {person_detect_model}...')
    person_detect_driver.load_model(person_detect_model)
    logger.info(f'person detection model {person_detect_model} loaded')

    try:
        while True:
            frame_count += 1
            if frames_limit is not None and frame_count > frames_limit:
                logger.warning('frames limit {} reached'.format(frames_limit))
                break

            # read each X-th bgr frame
            frame = cap.read()  # bgr
            if frame_count % each_frame > 0:
                continue

            if isinstance(frame, tuple):
                frame = frame[1]
            if frame is None:
                logger.warning('video capturing finished')
                break

            if iter_count % 20 == 0:
                logger.info('Processing frame {} (iteration {}) ({:.2f} fps)'.format(
                    frame_count, iter_count, 1. / max(1e-5, timer.average_time)))

            det_tlwhs, det_scores = detect_persons_tf(person_detect_driver, frame, threshold=.5)

            # run tracking
            timer.tic()
            online_targets = tracker.update(frame, det_tlwhs, None)
            online_tlwhs = []
            online_ids = []
            for t in online_targets:
                online_tlwhs.append(t.tlwh)
                online_ids.append(t.track_id)
            timer.toc()

            if write_report_to:
                for i, id in enumerate(online_ids):
                    if id not in data:
                        data[id] = {
                            'intervals': [],
                            'images': [],
                            'last_image': None,
                        }
                    di = data[id]['intervals']
                    if len(di) == 0 or di[-1][1] < frame_count - each_frame:
                        if len(di) > 0 and di[-1][0] == di[-1][1]:
                            del di[-1]  # drop degenerate single-frame interval in place
                        di.append([frame_count, frame_count])
                    else:
                        di[-1][1] = frame_count
                    if not data[id]['last_image'] or data[id]['last_image'] < frame_count - fps * 10:
                        data[id]['last_image'] = frame_count
                        tlwh = [max(0, int(o)) for o in online_tlwhs[i]]
                        pers_img = frame[tlwh[1]:tlwh[1] + tlwh[3], tlwh[0]:tlwh[0] + tlwh[2]].copy()
                        if max(pers_img.shape[0], pers_img.shape[1]) > 100:
                            coef = max(pers_img.shape[0], pers_img.shape[1]) / 100
                            pers_img = cv2.resize(pers_img, (int(pers_img.shape[1] / coef),
                                                             int(pers_img.shape[0] / coef)))
                        _, pers_img = cv2.imencode('.jpeg', pers_img)
                        data[id]['images'].append(base64.b64encode(pers_img).decode())

            # save results
            frame_id = frame_count  # or make it incremental?
            results.append((frame_id + 1, online_tlwhs, online_ids))

            online_im = vis.plot_tracking(frame, online_tlwhs, online_ids,
                                          frame_id=frame_id, fps=1. / timer.average_time)

            for tlwh in det_tlwhs:
                cv2.rectangle(
                    online_im,
                    (tlwh[0], tlwh[1]),  # (left, top)
                    (tlwh[0] + tlwh[2], tlwh[1] + tlwh[3]),  # (right, bottom)
                    (0, 255, 0),
                    1,
                )

            if kwargs['show_image']:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                save_to = os.path.join(save_dir, '{:05d}.jpg'.format(frame_id))
                cv2.imwrite(save_to, online_im)

            if video_writer is not None:
                video_writer.write(cv2.resize(online_im, (width, height)))

            key = cv2.waitKey(wait_time)
            # compare raw keycodes before the chr() conversion, so the int
            # membership test actually fires
            if key in (ord('q'), 202, 27):  # 'q', Esc, or 'q' in Russian layout
                exit(0)
            key = chr(key % 128).lower()
            if key == 'p':
                cv2.waitKey(0)
            elif key == 'a':
                wait_time = int(not wait_time)

            iter_count += 1

    except (KeyboardInterrupt, SystemExit) as e:
        logger.info('Caught %s: %s' % (e.__class__.__name__, e))
    finally:
        cv2.destroyAllWindows()
        if video_writer is not None:
            logger.info('Written video to %s.' % video_output)
            video_writer.release()

    if write_report_to:
        for i in data:
            di = data[i]
            di['index'] = i
            di['duration'] = sum([i[1] - i[0] for i in di['intervals']])
            di['duration_sec'] = '{:.2f}'.format(di['duration'] / fps)
            di['intervals_str'] = ', '.join(
                ['{:.2f}-{:.2f}'.format(i[0] / fps, i[1] / fps) for i in di['intervals']])
        data = data.values()
        data = sorted(data, key=lambda x: x['duration'], reverse=True)

        # prepare html
        tpl = jinja2.Template(template)
        html = tpl.render(data=data)
        with open(write_report_to, 'w') as f:
            f.write(html)

        update_data({'#documents.persons.html': html}, use_mlboard, mlboard)
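# Standalone illustration of the per-id interval bookkeeping inside
# eval_video above: a track's presence interval is extended while hits
# arrive within each_frame of the last one, otherwise a new [start, end]
# interval is opened (the degenerate single-frame cleanup is omitted here);
# the frame numbers are made up for the example.
def add_hit(intervals, frame_count, each_frame):
    if not intervals or intervals[-1][1] < frame_count - each_frame:
        intervals.append([frame_count, frame_count])  # open a new interval
    else:
        intervals[-1][1] = frame_count  # extend the current interval

iv = []
for f in [0, 2, 4, 20, 22]:  # track visible with a gap after frame 4
    add_hit(iv, f, each_frame=2)
print(iv)  # [[0, 4], [20, 22]]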