def main():
    """Dygraph tool entry point: parse CLI flags, load/merge the config,
    select the compute device, sanity-check the environment and delegate
    to ``run(FLAGS, cfg)``.
    """
    FLAGS = parse_args()
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)

    # disable npu in config by default
    if 'use_npu' not in cfg:
        cfg.use_npu = False

    # disable xpu in config by default
    if 'use_xpu' not in cfg:
        cfg.use_xpu = False

    # Pick the first enabled device. paddle.set_device is called for its
    # side effect of selecting the global place; its return value was
    # previously stored in an unused local, now dropped.
    if cfg.use_gpu:
        device = 'gpu'
    elif cfg.use_npu:
        device = 'npu'
    elif cfg.use_xpu:
        device = 'xpu'
    else:
        device = 'cpu'
    paddle.set_device(device)

    # sync_bn only works on (multi-)GPU; fall back to plain bn otherwise
    if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:
        cfg['norm_type'] = 'bn'

    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_npu(cfg.use_npu)
    check_xpu(cfg.use_xpu)
    check_version()

    run(FLAGS, cfg)
def main():
    """Dygraph evaluation entry point: parse CLI flags, fold eval-only
    options into the config, select the device, optionally wrap the model
    with a slim (compression) config, then delegate to ``run(FLAGS, cfg)``.
    """
    FLAGS = parse_args()
    cfg = load_config(FLAGS.config)
    # TODO: bias should be unified
    cfg['bias'] = 1 if FLAGS.bias else 0
    # bool(...) instead of the redundant `True if ... else False`
    cfg['classwise'] = bool(FLAGS.classwise)
    cfg['output_eval'] = FLAGS.output_eval
    cfg['save_prediction_only'] = FLAGS.save_prediction_only
    merge_config(FLAGS.opt)

    # disable npu in config by default
    if 'use_npu' not in cfg:
        cfg.use_npu = False

    # disable xpu in config by default
    if 'use_xpu' not in cfg:
        cfg.use_xpu = False

    # Pick the first enabled device. paddle.set_device is called for its
    # side effect of selecting the global place; its return value was
    # previously stored in an unused local, now dropped.
    if cfg.use_gpu:
        device = 'gpu'
    elif cfg.use_npu:
        device = 'npu'
    elif cfg.use_xpu:
        device = 'xpu'
    else:
        device = 'cpu'
    paddle.set_device(device)

    # sync_bn only works on (multi-)GPU; fall back to plain bn otherwise
    if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:
        cfg['norm_type'] = 'bn'

    if FLAGS.slim_config:
        cfg = build_slim_model(cfg, FLAGS.slim_config, mode='eval')

    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_npu(cfg.use_npu)
    check_xpu(cfg.use_xpu)
    check_version()

    run(FLAGS, cfg)
def main():
    """Dygraph inference entry point: parse CLI flags, fold VisualDL
    options into the config, select the device, optionally apply a slim
    (compression) config in test mode, then delegate to ``run(FLAGS, cfg)``.
    """
    FLAGS = parse_args()
    cfg = load_config(FLAGS.config)
    cfg['use_vdl'] = FLAGS.use_vdl
    cfg['vdl_log_dir'] = FLAGS.vdl_log_dir
    merge_config(FLAGS.opt)

    # disable npu in config by default
    if 'use_npu' not in cfg:
        cfg.use_npu = False

    # disable xpu in config by default
    if 'use_xpu' not in cfg:
        cfg.use_xpu = False

    # Pick the first enabled device. paddle.set_device is called for its
    # side effect of selecting the global place; its return value was
    # previously stored in an unused local, now dropped.
    if cfg.use_gpu:
        device = 'gpu'
    elif cfg.use_npu:
        device = 'npu'
    elif cfg.use_xpu:
        device = 'xpu'
    else:
        device = 'cpu'
    paddle.set_device(device)

    # sync_bn only works on (multi-)GPU; fall back to plain bn otherwise
    if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:
        cfg['norm_type'] = 'bn'

    if FLAGS.slim_config:
        cfg = build_slim_model(cfg, FLAGS.slim_config, mode='test')

    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_npu(cfg.use_npu)
    check_xpu(cfg.use_xpu)
    check_version()

    run(FLAGS, cfg)
def main():
    """
    Main evaluate function (static graph / fluid).

    Builds the eval program, loads weights, runs evaluation over the
    EvalReader dataset and reports COCO/VOC metrics.  If --json_eval is
    set, evaluates already-dumped json result files instead of running
    the model.
    """
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)
    # check if set use_gpu=True in paddlepaddle cpu version
    check_gpu(cfg.use_gpu)
    # XPU is opt-in: only honored when the config explicitly declares it
    use_xpu = False
    if hasattr(cfg, 'use_xpu'):
        check_xpu(cfg.use_xpu)
        use_xpu = cfg.use_xpu
    # check if paddlepaddle version is satisfied
    check_version()

    assert not (use_xpu and cfg.use_gpu), \
        'Can not run on both XPU and GPU'

    main_arch = cfg.architecture

    # multi-scale test config (optional); changes how model.eval is built
    multi_scale_test = getattr(cfg, 'MultiScaleTEST', None)

    # define executor
    if cfg.use_gpu:
        place = fluid.CUDAPlace(0)
    elif use_xpu:
        place = fluid.XPUPlace(0)
    else:
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    # build program
    model = create(main_arch)
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['EvalReader']['inputs_def']
            feed_vars, loader = model.build_inputs(**inputs_def)
            if multi_scale_test is None:
                fetches = model.eval(feed_vars)
            else:
                fetches = model.eval(feed_vars, multi_scale_test)
    # clone for inference (prunes backward ops)
    eval_prog = eval_prog.clone(True)

    reader = create_reader(cfg.EvalReader, devices_num=1)
    # When iterable mode, set set_sample_list_generator(reader, place)
    loader.set_sample_list_generator(reader)

    dataset = cfg['EvalReader']['dataset']

    # eval already exists json file
    if FLAGS.json_eval:
        logger.info(
            "In json_eval mode, PaddleDetection will evaluate json files in "
            "output_eval directly. And proposal.json, bbox.json and mask.json "
            "will be detected by default.")
        json_eval_results(
            cfg.metric, json_directory=FLAGS.output_eval, dataset=dataset)
        return

    compile_program = fluid.CompiledProgram(eval_prog).with_data_parallel()
    # XPU does not support data-parallel compiled programs here;
    # run the plain program instead
    if use_xpu:
        compile_program = eval_prog

    assert cfg.metric != 'OID', "eval process of OID dataset \
                          is not supported."
    if cfg.metric == "WIDERFACE":
        raise ValueError("metric type {} does not support in tools/eval.py, "
                         "please use tools/face_eval.py".format(cfg.metric))
    assert cfg.metric in ['COCO', 'VOC'], \
        "unknown metric type {}".format(cfg.metric)
    extra_keys = []

    # extra fetch keys differ per metric (image meta for COCO, GT for VOC)
    if cfg.metric == 'COCO':
        extra_keys = ['im_info', 'im_id', 'im_shape']
    if cfg.metric == 'VOC':
        extra_keys = ['gt_bbox', 'gt_class', 'is_difficult']

    keys, values, cls = parse_fetches(fetches, eval_prog, extra_keys)

    # whether output bbox is normalized in model output layer
    is_bbox_normalized = False
    if hasattr(model, 'is_bbox_normalized') and \
            callable(model.is_bbox_normalized):
        is_bbox_normalized = model.is_bbox_normalized()

    sub_eval_prog = None
    sub_keys = None
    sub_values = None
    # build sub-program: a separate mask-branch program used only for
    # Mask architectures under multi-scale test
    if 'Mask' in main_arch and multi_scale_test:
        sub_eval_prog = fluid.Program()
        with fluid.program_guard(sub_eval_prog, startup_prog):
            with fluid.unique_name.guard():
                inputs_def = cfg['EvalReader']['inputs_def']
                inputs_def['mask_branch'] = True
                feed_vars, eval_loader = model.build_inputs(**inputs_def)
                sub_fetches = model.eval(
                    feed_vars, multi_scale_test, mask_branch=True)
                assert cfg.metric == 'COCO'
                extra_keys = ['im_id', 'im_shape']
        sub_keys, sub_values, _ = parse_fetches(sub_fetches, sub_eval_prog,
                                                extra_keys)
        sub_eval_prog = sub_eval_prog.clone(True)

    # load model
    exe.run(startup_prog)
    if 'weights' in cfg:
        checkpoint.load_params(exe, startup_prog, cfg.weights)

    # mask head resolution needed to decode mask results
    resolution = None
    if 'Mask' in cfg.architecture or cfg.architecture == 'HybridTaskCascade':
        resolution = model.mask_head.resolution
    results = eval_run(exe, compile_program, loader, keys, values, cls, cfg,
                       sub_eval_prog, sub_keys, sub_values, resolution)

    # evaluation
    # if map_type not set, use default 11point, only use in VOC eval
    map_type = cfg.map_type if 'map_type' in cfg else '11point'
    save_only = getattr(cfg, 'save_prediction_only', False)
    eval_results(
        results,
        cfg.metric,
        cfg.num_classes,
        resolution,
        is_bbox_normalized,
        FLAGS.output_eval,
        map_type,
        dataset=dataset,
        save_only=save_only)
def main():
    """Static-graph (fluid) inference entry point.

    Builds the test program, loads weights, runs inference over the
    images given by --infer_dir / --infer_img, converts raw fetches to
    bbox/mask/segm/landmark results, and saves visualized images
    (optionally logging them to VisualDL).
    """
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)
    # check if set use_gpu=True in paddlepaddle cpu version
    check_gpu(cfg.use_gpu)
    # disable npu in config by default and check use_npu
    if 'use_npu' not in cfg:
        cfg.use_npu = False
    check_npu(cfg.use_npu)
    # disable xpu in config by default and check use_xpu
    if 'use_xpu' not in cfg:
        cfg.use_xpu = False
    check_xpu(cfg.use_xpu)
    # check if paddlepaddle version is satisfied
    check_version()

    main_arch = cfg.architecture

    dataset = cfg.TestReader['dataset']

    test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
    dataset.set_images(test_images)

    # pick the execution place by config priority: gpu > npu > xpu > cpu
    if cfg.use_gpu:
        place = fluid.CUDAPlace(0)
    elif cfg.use_npu:
        place = fluid.NPUPlace(0)
    elif cfg.use_xpu:
        place = fluid.XPUPlace(0)
    else:
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    model = create(main_arch)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['TestReader']['inputs_def']
            inputs_def['iterable'] = True
            feed_vars, loader = model.build_inputs(**inputs_def)
            test_fetches = model.test(feed_vars)
    # clone for inference (prunes backward ops)
    infer_prog = infer_prog.clone(True)

    reader = create_reader(cfg.TestReader, devices_num=1)
    loader.set_sample_list_generator(reader, place)

    exe.run(startup_prog)
    if cfg.weights:
        checkpoint.load_params(exe, infer_prog, cfg.weights)

    # parse infer fetches
    assert cfg.metric in ['COCO', 'VOC', 'OID', 'WIDERFACE'], \
        "unknown metric type {}".format(cfg.metric)
    extra_keys = []
    if cfg['metric'] in ['COCO', 'OID']:
        extra_keys = ['im_info', 'im_id', 'im_shape']
    if cfg['metric'] == 'VOC' or cfg['metric'] == 'WIDERFACE':
        extra_keys = ['im_id', 'im_shape']
    keys, values, _ = parse_fetches(test_fetches, infer_prog, extra_keys)

    # parse dataset category: pull in metric-specific result converters
    if cfg.metric == 'COCO':
        from ppdet.utils.coco_eval import bbox2out, mask2out, segm2out, get_category_info
    if cfg.metric == 'OID':
        from ppdet.utils.oid_eval import bbox2out, get_category_info
    if cfg.metric == "VOC":
        from ppdet.utils.voc_eval import bbox2out, get_category_info
    if cfg.metric == "WIDERFACE":
        from ppdet.utils.widerface_eval_utils import bbox2out, lmk2out, get_category_info

    anno_file = dataset.get_anno()
    with_background = dataset.with_background
    use_default_label = dataset.use_default_label

    clsid2catid, catid2name = get_category_info(anno_file, with_background,
                                                use_default_label)

    # whether output bbox is normalized in model output layer
    is_bbox_normalized = False
    if hasattr(model, 'is_bbox_normalized') and \
            callable(model.is_bbox_normalized):
        is_bbox_normalized = model.is_bbox_normalized()

    # use VisualDL to log image
    if FLAGS.use_vdl:
        assert six.PY3, "VisualDL requires Python >= 3.5"
        from visualdl import LogWriter
        vdl_writer = LogWriter(FLAGS.vdl_log_dir)
        vdl_image_step = 0
        vdl_image_frame = 0  # each frame can display ten pictures at most.

    imid2path = dataset.get_imid2path()
    for iter_id, data in enumerate(loader()):
        outs = exe.run(infer_prog,
                       feed=data,
                       fetch_list=values,
                       return_numpy=False)
        # pair each fetch key with (ndarray, LoD lengths)
        res = {
            k: (np.array(v), v.recursive_sequence_lengths())
            for k, v in zip(keys, outs)
        }
        logger.info('Infer iter {}'.format(iter_id))
        # architecture-specific post-processing of raw fetches
        if 'TTFNet' in cfg.architecture:
            res['bbox'][1].append([len(res['bbox'][0])])
        if 'CornerNet' in cfg.architecture:
            from ppdet.utils.post_process import corner_post_process
            post_config = getattr(cfg, 'PostProcess', None)
            corner_post_process(res, post_config, cfg.num_classes)

        bbox_results = None
        mask_results = None
        segm_results = None
        lmk_results = None
        if 'bbox' in res:
            bbox_results = bbox2out([res], clsid2catid, is_bbox_normalized)
        if 'mask' in res:
            mask_results = mask2out([res], clsid2catid,
                                    model.mask_head.resolution)
        if 'segm' in res:
            segm_results = segm2out([res], clsid2catid)
        if 'landmark' in res:
            lmk_results = lmk2out([res], is_bbox_normalized)

        # visualize result
        im_ids = res['im_id'][0]
        for im_id in im_ids:
            image_path = imid2path[int(im_id)]
            image = Image.open(image_path).convert('RGB')
            # honor EXIF orientation so drawn boxes align with pixels
            image = ImageOps.exif_transpose(image)

            # use VisualDL to log original image
            if FLAGS.use_vdl:
                original_image_np = np.array(image)
                vdl_writer.add_image(
                    "original/frame_{}".format(vdl_image_frame),
                    original_image_np, vdl_image_step)

            image = visualize_results(image,
                                      int(im_id), catid2name,
                                      FLAGS.draw_threshold, bbox_results,
                                      mask_results, segm_results, lmk_results)

            # use VisualDL to log image with bbox
            if FLAGS.use_vdl:
                infer_image_np = np.array(image)
                vdl_writer.add_image("bbox/frame_{}".format(vdl_image_frame),
                                     infer_image_np, vdl_image_step)
                # roll over to a new frame every 10 images
                vdl_image_step += 1
                if vdl_image_step % 10 == 0:
                    vdl_image_step = 0
                    vdl_image_frame += 1

            save_name = get_save_image_name(FLAGS.output_dir, image_path)
            logger.info("Detection bbox results save in {}".format(save_name))
            image.save(save_name, quality=95)
def main():
    """Static-graph (fluid) training entry point.

    Handles distributed setup, device selection, building the train (and
    optional eval) programs, checkpoint loading/resuming, the training
    loop with periodic logging/snapshotting, optional EMA weights,
    VisualDL logging and in-loop evaluation with best-model tracking.
    """
    env = os.environ
    # distributed mode is inferred from the Paddle launcher's env vars
    FLAGS.dist = 'PADDLE_TRAINER_ID' in env \
        and 'PADDLE_TRAINERS_NUM' in env \
        and int(env['PADDLE_TRAINERS_NUM']) > 1
    num_trainers = int(env.get('PADDLE_TRAINERS_NUM', 1))
    if FLAGS.dist:
        trainer_id = int(env['PADDLE_TRAINER_ID'])
        # per-trainer seed so shuffling differs across workers but is reproducible
        local_seed = (99 + trainer_id)
        random.seed(local_seed)
        np.random.seed(local_seed)

    # continuous-evaluation mode: fully deterministic seeds
    if FLAGS.enable_ce:
        random.seed(0)
        np.random.seed(0)

    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)
    # check if set use_gpu=True in paddlepaddle cpu version
    check_gpu(cfg.use_gpu)
    # disable npu in config by default and check use_npu
    if 'use_npu' not in cfg:
        cfg.use_npu = False
    check_npu(cfg.use_npu)
    use_xpu = False
    if hasattr(cfg, 'use_xpu'):
        check_xpu(cfg.use_xpu)
        use_xpu = cfg.use_xpu
    # check if paddlepaddle version is satisfied
    check_version()

    assert not (use_xpu and cfg.use_gpu), \
        'Can not run on both XPU and GPU'
    assert not (cfg.use_npu and cfg.use_gpu), \
        'Can not run on both NPU and GPU'

    # prediction-only configs cannot be trained
    save_only = getattr(cfg, 'save_prediction_only', False)
    if save_only:
        raise NotImplementedError('The config file only support prediction,'
                                  ' training stage is not implemented now')
    main_arch = cfg.architecture

    if cfg.use_gpu:
        devices_num = fluid.core.get_cuda_device_count()
    elif cfg.use_npu:
        devices_num = fluid.core.get_npu_device_count()
    elif use_xpu:
        # ToDo(qingshu): XPU only support single card now
        devices_num = 1
    else:
        devices_num = int(os.environ.get('CPU_NUM', 1))

    # the launcher pins each worker to a device via FLAGS_selected_* env vars
    if cfg.use_gpu and 'FLAGS_selected_gpus' in env:
        device_id = int(env['FLAGS_selected_gpus'])
    elif cfg.use_npu and 'FLAGS_selected_npus' in env:
        device_id = int(env['FLAGS_selected_npus'])
    elif use_xpu and 'FLAGS_selected_xpus' in env:
        device_id = int(env['FLAGS_selected_xpus'])
    else:
        device_id = 0

    if cfg.use_gpu:
        place = fluid.CUDAPlace(device_id)
    elif cfg.use_npu:
        place = fluid.NPUPlace(device_id)
    elif use_xpu:
        place = fluid.XPUPlace(device_id)
    else:
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    lr_builder = create('LearningRate')
    optim_builder = create('OptimizerBuilder')

    # build program
    startup_prog = fluid.Program()
    train_prog = fluid.Program()
    if FLAGS.enable_ce:
        startup_prog.random_seed = 1000
        train_prog.random_seed = 1000
    with fluid.program_guard(train_prog, startup_prog):
        with fluid.unique_name.guard():
            model = create(main_arch)
            if FLAGS.fp16:
                assert (getattr(model.backbone, 'norm_type', None)
                        != 'affine_channel'), \
                    '--fp16 currently does not support affine channel, ' \
                    ' please modify backbone settings to use batch norm'

            # fp16: scale the loss before minimize, unscale after, so
            # gradients stay in representable range
            with mixed_precision_context(FLAGS.loss_scale, FLAGS.fp16) as ctx:
                inputs_def = cfg['TrainReader']['inputs_def']
                feed_vars, train_loader = model.build_inputs(**inputs_def)
                train_fetches = model.train(feed_vars)
                loss = train_fetches['loss']
                if FLAGS.fp16:
                    loss *= ctx.get_loss_scale_var()
                lr = lr_builder()
                optimizer = optim_builder(lr)
                optimizer.minimize(loss)
                if FLAGS.fp16:
                    loss /= ctx.get_loss_scale_var()
            # optional exponential moving average of weights
            if 'use_ema' in cfg and cfg['use_ema']:
                global_steps = _decay_step_counter()
                ema = ExponentialMovingAverage(
                    cfg['ema_decay'], thres_steps=global_steps)
                ema.update()

    # parse train fetches
    train_keys, train_values, _ = parse_fetches(train_fetches)
    train_values.append(lr)

    if FLAGS.eval:
        eval_prog = fluid.Program()
        with fluid.program_guard(eval_prog, startup_prog):
            with fluid.unique_name.guard():
                model = create(main_arch)
                inputs_def = cfg['EvalReader']['inputs_def']
                feed_vars, eval_loader = model.build_inputs(**inputs_def)
                fetches = model.eval(feed_vars)
        eval_prog = eval_prog.clone(True)

        eval_reader = create_reader(cfg.EvalReader, devices_num=1)
        # When iterable mode, set set_sample_list_generator(eval_reader, place)
        eval_loader.set_sample_list_generator(eval_reader)

        # parse eval fetches
        extra_keys = []
        if cfg.metric == 'COCO':
            extra_keys = ['im_info', 'im_id', 'im_shape']
        if cfg.metric == 'VOC':
            extra_keys = ['gt_bbox', 'gt_class', 'is_difficult']
        if cfg.metric == 'WIDERFACE':
            extra_keys = ['im_id', 'im_shape', 'gt_bbox']
        eval_keys, eval_values, eval_cls = parse_fetches(fetches, eval_prog,
                                                         extra_keys)

    # compile program for multi-devices
    build_strategy = fluid.BuildStrategy()
    build_strategy.fuse_all_optimizer_ops = False
    # only enable sync_bn in multi GPU devices
    sync_bn = getattr(model.backbone, 'norm_type', None) == 'sync_bn'
    build_strategy.sync_batch_norm = sync_bn and devices_num > 1 \
        and cfg.use_gpu

    exec_strategy = fluid.ExecutionStrategy()
    # iteration number when CompiledProgram tries to drop local execution scopes.
    # Set it to be 1 to save memory usages, so that unused variables in
    # local execution scopes can be deleted after each iteration.
    exec_strategy.num_iteration_per_drop_scope = 1
    if FLAGS.dist:
        dist_utils.prepare_for_multi_process(exe, build_strategy, startup_prog,
                                             train_prog)
        exec_strategy.num_threads = 1

    exe.run(startup_prog)
    compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(
        loss_name=loss.name,
        build_strategy=build_strategy,
        exec_strategy=exec_strategy)
    # XPU/NPU do not use data-parallel compiled programs here
    if use_xpu or cfg.use_npu:
        compiled_train_prog = train_prog

    if FLAGS.eval:
        compiled_eval_prog = fluid.CompiledProgram(eval_prog)
        if use_xpu or cfg.use_npu:
            compiled_eval_prog = eval_prog

    fuse_bn = getattr(model.backbone, 'norm_type', None) == 'affine_channel'

    ignore_params = cfg.finetune_exclude_pretrained_params \
        if 'finetune_exclude_pretrained_params' in cfg else []

    # resume > fused-bn pretrain load > plain pretrain load
    start_iter = 0
    if FLAGS.resume_checkpoint:
        checkpoint.load_checkpoint(exe, train_prog, FLAGS.resume_checkpoint)
        start_iter = checkpoint.global_step()
    elif cfg.pretrain_weights and fuse_bn and not ignore_params:
        checkpoint.load_and_fusebn(exe, train_prog, cfg.pretrain_weights)
    elif cfg.pretrain_weights:
        checkpoint.load_params(
            exe, train_prog, cfg.pretrain_weights, ignore_params=ignore_params)

    train_reader = create_reader(
        cfg.TrainReader, (cfg.max_iters - start_iter) * devices_num,
        cfg,
        devices_num=devices_num,
        num_trainers=num_trainers)
    # When iterable mode, set set_sample_list_generator(train_reader, place)
    train_loader.set_sample_list_generator(train_reader)

    # whether output bbox is normalized in model output layer
    is_bbox_normalized = False
    if hasattr(model, 'is_bbox_normalized') and \
            callable(model.is_bbox_normalized):
        is_bbox_normalized = model.is_bbox_normalized()

    # if map_type not set, use default 11point, only use in VOC eval
    map_type = cfg.map_type if 'map_type' in cfg else '11point'

    train_stats = TrainingStats(cfg.log_iter, train_keys)
    train_loader.start()
    start_time = time.time()
    end_time = time.time()

    cfg_name = os.path.basename(FLAGS.config).split('.')[0]
    save_dir = os.path.join(cfg.save_dir, cfg_name)
    time_stat = deque(maxlen=cfg.log_iter)
    best_box_ap_list = [0.0, 0]  #[map, iter]

    # use VisualDL to log data
    if FLAGS.use_vdl:
        assert six.PY3, "VisualDL requires Python >= 3.5"
        from visualdl import LogWriter
        vdl_writer = LogWriter(FLAGS.vdl_log_dir)
        vdl_loss_step = 0
        vdl_mAP_step = 0

    for it in range(start_iter, cfg.max_iters):
        # rolling per-iteration timing for ETA estimation
        start_time = end_time
        end_time = time.time()
        time_stat.append(end_time - start_time)
        time_cost = np.mean(time_stat)
        eta_sec = (cfg.max_iters - it) * time_cost
        eta = str(datetime.timedelta(seconds=int(eta_sec)))
        outs = exe.run(compiled_train_prog, fetch_list=train_values)
        # last fetch is lr; the rest are loss stats
        stats = {k: np.array(v).mean() for k, v in zip(train_keys, outs[:-1])}

        # use vdl-paddle to log loss
        if FLAGS.use_vdl:
            if it % cfg.log_iter == 0:
                for loss_name, loss_value in stats.items():
                    vdl_writer.add_scalar(loss_name, loss_value, vdl_loss_step)
                vdl_loss_step += 1

        train_stats.update(stats)
        logs = train_stats.log()
        # only trainer 0 logs in distributed mode
        if it % cfg.log_iter == 0 and (not FLAGS.dist or trainer_id == 0):
            ips = float(cfg['TrainReader']['batch_size']) / time_cost
            strs = 'iter: {}, lr: {:.6f}, {}, eta: {}, batch_cost: {:.5f} sec, ips: {:.5f} images/sec'.format(
                it, np.mean(outs[-1]), logs, eta, time_cost, ips)
            logger.info(strs)

        # NOTE : profiler tools, used for benchmark
        if FLAGS.is_profiler and it == 5:
            profiler.start_profiler("All")
        elif FLAGS.is_profiler and it == 10:
            profiler.stop_profiler("total", FLAGS.profiler_path)
            return

        # snapshot (and optionally evaluate) on schedule and at the end
        if (it > 0 and it % cfg.snapshot_iter == 0 or it == cfg.max_iters - 1) \
           and (not FLAGS.dist or trainer_id == 0):
            save_name = str(it) if it != cfg.max_iters - 1 else "model_final"
            # temporarily swap in EMA weights while saving/evaluating
            if 'use_ema' in cfg and cfg['use_ema']:
                exe.run(ema.apply_program)
            checkpoint.save(exe, train_prog, os.path.join(save_dir, save_name))

            if FLAGS.eval:
                # evaluation
                resolution = None
                if 'Mask' in cfg.architecture:
                    resolution = model.mask_head.resolution
                results = eval_run(
                    exe,
                    compiled_eval_prog,
                    eval_loader,
                    eval_keys,
                    eval_values,
                    eval_cls,
                    cfg,
                    resolution=resolution)
                box_ap_stats = eval_results(
                    results, cfg.metric, cfg.num_classes, resolution,
                    is_bbox_normalized, FLAGS.output_eval, map_type,
                    cfg['EvalReader']['dataset'])

                # use vdl_paddle to log mAP
                if FLAGS.use_vdl:
                    vdl_writer.add_scalar("mAP", box_ap_stats[0], vdl_mAP_step)
                    vdl_mAP_step += 1

                # keep the best-mAP checkpoint separately
                if box_ap_stats[0] > best_box_ap_list[0]:
                    best_box_ap_list[0] = box_ap_stats[0]
                    best_box_ap_list[1] = it
                    checkpoint.save(exe, train_prog,
                                    os.path.join(save_dir, "best_model"))
                logger.info("Best test box ap: {}, in iter: {}".format(
                    best_box_ap_list[0], best_box_ap_list[1]))

            # restore the raw (non-EMA) weights to continue training
            if 'use_ema' in cfg and cfg['use_ema']:
                exe.run(ema.restore_program)

    train_loader.reset()