def get_detector(model, weight_file, config):
    """Build and return an object detector of the requested flavor.

    Args:
        model: Either ``'all-in-one'`` or ``'two-stage'``; selects which
            ``TensorPackDetector`` implementation is imported.
        weight_file: Path to the checkpoint the detector loads.
        config: Optional sequence of ``KEY=VALUE`` overrides applied to the
            global tensorpack config before construction; falsy to skip.

    Returns:
        A ``TensorPackDetector`` instance initialized from ``weight_file``.

    Raises:
        ValueError: If ``model`` is not one of the supported names.
    """
    # Validate with an explicit raise: `assert` is stripped under `python -O`,
    # which would silently let an unknown model fall into the 'two-stage' path.
    if model not in ('all-in-one', 'two-stage'):
        raise ValueError(
            "model must be 'all-in-one' or 'two-stage', got %r" % (model,)
        )
    # Imports stay function-local so only the selected implementation
    # (and its heavyweight dependencies) is actually loaded.
    if model == 'all-in-one':
        from detection.core.tensorpack_detector import TensorPackDetector
    else:
        from detection.tensorpacks.tensorpack_detector_dev import TensorPackDetector
    from detection.config.tensorpack_config import config as cfg
    if config:
        cfg.update_args(config)
    return TensorPackDetector(weight_file)
def get_class_ids(self): return set(range(1, cfg.DATA.NUM_CATEGORY + 1)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--config', default='', type=str, help='Configurations of object detection model', nargs='+' ) args = parser.parse_args() if args.config: cfg.update_args(args.config) obj_detector = TensorPackDetector('/root/datasets/figmarcnn/checkpoint') img = cv2.imread('/root/datasets/img-folder/a.png', cv2.IMREAD_COLOR) results = obj_detector.detect(img, rgb=False) final = draw_final_outputs(img, results) # image contain boxes,labels and scores viz = np.concatenate((img, final), axis=1) tpviz.interactive_imshow(viz) ''' --image /root/datasets/myimage/8.jpeg --cam