Code example #1
# Standard-library and OpenVINO imports this excerpt relies on; the demo-local
# helpers it calls (MulticamCapture, Detector, VectorCNN, run) are assumed to be
# imported from the demo's own utility modules, which are not shown here.
import argparse
import logging as log

from openvino.inference_engine import IECore


def main():
    """Prepares data for the person recognition demo"""
    parser = argparse.ArgumentParser(description='Multi camera multi person \
                                                  tracking live demo script')
    parser.add_argument('-i',
                        type=str,
                        nargs='+',
                        help='Input sources (indexes \
                        of cameras or paths to video files)',
                        required=True)

    parser.add_argument('-m',
                        '--m_detector',
                        type=str,
                        required=True,
                        help='Path to the person detection model')
    parser.add_argument('--t_detector',
                        type=float,
                        default=0.6,
                        help='Threshold for the person detection model')

    parser.add_argument('--m_reid',
                        type=str,
                        required=True,
                        help='Path to the person reidentification model')

    parser.add_argument('--output_video', type=str, default='', required=False)
    parser.add_argument('--config', type=str, default='', required=False)
    parser.add_argument('--history_file', type=str, default='', required=False)

    parser.add_argument('-d', '--device', type=str, default='CPU')
    parser.add_argument('-l',
                        '--cpu_extension',
                        help='MKLDNN (CPU)-targeted custom layers. Absolute \
                              path to a shared library with the kernels impl.',
                        type=str,
                        default=None)
    parser.add_argument("--no_show",
                        help="Optional. Don't show output",
                        action='store_true')

    args = parser.parse_args()

    capture = MulticamCapture(args.i)

    log.info("Creating Inference Engine")
    ie = IECore()

    person_detector = Detector(ie, args.m_detector, args.t_detector,
                               args.device, args.cpu_extension,
                               capture.get_num_sources())
    if args.m_reid:
        person_recognizer = VectorCNN(ie, args.m_reid, args.device)
    else:
        person_recognizer = None
    run(args, capture, person_detector, person_recognizer)
    log.info('Demo finished successfully')
Code example #2
def main():
    """Prepares data for the person recognition demo"""
    current_dir = os.path.dirname(os.path.abspath(__file__))
    parser = argparse.ArgumentParser(description='Multi camera multi person \
                                                  tracking live demo script')
    parser.add_argument('-i',
                        type=str,
                        nargs='+',
                        help='Input sources (indexes \
                        of cameras or paths to video files)',
                        required=True)
    parser.add_argument('--config',
                        type=str,
                        default=os.path.join(current_dir, 'config.py'),
                        required=False,
                        help='Configuration file')

    parser.add_argument('--detections',
                        type=str,
                        help='JSON file with bounding boxes')

    parser.add_argument('-m',
                        '--m_detector',
                        type=str,
                        required=False,
                        help='Path to the person detection model')
    parser.add_argument('--t_detector',
                        type=float,
                        default=0.6,
                        help='Threshold for the person detection model')

    parser.add_argument('--m_segmentation',
                        type=str,
                        required=False,
                        help='Path to the person instance segmentation model')
    parser.add_argument(
        '--t_segmentation',
        type=float,
        default=0.6,
        help='Threshold for person instance segmentation model')

    parser.add_argument('--m_reid',
                        type=str,
                        required=True,
                        help='Path to the person re-identification model')

    parser.add_argument('--output_video',
                        type=str,
                        default='',
                        required=False,
                        help='Optional. Path to output video')
    parser.add_argument(
        '--history_file',
        type=str,
        default='',
        required=False,
        help='Optional. Path to file in JSON format to save results of the demo'
    )
    parser.add_argument(
        '--save_detections',
        type=str,
        default='',
        required=False,
        help='Optional. Path to file in JSON format to save bounding boxes')
    parser.add_argument("--no_show",
                        help="Optional. Don't show output",
                        action='store_true')

    parser.add_argument('-d', '--device', type=str, default='CPU')
    parser.add_argument('-l',
                        '--cpu_extension',
                        help='MKLDNN (CPU)-targeted custom layers. Absolute \
                              path to a shared library with the kernels impl.',
                        type=str,
                        default=None)
    parser.add_argument('-u',
                        '--utilization_monitors',
                        default='',
                        type=str,
                        help='Optional. List of monitors to show initially.')

    args = parser.parse_args()
    if check_detectors(args) != 1:
        sys.exit(1)

    if len(args.config):
        log.info('Reading configuration file {}'.format(args.config))
        config = read_py_config(args.config)
    else:
        log.error(
            'No configuration file specified. Please specify parameter \'--config\''
        )
        sys.exit(1)

    random.seed(config['random_seed'])
    capture = MulticamCapture(args.i)

    log.info("Creating Inference Engine")
    ie = IECore()

    if args.detections:
        person_detector = DetectionsFromFileReader(args.detections,
                                                   args.t_detector)
    elif args.m_segmentation:
        person_detector = MaskRCNN(ie, args.m_segmentation,
                                   args.t_segmentation,
                                   args.device, args.cpu_extension,
                                   capture.get_num_sources())
    else:
        person_detector = Detector(ie, args.m_detector, args.t_detector,
                                   args.device, args.cpu_extension,
                                   capture.get_num_sources())

    if args.m_reid:
        person_recognizer = VectorCNN(ie, args.m_reid, args.device,
                                      args.cpu_extension)
    else:
        person_recognizer = None

    run(args, config, capture, person_detector, person_recognizer)
    log.info('Demo finished successfully')
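
Example #2 loads its settings from a Python file through read_py_config and then indexes the result like a dictionary (config['random_seed']). As a purely hypothetical illustration (the real demo configuration defines many more sections), a config.py compatible with that access pattern could be as small as:

# Hypothetical minimal config.py for example #2; only the key the snippet
# above actually reads is shown.
random_seed = 12345
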
Code example #3
def main():
    """Prepares data for the person recognition demo"""
    parser = argparse.ArgumentParser(description='Multi camera multi person \
                                                  tracking live demo script')
    parser.add_argument('-i',
                        type=str,
                        nargs='+',
                        help='Input sources (indexes \
                        of cameras or paths to video files)',
                        required=True)

    #parser.add_argument('-m', '--m_detector', type=str, required=True,  help='Path to the person detection model')
    parser.add_argument('-m',
                        '--m_detector',
                        type=str,
                        default='model/person-detection-retail-0013.xml')
    parser.add_argument('--t_detector',
                        type=float,
                        default=0.6,
                        help='Threshold for the person detection model')

    #parser.add_argument('--m_reid', type=str, required=True, help='Path to the person reidentification model')
    parser.add_argument(
        '--m_reid',
        type=str,
        default='model/person-reidentification-retail-0031.xml')

    parser.add_argument('--output_video', type=str, default='', required=False)
    #parser.add_argument('--config', type=str, default='', required=False)
    parser.add_argument('--config',
                        type=str,
                        default='config.py',
                        required=False)
    parser.add_argument('--history_file', type=str, default='', required=False)

    parser.add_argument('-d', '--device', type=str, default='CPU')
    #parser.add_argument('-d', '--device', type=str, default='MYRIAD')
    parser.add_argument('-l',
                        '--cpu_extension',
                        help='MKLDNN (CPU)-targeted custom layers. Absolute \
                              path to a shared library with the kernels impl.',
                        type=str,
                        default=None)
    parser.add_argument("--no_show",
                        help="Optional. Don't show output",
                        action='store_true')

    args = parser.parse_args()
    # The options defined above are stored in args;
    # e.g. args.i returns the value given for option -i.

    # MulticamCapture is the capture class defined in video.py; its methods are
    # accessed through the capture object.
    capture = MulticamCapture(args.i)

    log.info("Creating Inference Engine")
    # Create the Inference Engine interface
    # (https://docs.openvinotoolkit.org/2019_R3/classie__api_1_1IECore.html)
    ie = IECore()

    person_detector = Detector(ie, args.m_detector, args.t_detector,
                               args.device, args.cpu_extension,
                               capture.get_num_sources())
    # capture.get_num_sources(): the number of input video sources
    if args.m_reid:
        person_recognizer = VectorCNN(ie, args.m_reid, args.device)
    else:
        person_recognizer = None

    # Added 2020-05-11
    jot = JotTable()
    run(args, capture, person_detector, person_recognizer, jot)

    log.info('Demo finished successfully')
Code example #4
def main():
    """Prepares data for the object tracking demo"""
    current_dir = os.path.dirname(os.path.abspath(__file__))
    parser = argparse.ArgumentParser(description='Multi camera multi object \
                                                  tracking live demo script')
    parser.add_argument(
        '-i',
        '--input',
        required=True,
        nargs='+',
        help=
        'Required. Input sources (indexes of cameras or paths to video files)')
    parser.add_argument('--loop',
                        default=False,
                        action='store_true',
                        help='Optional. Enable reading the input in a loop')
    parser.add_argument('--config',
                        type=str,
                        default=os.path.join(current_dir, 'configs/person.py'),
                        required=False,
                        help='Configuration file')

    parser.add_argument('--detections',
                        type=str,
                        help='JSON file with bounding boxes')

    parser.add_argument('-m',
                        '--m_detector',
                        type=str,
                        required=False,
                        help='Path to the object detection model')
    parser.add_argument('--t_detector',
                        type=float,
                        default=0.6,
                        help='Threshold for the object detection model')

    parser.add_argument('--m_segmentation',
                        type=str,
                        required=False,
                        help='Path to the object instance segmentation model')
    parser.add_argument(
        '--t_segmentation',
        type=float,
        default=0.6,
        help='Threshold for object instance segmentation model')

    parser.add_argument(
        '--m_reid',
        type=str,
        required=True,
        help='Required. Path to the object re-identification model')

    parser.add_argument('--output_video',
                        type=str,
                        default='',
                        required=False,
                        help='Optional. Path to output video')
    parser.add_argument(
        '--history_file',
        type=str,
        default='',
        required=False,
        help='Optional. Path to file in JSON format to save results of the demo'
    )
    parser.add_argument(
        '--save_detections',
        type=str,
        default='',
        required=False,
        help='Optional. Path to file in JSON format to save bounding boxes')
    parser.add_argument("--no_show",
                        help="Optional. Don't show output",
                        action='store_true')

    parser.add_argument('-d', '--device', type=str, default='CPU')
    parser.add_argument('-u',
                        '--utilization_monitors',
                        default='',
                        type=str,
                        help='Optional. List of monitors to show initially.')

    args = parser.parse_args()
    if check_detectors(args) != 1:
        sys.exit(1)

    if len(args.config):
        log.debug('Reading config from {}'.format(args.config))
        config = read_py_config(args.config)
    else:
        log.error(
            'No configuration file specified. Please specify parameter \'--config\''
        )
        sys.exit(1)

    random.seed(config.random_seed)
    capture = MulticamCapture(args.input, args.loop)

    log.info('OpenVINO Runtime')
    log.info('\tbuild: {}'.format(get_version()))
    core = Core()

    if args.detections:
        object_detector = DetectionsFromFileReader(args.detections,
                                                   args.t_detector)
    elif args.m_segmentation:
        object_detector = MaskRCNN(core, args.m_segmentation,
                                   config.obj_segm.trg_classes,
                                   args.t_segmentation, args.device,
                                   capture.get_num_sources())
    else:
        object_detector = Detector(core, args.m_detector,
                                   config.obj_det.trg_classes, args.t_detector,
                                   args.device, capture.get_num_sources())

    if args.m_reid:
        object_recognizer = VectorCNN(core, args.m_reid, args.device)
    else:
        object_recognizer = None

    run(args, config, capture, object_detector, object_recognizer)
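
Unlike example #2, this variant reads the configuration through attribute access (config.random_seed, config.obj_det.trg_classes, config.obj_segm.trg_classes), so read_py_config is expected to return a namespace-like object here. A minimal stand-in for that structure, modelling only the attributes the snippet touches and using placeholder values:

from types import SimpleNamespace

# Hypothetical shape of the object returned by read_py_config in example #4.
config = SimpleNamespace(
    random_seed=5,
    obj_det=SimpleNamespace(trg_classes=[1]),   # class IDs kept by the detector
    obj_segm=SimpleNamespace(trg_classes=[1]),  # class IDs kept by the segmentation model
)
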
Code example #5
# Standard-library and third-party imports used below.
import logging
import os
from datetime import datetime
from threading import Timer

from fastapi import FastAPI

from config import Config
from mc_tracker.mct import MultiCameraTracker
from mc_tracker.sct import SingleCameraTracker

# REID_MODEL
from utils.network_wrappers import VectorCNN

logger = logging.getLogger("api")

config = Config()
os.environ['CUDA_VISIBLE_DEVICES'] = config.DEVICE_ID
app = FastAPI()

number_of_cameras = 1

reid = VectorCNN(config)
tracker = {}


class RepeatTimer(Timer):
    def run(self):
        while not self.finished.wait(self.interval):
            self.function(*self.args, **self.kwargs)
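
RepeatTimer overrides Timer.run so that the wrapped callable fires every self.interval seconds until the timer is cancelled, rather than only once. A minimal usage sketch, assuming a one-hour refresh interval (the snippet itself does not show how the timer is started), with update_db being the function defined just below:

# Hypothetical wiring: call update_db once an hour until cancelled.
timer = RepeatTimer(3600, update_db)
timer.daemon = True   # don't keep the process alive just for this timer
timer.start()
# ... on shutdown: timer.cancel()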


def update_db():
    try:
        global parsed_date, parsed_time, tracker
        d, t = datetime.now().strftime("%Y-%m-%d/%H:00:00").split('/')
        if (t != parsed_time):