Example 1
    def detect_stream(self, stream, options={}, ml_overrides={}):
        """Implements detection on a video stream

        Args:
            stream (string): location of media (file, url or event ID)
            ml_overrides (dict, optional): Overrides for the configured model sequence. You will almost never need it; zm_detect uses it internally. Defaults to {}
            options (dict, optional): Various options that control the detection process. Defaults to {}:
                - delay (int): Delay in seconds before starting media stream
                - download (boolean): if True, will download video before analysis. Defaults to False
                - download_dir (string): directory where downloads will be kept (only applies to videos). Default is /tmp
                - start_frame (int): Which frame to start analysis. Default 1.
                - frame_skip (int): Number of frames to skip in video (for example, 3 means process every 3rd frame)
                - max_frames (int): Total number of frames to process before stopping
                - pattern (string): regexp for objects that will be matched. The 'frame_strategy' key below is applied only to objects that match this pattern
                - frame_set (string): comma-separated frames to read. Example: 'alarm,21,31,41,snapshot'
                  Note that if you are specifying frame IDs and using ZM, remember that ZM keeps a pre-event
                  frame buffer (the default is around 20 frames), so you may want to start at frame 21.
                - contig_frames_before_error (int): How many contiguous frames should fail before we give up on reading this stream. Default 5
                - max_attempts (int): Only for ZM indirection. How many times to retry a failed frame get. Default 1
                - sleep_between_attempts (int): Only for ZM indirection. Time to wait before re-trying a failed frame
                - disable_ssl_cert_check (bool): If True (default) will allow self-signed certs to work
                - save_frames (boolean): If True, will save frames used in analysis. Default False
                - save_analyzed_frames (boolean): If True, will save analyzed frames (with boxes). Default False
                - save_frames_dir (string): Directory to save analyzed frames. Default /tmp
                - frame_strategy (string): strategy used to select the matched frame:
                    - 'most_models': Match the frame that has matched the most models (does not include same model alternatives) (Default)
                    - 'first': Stop at the first match
                    - 'most': Match the frame that has the highest number of detected objects
                    - 'most_unique': Match the frame that has the highest number of unique detected objects
           
                - resize (int): Width to resize image, default 800
                - polygons (list): list of polygons that detected objects need to intersect
                
        Returns:
            - dict: represents the matched frame and consists of:

                - boxes (array): list of bounding boxes for the matched frame
                - labels (array): list of labels for the matched frame
                - confidences (array): list of confidences for the matched frame
                - frame_id (int): frame id of the matched frame
                - image (cv2 image): image grab of the matched frame
                - error_boxes (array): bounding boxes that did not pass the match filters (drawn in red in analyzed/debug images)
                - image_dimensions: dimensions of the image that was analyzed
                - polygons (list): polygons used during detection

            - array of dicts: frame_id, boxes, labels, confidences and models of every frame that matched

        Note:

        The frames retrieved differ depending on whether you set
        ``download`` to ``True`` or ``False``. When set to ``True``, we use
        OpenCV's frame reading logic; when ``False``, we use ZoneMinder's image.php function,
        which uses time-based approximation. The two modes therefore retrieve slightly different
        frame offsets, but in practice they should be reasonably close.
            
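        Example:

            A minimal illustrative call, assuming ``detector`` is an already
            constructed instance of this class; the event id and option values
            below are placeholders, not defaults::

                options = {
                    'frame_set': 'alarm,snapshot',
                    'frame_strategy': 'most_models',
                    'resize': 800,
                    'download': False,
                }
                matched_data, all_matches = detector.detect_stream(
                    stream='12345', options=options)
                print(matched_data['frame_id'], matched_data['labels'],
                      matched_data['confidences'])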
        """

        self.ml_overrides = ml_overrides
        self.stream_options = options
        frame_strategy = self.stream_options.get('frame_strategy',
                                                 'most_models')
        all_matches = []
        matched_b = []
        matched_e = []
        matched_l = []
        matched_c = []
        matched_models = []
        matched_frame_id = None
        matched_images = []

        matched_frame_img = None
        manual_locking = False

        if len(self.model_sequence) > 1:
            manual_locking = False
            self.logger.Debug(
                3,
                'Using automatic locking as we are switching between models')
        else:
            manual_locking = True
            self.logger.Debug(
                3, 'Using manual locking as we are only using one model')
            for seq in self.model_sequence:
                self.ml_options[seq]['auto_lock'] = False
        t = Timer()
        media = MediaStream(stream,
                            'video',
                            self.stream_options,
                            logger=self.logger)
        self.media = media

        polygons = copy.copy(self.stream_options.get('polygons', []))

        # Loops across all frames
        while self.media.more():
            frame = self.media.read()
            if frame is None:
                self.logger.Debug(1, 'Ran out of frames to read')
                break
            #fname = '/tmp/{}.jpg'.format(self.media.get_last_read_frame())
            #print (f'Writing to {fname}')
            #cv2.imwrite( fname ,frame)
            self.logger.Debug(
                1, 'perf: Starting for frame:{}'.format(
                    self.media.get_last_read_frame()))
            _labels_in_frame = []
            _boxes_in_frame = []
            _error_boxes_in_frame = []
            _confs_in_frame = []
            _models_in_frame = []

            # For each frame, loop across all models
            found = False
            for seq in self.model_sequence:
                if seq not in self.ml_overrides.get('model_sequence', seq):
                    self.logger.Debug(
                        1, 'Skipping {} as it was overridden in ml_overrides'.
                        format(seq))
                    continue
                self.logger.Debug(
                    1,
                    '============ Frame: {} Running {} model in sequence =================='
                    .format(self.media.get_last_read_frame(), seq))
                pre_existing_labels = self.ml_options.get(seq, {}).get(
                    'general', {}).get('pre_existing_labels')
                if pre_existing_labels:
                    self.logger.Debug(
                        2,
                        'Making sure we have matched one of {} in {} before we proceed'
                        .format(pre_existing_labels, _labels_in_frame))
                    if not any(x in _labels_in_frame
                               for x in pre_existing_labels):
                        self.logger.Debug(
                            1,
                            'Did not find pre existing labels, not running model'
                        )
                        continue

                if not self.models.get(seq):
                    try:
                        self._load_models([seq])
                        if manual_locking:
                            for m in self.models[seq]:
                                m.acquire_lock()
                    except Exception as e:
                        self.logger.Error(
                            'Error loading model for {}:{}'.format(seq, e))
                        self.logger.Debug(2, traceback.format_exc())
                        continue

                same_model_sequence_strategy = self.ml_options.get(
                    seq, {}).get('general',
                                 {}).get('same_model_sequence_strategy',
                                         'first')
                self.logger.Debug(
                    3, '{} has a same_model_sequence strategy of {}'.format(
                        seq, same_model_sequence_strategy))

                # start of same model iteration
                _b_best_in_same_model = []
                _l_best_in_same_model = []
                _c_best_in_same_model = []
                _e_best_in_same_model = []

                cnt = 1
                # For each model, loop across different variations
                for m in self.models[seq]:
                    self.logger.Debug(
                        3,
                        '--------- Frame:{} Running variation: #{} -------------'
                        .format(self.media.get_last_read_frame(), cnt))
                    cnt += 1
                    try:
                        _b, _l, _c = m.detect(image=frame)
                        self.logger.Debug(
                            4,
                            'This model iteration inside {} found: labels: {},conf:{}'
                            .format(seq, _l, _c))
                    except Exception as e:
                        self.logger.Error('Error running model: {}'.format(e))
                        self.logger.Debug(2, traceback.format_exc())
                        continue

                    # Now let's make sure the labels match our pattern
                    _b, _l, _c, _e = self._filter_patterns(
                        seq, _b, _l, _c, polygons)
                    if _e:
                        _e_best_in_same_model.extend(_e)
                    if not len(_l):
                        continue
                    if ((same_model_sequence_strategy == 'first')
                            or ((same_model_sequence_strategy == 'most') and
                                (len(_l) > len(_l_best_in_same_model))) or
                        ((same_model_sequence_strategy == 'most_unique') and
                         (len(set(_l)) > len(set(_l_best_in_same_model))))):
                        _b_best_in_same_model = _b
                        _l_best_in_same_model = _l
                        _c_best_in_same_model = _c
                        _e_best_in_same_model = _e
                    if _l_best_in_same_model and self.stream_options.get(
                            'save_analyzed_frames'
                    ) and self.media.get_debug_filename():
                        d = self.stream_options.get('save_frames_dir', '/tmp')
                        f = '{}/{}-analyzed-{}.jpg'.format(
                            d, self.media.get_debug_filename(),
                            media.get_last_read_frame())
                        self.logger.Debug(
                            4, 'Saving analyzed frame: {}'.format(f))
                        a = utils.draw_bbox(
                            frame, _b_best_in_same_model,
                            _l_best_in_same_model, _c_best_in_same_model,
                            self.stream_options.get('polygons'))
                        for _b in _e_best_in_same_model:
                            cv2.rectangle(a, (_b[0], _b[1]), (_b[2], _b[3]),
                                          (0, 0, 255), 1)
                        cv2.imwrite(f, a)
                    if (same_model_sequence_strategy == 'first') and len(_b):
                        self.logger.Debug(
                            3,
                            'breaking out of same model loop, as matches found and strategy is "first"'
                        )
                        break
                # end of same model sequence iteration
                # at this stage, x_best_in_same_model contains the best match
                # across same model variations
                if _l_best_in_same_model:
                    found = True
                    _labels_in_frame.extend(_l_best_in_same_model)
                    _boxes_in_frame.extend(_b_best_in_same_model)
                    _confs_in_frame.extend(_c_best_in_same_model)
                    _error_boxes_in_frame.extend(_e_best_in_same_model)
                    _models_in_frame.append(seq)
                    if (frame_strategy == 'first'):
                        self.logger.Debug(
                            2,
                            'Breaking out of main model loop as strategy is first'
                        )
                        break
                else:
                    self.logger.Debug(
                        2,
                        'We did not find any {} matches in frame: {}'.format(
                            seq, self.media.get_last_read_frame()))

            # end of primary model sequence
            if found:
                all_matches.append({
                    'frame_id': self.media.get_last_read_frame(),
                    'boxes': _boxes_in_frame,
                    'error_boxes': _error_boxes_in_frame,
                    'labels': _labels_in_frame,
                    'confidences': _confs_in_frame,
                    'models': _models_in_frame
                })
                matched_images.append(frame.copy())
                if (frame_strategy == 'first'):
                    self.logger.Debug(
                        2,
                        'Frame strategy is first, breaking out of frame loop')
                    break

        # end of while media loop

        #print ('*********** MATCH_STRATEGY {}'.format(model_match_strategy))
        for idx, item in enumerate(all_matches):
            if ((frame_strategy == 'first')
                    or ((frame_strategy == 'most') and
                        (len(item['labels']) > len(matched_l)))
                    or ((frame_strategy == 'most_models') and
                        (len(item['models']) > len(matched_models)))
                    or ((frame_strategy == 'most_unique') and
                        (len(set(item['labels'])) > len(set(matched_l))))):
                matched_b = item['boxes']
                matched_e = item['error_boxes']
                matched_c = item['confidences']
                matched_l = item['labels']
                matched_frame_id = item['frame_id']
                matched_models = item['models']
                matched_frame_img = matched_images[idx]

        if manual_locking:
            for seq in self.model_sequence:
                for m in self.models[seq]:
                    m.release_lock()

        diff_time = t.stop_and_get_ms()

        self.logger.Debug(
            1,
            'perf: TOTAL detection sequence (with image loads) took: {}  to process {}'
            .format(diff_time, stream))
        self.media.stop()

        matched_data = {
            'boxes': matched_b,
            'error_boxes': matched_e,
            'labels': matched_l,
            'confidences': matched_c,
            'frame_id': matched_frame_id,
            'image_dimensions': self.media.image_dimensions(),
            #'type': matched_type,
            'image': matched_frame_img,
            'polygons': polygons
        }
        # if invoked again, we need to resize polys
        self.has_rescaled = False
        return matched_data, all_matches
Example 2
def main_handler():
    # set up logging to syslog
    # construct the argument parse and parse the arguments
  
    ap = argparse.ArgumentParser()
    ap.add_argument('-c', '--config', help='config file with path')
    ap.add_argument('-e', '--eventid', help='event ID to retrieve')
    ap.add_argument('-p',
                    '--eventpath',
                    help='path to store object image file',
                    default='')
    ap.add_argument('-m', '--monitorid', help='monitor id - needed for mask')
    ap.add_argument('-v',
                    '--version',
                    help='print version and quit',
                    action='store_true')

    ap.add_argument('-o', '--output-path',
                    help='internal testing use only - path for debug images to be written')

    ap.add_argument('-f',
                    '--file',
                    help='internal testing use only - skips event download')


    ap.add_argument('-r', '--reason', help='reason for event (notes field in ZM)')

    ap.add_argument('-n', '--notes', help='updates notes field in ZM with detections', action='store_true')
    ap.add_argument('-d', '--debug', help='enables debug on console', action='store_true')

    args, u = ap.parse_known_args()
    args = vars(args)

    if args.get('version'):
        print('hooks:{} pyzm:{}'.format(hooks_version, pyzm_version))
        exit(0)

    if not args.get('config'):
        print ('--config required')
        exit(1)

    if not args.get('file') and not args.get('eventid'):
        print ('--eventid required')
        exit(1)

    utils.get_pyzm_config(args)

    if args.get('debug'):
        g.config['pyzm_overrides']['dump_console'] = True
        g.config['pyzm_overrides']['log_debug'] = True
        g.config['pyzm_overrides']['log_level_debug'] = 5
        g.config['pyzm_overrides']['log_debug_target'] = None

    if args.get('monitorid'):
        log.init(name='zmesdetect_' + 'm' + args.get('monitorid'), override=g.config['pyzm_overrides'])
    else:
        log.init(name='zmesdetect',override=g.config['pyzm_overrides'])
    g.logger = log
    
    es_version='(?)'
    try:
        es_version=subprocess.check_output(['/usr/bin/zmeventnotification.pl', '--version']).decode('ascii')
    except:
        pass


    try:
        import cv2
    except ImportError as e:
        g.logger.Fatal (f'{e}: You might not have installed OpenCV as per install instructions. Remember, it is NOT automatically installed')

    g.logger.Info('---------| pyzm version:{}, hook version:{},  ES version:{} , OpenCV version:{}|------------'.format(pyzm_version, hooks_version, es_version, cv2.__version__))
   

    
    # load modules that depend on cv2
    try:
        import zmes_hook_helpers.image_manip as img
    except Exception as e:
        g.logger.Error (f'{e}')
        exit(1)
    g.polygons = []

    # process config file
    g.ctx = ssl.create_default_context()
    utils.process_config(args, g.ctx)


    # misc came later, so let's be safe
    if not os.path.exists(g.config['base_data_path'] + '/misc/'):
        try:
            os.makedirs(g.config['base_data_path'] + '/misc/')
        except FileExistsError:
            pass  # if two detects run together with a race here

    if not g.config['ml_gateway']:
        g.logger.Info('Importing local classes for Object/Face')
        import pyzm.ml.object as object_detection
       
    else:
        g.logger.Info('Importing remote shim classes for Object/Face')
        from zmes_hook_helpers.apigw import ObjectRemote, FaceRemote, AlprRemote
    # now download image(s)


    start = datetime.datetime.now()

    obj_json = []

    import pyzm.api as zmapi
    api_options  = {
    'apiurl': g.config['api_portal'],
    'portalurl': g.config['portal'],
    'user': g.config['user'],
    'password': g.config['password'] ,
    'logger': g.logger, # use none if you don't want to log to ZM,
    #'disable_ssl_cert_check': True
    }

    g.logger.Info('Connecting with ZM APIs')
    zmapi = zmapi.ZMApi(options=api_options)
    stream = args.get('eventid') or args.get('file')
    ml_options = {}
    stream_options={}
    secrets = None 
    
    if g.config['ml_sequence'] and g.config['use_sequence'] == 'yes':
        g.logger.Debug(2,'using ml_sequence')
        ml_options = g.config['ml_sequence']
        secrets = pyzmutils.read_config(g.config['secrets'])
        ml_options = pyzmutils.template_fill(input_str=ml_options, config=None, secrets=secrets._sections.get('secrets'))
        ml_options = ast.literal_eval(ml_options)
        g.config['ml_sequence'] = ml_options
    else:
        g.logger.Debug(2,'mapping legacy ml data from config')
        ml_options = utils.convert_config_to_ml_sequence()
    

    if g.config['stream_sequence'] and g.config['use_sequence'] == 'yes': # new sequence
        g.logger.Debug(2,'using stream_sequence')
        stream_options = g.config['stream_sequence']
        stream_options = ast.literal_eval(stream_options)
        g.config['stream_sequence'] = stream_options
    else: # legacy
        g.logger.Debug(2,'mapping legacy stream data from config')
        if g.config['detection_mode'] == 'all':
            g.config['detection_mode'] = 'most_models'
        frame_set = g.config['frame_id']
        if g.config['frame_id'] == 'bestmatch':
            if g.config['bestmatch_order'] == 's,a':
                frame_set = 'snapshot,alarm'
            else:
                frame_set = 'alarm,snapshot'
        stream_options['resize'] = int(g.config['resize']) if g.config['resize'] != 'no' else None

       
        stream_options['strategy'] = g.config['detection_mode'] 
        stream_options['frame_set'] = frame_set       

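    # For reference: a new-style 'stream_sequence' value in the config file is a
    # Python dict literal stored as a string and parsed above with
    # ast.literal_eval(). An illustrative (not exhaustive) value, using keys
    # documented in DetectSequence.detect_stream, might look like:
    #
    #   stream_sequence = {
    #       'frame_strategy': 'most_models',
    #       'frame_set': 'alarm,snapshot',
    #       'resize': 800,
    #   }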
    # These are stream options that need to be set outside of supplied configs         
    stream_options['api'] = zmapi
    
    stream_options['polygons'] = g.polygons

    '''
    stream_options = {
            'api': zmapi,
            'download': False,
            'frame_set': frame_set,
            'strategy': g.config['detection_mode'],
            'polygons': g.polygons,
            'resize': int(g.config['resize']) if g.config['resize'] != 'no' else None

    }
    '''

   
    m = None
    matched_data = None
    all_data = None

    if not args['file'] and int(g.config['wait']) > 0:
        g.logger.Info('Sleeping for {} seconds before inferencing'.format(
            g.config['wait']))
        time.sleep(g.config['wait'])

    if g.config['ml_gateway']:
        stream_options['api'] = None
        stream_options['monitorid'] = args.get('monitorid')
        start = datetime.datetime.now()
        try:
            matched_data,all_data = remote_detect(stream=stream, options=stream_options, api=zmapi)
            diff_time = (datetime.datetime.now() - start)
            g.logger.Debug(1,'Total remote detection took: {}'.format(diff_time))
        except Exception as e:
            g.logger.Error ("Error with remote mlapi:{}".format(e))
            g.logger.Debug(2,traceback.format_exc())

            if g.config['ml_fallback_local'] == 'yes':
                g.logger.Debug (1, "Falling back to local detection")
                stream_options['api'] = zmapi
                from pyzm.ml.detect_sequence import DetectSequence
                m = DetectSequence(options=ml_options, logger=g.logger)
                matched_data,all_data = m.detect_stream(stream=stream, options=stream_options)
    

    else:
        from pyzm.ml.detect_sequence import DetectSequence
        m = DetectSequence(options=ml_options, logger=g.logger)
        matched_data,all_data = m.detect_stream(stream=stream, options=stream_options)
    


    #print(f'ALL FRAMES: {all_data}\n\n')
    #print (f"SELECTED FRAME {matched_data['frame_id']}, size {matched_data['image_dimensions']} with LABELS {matched_data['labels']} {matched_data['boxes']} {matched_data['confidences']}")
    #print (matched_data)
    '''
     matched_data = {
            'boxes': matched_b,
            'labels': matched_l,
            'confidences': matched_c,
            'frame_id': matched_frame_id,
            'image_dimensions': self.media.image_dimensions(),
            'image': matched_frame_img
        }
    '''

    # let's remove past detections first, if enabled 
    if g.config['match_past_detections'] == 'yes' and args.get('monitorid'):
        # point detections to post processed data set
        g.logger.Info('Removing matches to past detections')
        bbox_t, label_t, conf_t = img.processPastDetection(
            matched_data['boxes'], matched_data['labels'], matched_data['confidences'], args.get('monitorid'))
        # save current objects for future comparisons
        g.logger.Debug(1,
            'Saving detections for monitor {} for future match'.format(
                args.get('monitorid')))
        try:
            mon_file = g.config['image_path'] + '/monitor-' + args.get(
            'monitorid') + '-data.pkl'
            f = open(mon_file, "wb")
            pickle.dump(matched_data['boxes'], f)
            pickle.dump(matched_data['labels'], f)
            pickle.dump(matched_data['confidences'], f)
            f.close()
        except Exception as e:
            g.logger.Error(f'Error writing to {mon_file}, past detections not recorded:{e}')

        matched_data['boxes'] = bbox_t
        matched_data['labels'] = label_t
        matched_data['confidences'] = conf_t

    obj_json = {
        'labels': matched_data['labels'],
        'boxes': matched_data['boxes'],
        'frame_id': matched_data['frame_id'],
        'confidences': matched_data['confidences'],
        'image_dimensions': matched_data['image_dimensions']
    }

    # 'confidences': ["{:.2f}%".format(item * 100) for item in matched_data['confidences']],
    
    detections = []
    seen = {}
    pred=''
    prefix = ''

    if matched_data['frame_id'] == 'snapshot':
        prefix = '[s] '
    elif matched_data['frame_id'] == 'alarm':
        prefix = '[a] '
    else:
        prefix = '[x] '
        #g.logger.Debug (1,'CONFIDENCE ARRAY:{}'.format(conf))
    for idx, l in enumerate(matched_data['labels']):
        if l not in seen:
            if g.config['show_percent'] == 'no':
                pred = pred + l + ','
            else:
                pred = pred + l + ':{:.0%}'.format(matched_data['confidences'][idx]) + ' '
            seen[l] = 1

    if pred != '':
        pred = pred.rstrip(',')
        pred = prefix + 'detected:' + pred
        g.logger.Info('Prediction string:{}'.format(pred))
        jos = json.dumps(obj_json)
        g.logger.Debug(1,'Prediction string JSON:{}'.format(jos))
        print(pred + '--SPLIT--' + jos)
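        # Illustrative example of the line printed above (with show_percent=yes);
        # the labels, scores and JSON fields are made up. Downstream consumers
        # split this line on '--SPLIT--':
        #   [a] detected:person:93% car:72% --SPLIT--{"labels": ["person", "car"], ...}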

        if (matched_data['image'] is not None) and (g.config['write_image_to_zm'] == 'yes' or g.config['write_debug_image'] == 'yes'):
            debug_image = pyzmutils.draw_bbox(image=matched_data['image'],boxes=matched_data['boxes'], 
                                              labels=matched_data['labels'], confidences=matched_data['confidences'],
                                              polygons=g.polygons, poly_thickness = g.config['poly_thickness'])

            if g.config['write_debug_image'] == 'yes':
                for _b in matched_data['error_boxes']:
                    cv2.rectangle(debug_image, (_b[0], _b[1]), (_b[2], _b[3]),
                        (0,0,255), 1)
                filename_debug = g.config['image_path']+'/'+os.path.basename(append_suffix(stream, '-{}-debug'.format(matched_data['frame_id'])))
                g.logger.Debug (1,'Writing bounding boxes to debug image: {}'.format(filename_debug))
                cv2.imwrite(filename_debug,debug_image)

            if g.config['write_image_to_zm'] == 'yes' and args.get('eventpath'):
                g.logger.Debug(1,'Writing detected image to {}/objdetect.jpg'.format(
                    args.get('eventpath')))
                cv2.imwrite(args.get('eventpath') + '/objdetect.jpg', debug_image)
                jf = args.get('eventpath')+ '/objects.json'
                g.logger.Debug(1,'Writing JSON output to {}'.format(jf))
                try:
                    with open(jf, 'w') as jo:
                        json.dump(obj_json, jo)
                        jo.close()
                except Exception as e:
                    g.logger.Error(f'Error creating {jf}:{e}')
                    
        if args.get('notes'):
            url = '{}/events/{}.json'.format(g.config['api_portal'], args['eventid'])
            try:
                ev = zmapi._make_request(url=url,  type='get')
            except Exception as e:
                g.logger.Error ('Error during event notes retrieval: {}'.format(str(e)))
                g.logger.Debug(2,traceback.format_exc())
                exit(0) # Let's continue with zmdetect

            new_notes = pred
            if ev.get('event',{}).get('Event',{}).get('Notes'): 
                old_notes = ev['event']['Event']['Notes']
                old_notes_split = old_notes.split('Motion:')
                old_d = old_notes_split[0] # old detection
                try:
                    old_m = old_notes_split[1] 
                except IndexError:
                    old_m = ''
                new_notes = pred + 'Motion:'+ old_m
                g.logger.Debug (1,'Replacing old note:{} with new note:{}'.format(old_notes, new_notes))
                

            payload = {}
            payload['Event[Notes]'] = new_notes
            try:
                ev = zmapi._make_request(url=url, payload=payload, type='put')
            except Exception as e:
                g.logger.Error ('Error during notes update: {}'.format(str(e)))
                g.logger.Debug(2,traceback.format_exc())

        if g.config['create_animation'] == 'yes':
            if not args.get('eventid'):
                g.logger.Error ('Cannot create animation as you did not pass an event ID')
            else:
                g.logger.Debug(1,'animation: Creating burst...')
                try:
                    img.createAnimation(matched_data['frame_id'], args.get('eventid'), args.get('eventpath')+'/objdetect', g.config['animation_types'])
                except Exception as e:
                    g.logger.Error('Error creating animation:{}'.format(e))
                    g.logger.Error('animation: Traceback:{}'.format(traceback.format_exc()))
Example 3
def main_handler():
    # set up logging to syslog
    # construct the argument parse and parse the arguments

    ap = argparse.ArgumentParser()
    ap.add_argument('-c', '--config', help='config file with path')
    ap.add_argument('-e', '--eventid', help='event ID to retrieve')
    ap.add_argument('-p',
                    '--eventpath',
                    help='path to store object image file',
                    default='')
    ap.add_argument('-m', '--monitorid', help='monitor id - needed for mask')
    ap.add_argument('-v',
                    '--version',
                    help='print version and quit',
                    action='store_true')

    ap.add_argument(
        '-o',
        '--output-path',
        help='internal testing use only - path for debug images to be written')

    ap.add_argument('-f',
                    '--file',
                    help='internal testing use only - skips event download')

    ap.add_argument('-r',
                    '--reason',
                    help='reason for event (notes field in ZM)')

    ap.add_argument('-n',
                    '--notes',
                    help='updates notes field in ZM with detections',
                    action='store_true')
    ap.add_argument('-d',
                    '--debug',
                    help='enables debug on console',
                    action='store_true')

    args, u = ap.parse_known_args()
    args = vars(args)

    if args.get('version'):
        print('hooks:{} pyzm:{}'.format(hooks_version, pyzm_version))
        exit(0)

    if not args.get('config'):
        print('--config required')
        exit(1)

    if not args.get('file') and not args.get('eventid'):
        print('--eventid required')
        exit(1)

    utils.get_pyzm_config(args)

    if args.get('debug'):
        g.config['pyzm_overrides']['dump_console'] = True
        g.config['pyzm_overrides']['log_debug'] = True
        g.config['pyzm_overrides']['log_level_debug'] = 5
        g.config['pyzm_overrides']['log_debug_target'] = None

    if args.get('monitorid'):
        log.init(name='zmesdetect_' + 'm' + args.get('monitorid'),
                 override=g.config['pyzm_overrides'])
    else:
        log.init(name='zmesdetect', override=g.config['pyzm_overrides'])
    g.logger = log

    es_version = '(?)'
    try:
        es_version = subprocess.check_output(
            ['/usr/bin/zmeventnotification.pl', '--version']).decode('ascii')
    except:
        pass

    try:
        import cv2
    except ImportError as e:
        g.logger.Fatal(
            f'{e}: You might not have installed OpenCV as per install instructions. Remember, it is NOT automatically installed'
        )

    g.logger.Info(
        '---------| pyzm version:{}, hook version:{},  ES version:{} , OpenCV version:{}|------------'
        .format(pyzm_version, hooks_version, es_version, cv2.__version__))

    # load modules that depend on cv2
    try:
        import zmes_hook_helpers.image_manip as img
    except Exception as e:
        g.logger.Error(f'{e}')
        exit(1)
    g.polygons = []

    # process config file
    g.ctx = ssl.create_default_context()
    utils.process_config(args, g.ctx)

    # misc came later, so let's be safe
    if not os.path.exists(g.config['base_data_path'] + '/misc/'):
        try:
            os.makedirs(g.config['base_data_path'] + '/misc/')
        except FileExistsError:
            pass  # if two detects run together with a race here

    if not g.config['ml_gateway']:
        g.logger.Info('Importing local classes for Object/Face')
        import pyzm.ml.object as object_detection
        import pyzm.ml.hog as hog
    else:
        g.logger.Info('Importing remote shim classes for Object/Face')
        from zmes_hook_helpers.apigw import ObjectRemote, FaceRemote, AlprRemote

    # now download image(s)

    start = datetime.datetime.now()

    obj_json = []

    from pyzm.ml.detect_sequence import DetectSequence
    import pyzm.api as zmapi

    api_options = {
        'apiurl': g.config['api_portal'],
        'portalurl': g.config['portal'],
        'user': g.config['user'],
        'password': g.config['password'],
        'logger': g.logger,  # use none if you don't want to log to ZM,
        #'disable_ssl_cert_check': True
    }

    zmapi = zmapi.ZMApi(options=api_options)
    stream = args.get('eventid') or args.get('file')

    ml_options = {
        'general': {
            'model_sequence': 'object,face,alpr',
            #'model_sequence': 'object,face',
        },
        'object': {
            'general': {
                'same_model_sequence_strategy':
                'first'  # 'first' 'most', 'most_unique'
            },
            'sequence': [
                {
                    #First run on TPU
                    'max_detection_size': g.config['max_detection_size'],
                    'object_weights':
                    '/var/lib/zmeventnotification/models/coral_edgetpu/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite',
                    'object_labels':
                    '/var/lib/zmeventnotification/models/coral_edgetpu/coco_indexed.names',
                    'object_min_confidence': 0.3,
                    'object_framework': 'coral_edgetpu'
                },
                {
                    # YoloV4 on GPU if TPU fails (because sequence strategy is 'first')
                    'max_detection_size': g.config['max_detection_size'],
                    'object_config':
                    '/var/lib/zmeventnotification/models/yolov4/yolov4.cfg',
                    'object_weights':
                    '/var/lib/zmeventnotification/models/yolov4/yolov4.weights',
                    'object_labels':
                    '/var/lib/zmeventnotification/models/yolov4/coco.names',
                    'object_min_confidence': 0.3,
                    'object_framework': 'opencv',
                    'object_processor': 'gpu'
                }
            ]
        },
        'face': {
            'general': {
                'same_model_sequence_strategy': 'first'
            },
            'sequence': [{
                'face_detection_framework': 'dlib',
                'known_images_path':
                '/var/lib/zmeventnotification/known_faces',
                'face_model': 'cnn',
                'face_train_model': 'cnn',
                'face_recog_dist_threshold': 0.6,
                'face_num_jitters': 1,
                'face_upsample_times': 1
            }]
        },
        'alpr': {
            'general': {
                'same_model_sequence_strategy':
                'first',
                'pre_existing_labels':
                ['car', 'motorbike', 'bus', 'truck', 'boat'],
            },
            'sequence': [{
                'alpr_api_type': 'cloud',
                'alpr_service': 'plate_recognizer',
                'alpr_key': g.config['alpr_key'],
                'platrec_stats': 'no',
                'platerec_min_dscore': 0.1,
                'platerec_min_score': 0.2,
            }]
        }
    }  # ml_options

    stream_options = {
        'api': zmapi,
        'download': False,
        'frame_set': 'alarm,snapshot',
        'strategy': 'most_models',
        'polygons': g.polygons,
        'resize':
        int(g.config['resize']) if g.config['resize'] != 'no' else None
    }

    m = DetectSequence(options=ml_options, logger=g.logger)
    matched_data, all_data = m.detect_stream(stream=stream,
                                             options=stream_options)
    #print(f'ALL FRAMES: {all_data}\n\n')
    #print (f"SELECTED FRAME {matched_data['frame_id']}, size {matched_data['image_dimensions']} with LABELS {matched_data['labels']} {matched_data['boxes']} {matched_data['confidences']}")
    '''
     matched_data = {
            'boxes': matched_b,
            'labels': matched_l,
            'confidences': matched_c,
            'frame_id': matched_frame_id,
            'image_dimensions': self.media.image_dimensions(),
            'image': matched_frame_img
        }
    '''

    # let's remove past detections first, if enabled
    if g.config['match_past_detections'] == 'yes' and args.get('monitorid'):
        # point detections to post processed data set
        g.logger.Info('Removing matches to past detections')
        bbox_t, label_t, conf_t = img.processPastDetection(
            matched_data['boxes'], matched_data['labels'],
            matched_data['confidences'], args.get('monitorid'))
        # save current objects for future comparisons
        g.logger.Debug(
            1, 'Saving detections for monitor {} for future match'.format(
                args.get('monitorid')))
        try:
            mon_file = g.config['image_path'] + '/monitor-' + args.get(
                'monitorid') + '-data.pkl'
            f = open(mon_file, "wb")
            pickle.dump(matched_data['boxes'], f)
            pickle.dump(matched_data['labels'], f)
            pickle.dump(matched_data['confidences'], f)
            f.close()
        except Exception as e:
            g.logger.Error(
                f'Error writing to {mon_file}, past detections not recorded:{e}'
            )

        matched_data['boxes'] = bbox_t
        matched_data['labels'] = label_t
        matched_data['confidences'] = conf_t

    obj_json = {
        'labels': matched_data['labels'],
        'boxes': matched_data['boxes'],
        'frame_id': matched_data['frame_id'],
        'confidences': matched_data['confidences'],
        'image_dimensions': matched_data['image_dimensions']
    }

    # 'confidences': ["{:.2f}%".format(item * 100) for item in matched_data['confidences']],

    detections = []
    seen = {}
    pred = ''
    prefix = ''

    if matched_data['frame_id'] == 'snapshot':
        prefix = '[s] '
    elif matched_data['frame_id'] == 'alarm':
        prefix = '[a] '
    else:
        prefix = '[x] '
        #g.logger.Debug (1,'CONFIDENCE ARRAY:{}'.format(conf))
    for idx, l in enumerate(matched_data['labels']):
        if l not in seen:
            if g.config['show_percent'] == 'no':
                pred = pred + l + ','
            else:
                pred = pred + l + ':{:.0%}'.format(
                    matched_data['confidences'][idx]) + ' '
            seen[l] = 1

    if pred != '':
        pred = pred.rstrip(',')
        pred = prefix + 'detected:' + pred
        g.logger.Info('Prediction string:{}'.format(pred))
        jos = json.dumps(obj_json)
        g.logger.Debug(1, 'Prediction string JSON:{}'.format(jos))
        print(pred + '--SPLIT--' + jos)

        if g.config['write_image_to_zm'] == 'yes' or g.config[
                'write_debug_image'] == 'yes':
            debug_image = pyzmutils.draw_bbox(
                image=matched_data['image'],
                boxes=matched_data['boxes'],
                labels=matched_data['labels'],
                confidences=matched_data['confidences'],
                polygons=g.polygons,
                poly_thickness=g.config['poly_thickness'])

            if g.config['write_debug_image'] == 'yes':
                filename_debug = g.config[
                    'image_path'] + '/' + os.path.basename(
                        append_suffix(
                            stream, '-{}-debug'.format(
                                matched_data['frame_id'])))
                g.logger.Debug(
                    1, 'Writing bounding boxes to debug image: {}'.format(
                        filename_debug))
                cv2.imwrite(filename_debug, debug_image)
            if g.config['write_image_to_zm'] == 'yes' and args.get(
                    'eventpath'):
                g.logger.Debug(
                    1, 'Writing detected image to {}/objdetect.jpg'.format(
                        args.get('eventpath')))
                cv2.imwrite(
                    args.get('eventpath') + '/objdetect.jpg', debug_image)
                jf = args.get('eventpath') + '/objects.json'
                g.logger.Debug(1, 'Writing JSON output to {}'.format(jf))
                try:
                    with open(jf, 'w') as jo:
                        json.dump(obj_json, jo)
                        jo.close()
                except Exception as e:
                    g.logger.Error(f'Error creating {jf}:{e}')

        if args.get('notes'):
            url = '{}/events/{}.json'.format(g.config['api_portal'],
                                             args['eventid'])
            try:
                ev = zmapi._make_request(url=url, type='get')
            except Exception as e:
                g.logger.Error('Error during event notes retrieval: {}'.format(
                    str(e)))
                g.logger.Debug(2, traceback.format_exc())
                exit(0)  # Let's continue with zmdetect

            new_notes = pred
            if ev.get('event', {}).get('Event', {}).get('Notes'):
                old_notes = ev['event']['Event']['Notes']
                old_notes_split = old_notes.split('Motion:')
                old_d = old_notes_split[0]  # old detection
                try:
                    old_m = old_notes_split[1]
                except IndexError:
                    old_m = ''
                new_notes = pred + 'Motion:' + old_m
                g.logger.Debug(
                    1, 'Replacing old note:{} with new note:{}'.format(
                        old_notes, new_notes))

            payload = {}
            payload['Event[Notes]'] = new_notes
            try:
                ev = zmapi._make_request(url=url, payload=payload, type='put')
            except Exception as e:
                g.logger.Error('Error during notes update: {}'.format(str(e)))
                g.logger.Debug(2, traceback.format_exc())

        if g.config['create_animation'] == 'yes':
            g.logger.Debug(1, 'animation: Creating burst...')
            try:
                img.createAnimation(matched_data['frame_id'],
                                    args.get('eventid'),
                                    args.get('eventpath') + '/objdetect',
                                    g.config['animation_types'])
            except Exception as e:
                g.logger.Error('Error creating animation:{}'.format(e))
                g.logger.Error('animation: Traceback:{}'.format(
                    traceback.format_exc()))