Example #1
    def __init__(self,
                 configPath=None,
                 weightPath=None,
                 labelsPath=None,
                 kernel_fill=3):

        if g.args['gpu'] and not g.args['use_opencv_dnn_cuda']:

            utils.success_print('Using Darknet GPU model for YOLO')
            utils.success_print(
                'If you run out of memory, please tweak yolo.cfg')

            if not g.args['use_opencv_dnn_cuda']:
                self.m = yolo.SimpleYolo(configPath=configPath,
                                         weightPath=weightPath,
                                         darknetLib=g.args['darknet_lib'],
                                         labelsPath=labelsPath,
                                         useGPU=True)

        else:
            utils.success_print('Using OpenCV model for YOLO')
            utils.success_print(
                'If you run out of memory, please tweak yolo.cfg')

            self.net = cv2.dnn.readNetFromDarknet(configPath, weightPath)
            self.labels = open(labelsPath).read().strip().split("\n")
            np.random.seed(42)
            self.colors = np.random.randint(0,
                                            255,
                                            size=(len(self.labels), 3),
                                            dtype="uint8")
            self.kernel_fill = np.ones((kernel_fill, kernel_fill), np.uint8)

            if g.args['use_opencv_dnn_cuda'] and g.args['gpu']:
                (maj, minor) = cv2.__version__.split('.')[:2]
                # compare (major, minor) numerically; string concatenation breaks for double-digit versions
                if (int(maj), int(minor)) < (4, 2):
                    utils.fail_print('Not setting CUDA backend for OpenCV DNN')
                    utils.dim_print(
                        'You are using OpenCV version {} which does not support CUDA for DNNs. A minimum of 4.2 is required. See https://www.pyimagesearch.com/2020/02/03/how-to-use-opencvs-dnn-module-with-nvidia-gpus-cuda-and-cudnn/ on how to compile and install openCV 4.2'
                        .format(cv2.__version__))
                else:
                    utils.success_print(
                        'Setting CUDA backend for OpenCV. If you did not set your CUDA_ARCH_BIN correctly during OpenCV compilation, you will get errors during detection related to invalid device/make_policy'
                    )
                    self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
                    self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

        utils.success_print('YOLO initialized')
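
The constructor above only loads the model; for reference, a minimal sketch of how a single detection pass over a frame might look with the OpenCV DNN path is shown below. The function name and thresholds are illustrative and are not part of the original example.

import cv2
import numpy as np

def detect_opencv_yolo(net, labels, frame, conf_threshold=0.5, nms_threshold=0.4):
    # Illustrative OpenCV DNN YOLO forward pass (not the project's own detect method)
    (H, W) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    outputs = net.forward(net.getUnconnectedOutLayersNames())

    boxes, confidences, class_ids = [], [], []
    for output in outputs:
        for detection in output:
            scores = detection[5:]
            class_id = int(np.argmax(scores))
            confidence = float(scores[class_id])
            if confidence >= conf_threshold:
                # YOLO outputs centre x/y and width/height scaled to the input blob
                (cx, cy, bw, bh) = detection[0:4] * np.array([W, H, W, H])
                boxes.append([int(cx - bw / 2), int(cy - bh / 2), int(bw), int(bh)])
                confidences.append(confidence)
                class_ids.append(class_id)

    # Collapse overlapping boxes with non-maximum suppression
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
    results = []
    for i in np.array(idxs).flatten():
        x, y, w, h = boxes[i]
        results.append((labels[class_ids[i]], confidences[i], (x, y, x + w, y + h)))
    return results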
Example #2
def search_video(input_file=None, out_file=None, eid=None, mid=None):
    utils.dim_print ('Analyzing: {}'.format(input_file))
    vid = cv2.VideoCapture(input_file)
    orig_fps = max(1, (g.args['fps'] or int(vid.get(cv2.CAP_PROP_FPS))))
    frame_found = False # if any match found, this will be true
    out = None

    det_type = 'found' if g.args['present'] else 'missing'
    set_frames = {
        'eventid': eid,
        'monitorid': mid,
        'type': det_type,
        'frames':[]
    }

    # if we want to write frames to a new video, 
    # make sure it uses the same FPS as the input video and is of the same size
    if g.args['write']:
        width  = int(vid.get(3))
        height = int(vid.get(4))
        if g.args['resize']:
            resize = g.args['resize']
            width = int (width * resize)
            height = int (height * resize)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        h,t = os.path.split(input_file)
        h = h or '.'
        dt = datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
        if not out_file:
            out_file = h+'/analyzed-'+dt+'-'+t
        out = cv2.VideoWriter(out_file, fourcc, orig_fps, (width,height)) 
        print ('If frames are matched, will write to output video: {}'.format(out_file))

    # get metadata from the input video. There are times this may be off.
    # fps_skip defaults to half the FPS, so for a 10 FPS video only every 5th frame is analyzed.
    # Roughly 2 fps of analysis is usually sufficient; you can override this.


    if g.args['skipframes']:
        fps_skip = g.args['skipframes']
    else:
        fps_skip = max(1, int(vid.get(cv2.CAP_PROP_FPS)/2))
  
    total_frames =  int(vid.get(cv2.CAP_PROP_FRAME_COUNT)) 
    start_time = time.time()

    utils.dim_print ('fps={}, skipping {} frames, total frames={}'.format(orig_fps, fps_skip, total_frames))
    utils.dim_print ('threshold={}, search type=if {}'.format(g.args['threshold'], det_type))
    frame_cnt = 0
    bar = tqdm(total=total_frames) 
    
    # now loop through the input video
    while True:
        succ, frame = vid.read()
        if not succ:
            break
        frame_cnt = frame_cnt + 1
        bar.update(1)
        if frame_cnt % fps_skip: 
            # skip frames based on our skip frames count. We don't really need to process every frame
            continue
       
        if g.args['resize']:
            resize = g.args['resize']
            rh, rw, rl = frame.shape
            frame = cv2.resize(frame, (int(rw*resize), int(rh*resize)))
        
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        if g.args['display']:
            cv2.imshow('frame', frame_gray)
            cv2.imshow('find', g.template)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                exit(1)

        tl,br, minv, maxv = find_in_frame(frame_gray, g.template)
        #print (maxv)
        if maxv >= g.args['threshold'] and g.args['present']:
            # if we want to record frames where the object is present
            set_frames['frames'].append ({'time': int(frame_cnt/orig_fps), 'frame':frame_cnt, 'location':(tl,br), 'accuracy':'{:.2%}'.format(maxv)})
            #matched.append('{}s, Frame: {}, at:{},{} (accuracy:{:.2%})'.format(int(frame_cnt/orig_fps),frame_cnt, tl, br, maxv))
            cv2.rectangle(frame, tl, br, (255,0,0), 2)
            if g.args['write']: 
                # put a box around the object, write to video
                text = '{}s, Frame: {}'.format(int(frame_cnt/orig_fps), frame_cnt)
        
                (tw, th) = cv2.getTextSize(text, cv2.FONT_HERSHEY_PLAIN, fontScale=1.5, thickness=1)[0]
                cv2.rectangle(frame, (width-tw-5,height-th-5), (width,height), (0,0,0), cv2.FILLED)
                cv2.putText(frame, text, (width-tw-2, height-2), cv2.FONT_HERSHEY_PLAIN, fontScale=1.5, color=(255,255,255), thickness=1)
                out.write(frame)
            frame_found = True
            if not g.args['all']: break
        if maxv < g.args['threshold'] and not g.args['present']:
            # if we want to record frames where the object is absent
            set_frames['frames'].append ({ 'time': int(frame_cnt/orig_fps), 'frame':frame_cnt, 'location':None, 'accuracy':'{:.2%}'.format(maxv)})
            #missing.append('{}s, Frame: {} (accuracy:{:.2%})'.format(int(frame_cnt/orig_fps),frame_cnt, maxv))
            if g.args['write']: 
                text = 'MISSING: {}s, Frame: {}'.format(int(frame_cnt/orig_fps), frame_cnt)
                (tw, th) = cv2.getTextSize(text, cv2.FONT_HERSHEY_PLAIN, fontScale=1.5, thickness=1)[0]
                cv2.rectangle(frame, (width-tw-5,height-th-5), (width,height), (0,0,255), cv2.FILLED)
                cv2.putText(frame, text, (width-tw-2, height-2), cv2.FONT_HERSHEY_PLAIN, fontScale=1.5, color=(255,255,255), thickness=1)
                out.write(frame)
            frame_found = True
            if not g.args['all']: break
        
    # all done
    end_time = time.time()
    bar.close()
    # dump matches
    if frame_found:
        if g.args['present']:
            utils.success_print ('Match found in {} frames, starting at {}s, with initial accuracy of {}'.format(len(set_frames['frames']),set_frames['frames'][0]['time'], set_frames['frames'][0]['accuracy']))
            g.json_out.append(set_frames)
           # for match in matched:
           #    print (match)
        else:
            utils.success_print ('Object missing in {} frames, starting at {}s'.format(len(set_frames['frames']), set_frames['frames'][0]['time']))
            g.json_out.append(set_frames)
            #  for miss in missing:
            #      print (miss)
    else:
        print ('No matches found')
    if g.args['write']:
        if frame_found:
            utils.success_print ('Video of frames written  to {}'.format(out_file))
        else:
            os.remove(out_file) # blank file, no frames

    try:
        if remove_downloaded:
            os.remove(g.args['input']) # input was a remote file that was downloaded, so remove local download
    except:
        pass

    print ('\nTime: {:.2f}s'.format(end_time-start_time))
    vid.release()
    if out: out.release()

    return frame_found
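
The find_in_frame helper called above is not part of this example. A minimal sketch of such a template matcher, assuming OpenCV's matchTemplate with a normalised correlation score, might look like this:

import cv2

def find_in_frame(frame_gray, template_gray):
    # Illustrative template matcher: returns the top-left/bottom-right corners of the
    # best match plus the min/max correlation scores, matching how the caller above
    # unpacks (tl, br, minv, maxv). Not necessarily the project's own implementation.
    th, tw = template_gray.shape[:2]
    result = cv2.matchTemplate(frame_gray, template_gray, cv2.TM_CCOEFF_NORMED)
    minv, maxv, minloc, maxloc = cv2.minMaxLoc(result)
    tl = maxloc
    br = (maxloc[0] + tw, maxloc[1] + th)
    return tl, br, minv, maxv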
Example #3
def blend_video(input_file=None,
                out_file=None,
                eid=None,
                mid=None,
                starttime=None,
                delay=0):

    global det, det2
    create_blend = False
    blend_frame_written_count = 0

    set_frames = {
        'eventid': eid,
        'monitorid': mid,
        'type': 'object',
        'frames': []
    }

    print('Blending: {}'.format(utils.secure_string(input_file)))

    vid = FVS.FileVideoStream(input_file)
    time.sleep(1)
    #vid = cv2.VideoCapture(input_file)
    cvobj = vid.get_stream_object()
    if not cvobj.isOpened():
        raise ValueError('Error reading video {}'.format(
            utils.secure_string(input_file)))

    total_frames_vid = int(cvobj.get(cv2.CAP_PROP_FRAME_COUNT))
    vid.start()

    if not g.orig_fps:
        orig_fps = max(1, (g.args['fps'] or int(cvobj.get(cv2.CAP_PROP_FPS))))
        g.orig_fps = orig_fps
    else:
        orig_fps = g.orig_fps

    width = int(cvobj.get(3))
    height = int(cvobj.get(4))
    if g.args['resize']:
        resize = g.args['resize']
        #print (width,height, resize)
        width = int(width * resize)
        height = int(height * resize)

    total_frames_vid_blend = 0
    if os.path.isfile(blend_filename):
        vid_blend = FVS.FileVideoStream(blend_filename)
        time.sleep(1)
        cvobj_blend = vid_blend.get_stream_object()
        total_frames_vid_blend = int(cvobj_blend.get(cv2.CAP_PROP_FRAME_COUNT))
        vid_blend.start()
        #vid_blend = cv2.VideoCapture(blend_filename)
        #utils.dim_print('Video blend {}'.format(vid_blend))
    else:
        vid_blend = None
        cvobj_blend = None
        print('blend file will be created in this iteration')
        create_blend = True

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    outf = cv2.VideoWriter('new-blended-temp.mp4', fourcc, orig_fps,
                           (width, height))
    utils.bold_print('Output video will be {}*{}@{}fps'.format(
        width, height, orig_fps))

    if g.args['skipframes']:
        fps_skip = g.args['skipframes']
    else:
        fps_skip = max(1, int(cvobj.get(cv2.CAP_PROP_FPS) / 2))

    if vid_blend:
        utils.dim_print('frames in new video: {} vs blend: {}'.format(
            total_frames_vid, total_frames_vid_blend))

    start_time = time.time()
    utils.dim_print('fps={}, skipping {} frames'.format(orig_fps, fps_skip))
    utils.dim_print('delay for new video is {}s'.format(delay))

    bar_new_video = tqdm(total=total_frames_vid, desc='New video', miniters=10)
    bar_blend_video = tqdm(total=total_frames_vid_blend,
                           desc='Blend',
                           miniters=10)

    is_trailing = False
    blend_frames_read = 0
    # first wait for delay seconds
    # will only come in if blend video exists, as in first iter it is 0
    # However, if blend wasn't created (no relevant frames), ignore delay
    if delay and not create_blend:
        frame_cnt = 0
        bar_new_video.set_description('waiting for {}s'.format(delay))
        prev_good_frame_b = None
        a = 0
        b = 0
        while True:
            if vid_blend and vid_blend.more():
                frame_b = vid_blend.read()
                if frame_b is None:
                    succ_b = False
                else:
                    succ_b = True
                    a = a + 1
                    #print ('delay read: {}'.format(a))
                    blend_frames_read = blend_frames_read + 1
                    prev_good_frame_b = frame_b
            else:
                succ_b = False
                vid_blend = None

            # If we have reached the end of blend, but have a good last frame
            # lets use it
            if not succ_b and prev_good_frame_b is not None:
                frame_b = prev_good_frame_b
                succ_b = True

            if not succ_b and not prev_good_frame_b:
                break

            frame_cnt = frame_cnt + 1
            bar_blend_video.update(1)
            outf.write(frame_b)
            frame_dummy = np.zeros_like(frame_b)
            if g.args['display']:
                x = 320
                y = 240
                r_frame_b = cv2.resize(frame_b, (x, y))
                r_frame_dummy = cv2.resize(frame_dummy, (x, y))

                h1 = np.hstack((r_frame_dummy, r_frame_dummy))
                h2 = np.hstack((r_frame_dummy, r_frame_b))
                f = np.vstack((h1, h2))
                cv2.imshow('display', f)

            if g.args['interactive']:
                key = cv2.waitKey(0)

            else:
                key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                exit(1)
            if key & 0xFF == ord('c'):
                g.args['interactive'] = False

            blend_frame_written_count = blend_frame_written_count + 1
            b = b + 1
            #print ('delay write: {}'.format(b))
            if (delay * orig_fps < frame_cnt):
                # if (frame_cnt/orig_fps > delay):
                #utils.dim_print('wait over')
                #  print ('DELAY={} ORIGFPS={} FRAMECNT={}'.format(delay, orig_fps, frame_cnt))
                break

    # now read new video along with blend
    bar_new_video.set_description('New video')
    frame_cnt = 0

    while True:
        if vid.more():
            frame = vid.read()
            if frame is None:
                succ = False
            else:
                succ = True
        else:
            frame = None
            succ = False

        #succ, frame = vid.read()

        frame_cnt = frame_cnt + 1
        bar_new_video.update(1)

        if frame_cnt % fps_skip:
            continue

        if succ and g.args['resize']:
            resize = g.args['resize']
            rh, rw, rl = frame.shape
            frame = cv2.resize(frame, (int(rw * resize), int(rh * resize)))

        succ_b = False
        if vid_blend:
            if vid_blend.more():
                frame_b = vid_blend.read()
                if frame_b is None:
                    succ_b = False
                else:
                    succ_b = True
                    bar_blend_video.update(1)
                    blend_frames_read = blend_frames_read + 1

        if not succ and not succ_b:
            bar_blend_video.write('both videos are done')
            break

        elif succ and succ_b:
            analyze = True
            relevant = False  # may change on analysis
            #print ("succ and succ_b")

        elif succ and not succ_b:
            # print ('blend over')
            frame_b = frame.copy()
            analyze = True
            relevant = False  # may change on analysis
            #print ("succ and not succ_b")

        elif not succ and succ_b:
            merged_frame = frame_b
            frame = frame_b
            boxed_frame = np.zeros_like(frame_b)
            txh, txw, _ = frame_b.shape
            frame_mask = np.zeros((txh, txw), dtype=np.uint8)
            foreground_a = np.zeros_like(frame_b)
            analyze = False
            relevant = True
            #print ("not succ and succ_b")

        if analyze:
            # only if both blend and new were read
            if g.args['balanceintensity']:
                intensity = np.mean(frame)
                intensity_b = np.mean(frame_b)
                if intensity > intensity_b:
                    # new frame is brighter
                    frame_b = utils.hist_match(frame_b, frame)
                else:
                    # blend is brighter
                    frame = utils.hist_match(frame, frame_b)
            #h1, w1 = frame.shape[:2]
            #hm, wm = frame_b.shape[:2]

            #print ("{}*{} frame == {}*{} frame_b".format(h1,w1,hm,wm))
            merged_frame, foreground_a, frame_mask, relevant, boxed_frame = det.detect(
                frame, frame_b, frame_cnt, orig_fps, starttime, set_frames)
            #print ('RELEVANT={}'.format(relevant))
            if relevant and g.args['detection_type'] == 'mixed':
                bar_new_video.set_description('YOLO running')
                #utils.dim_print('Adding YOLO, found relevance in background motion')
                merged_frame, foreground_a, frame_mask, relevant, boxed_frame = det2.detect(
                    frame, frame_b, frame_cnt, orig_fps, starttime, set_frames)
                #print ('YOLO RELEVANT={}'.format(relevant))
                bar_new_video.set_description('New video')

            if relevant:
                is_trailing = True
                trail_frames = 0

        if g.args['display']:
            x = 320
            y = 240
            r_frame_b = cv2.resize(frame_b, (x, y))
            r_frame = cv2.resize(boxed_frame, (x, y))
            r_fga = cv2.resize(foreground_a, (x, y))
            r_frame_mask = cv2.resize(frame_mask, (x, y))
            r_frame_mask = cv2.cvtColor(r_frame_mask, cv2.COLOR_GRAY2BGR)
            r_merged_frame = cv2.resize(merged_frame, (x, y))
            h1 = np.hstack((r_frame, r_frame_mask))
            h2 = np.hstack((r_fga, r_merged_frame))
            f = np.vstack((h1, h2))
            cv2.imshow('display', f)
            #cv2.imshow('merged_frame',cv2.resize(merged_frame, (640,480)))
            #cv2.imshow('frame_mask',cv2.resize(frame_mask, (640,480)))

            #cv2.imshow('frame_mask',frame_mask)

        if g.args['interactive']:
            key = cv2.waitKey(0)

        else:
            key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            exit(1)
        if key & 0xFF == ord('c'):
            g.args['interactive'] = False

        # if we read a blend frame, merged frame will always be written
        # if we don't have a blend frame, then we write new frame only if its relevant
        # assuming we want relevant frames
        if relevant or not g.args['relevantonly'] or succ_b:
            #print ("WRITING")
            outf.write(merged_frame)
            blend_frame_written_count = blend_frame_written_count + 1
        elif is_trailing:
            trail_frames = trail_frames + 1

            if trail_frames > g.args['trailframes']:
                is_trailing = False
            else:
                bar_new_video.set_description('Trailing frame')
                # bar_new_video.write('trail frame: {}'.format(trail_frames))
                outf.write(merged_frame)
                blend_frame_written_count = blend_frame_written_count + 1
        else:
            #print ('irrelevant frame {}'.format(frame_cnt))
            pass

    bar_blend_video.close()
    bar_new_video.close()
    vid.stop()
    outf.release()
    if vid_blend: vid_blend.stop()
    print('\n')
    #input("Press Enter to continue...")
    if create_blend and blend_frame_written_count == 0:
        utils.fail_print(
            'No relevant frames found, blend file not created. Will try next iteration'
        )
        os.remove('new-blended-temp.mp4')
    else:
        rel = 'relevant ' if g.args['relevantonly'] else ''
        utils.success_print(
            '{} total {}frames written to blend file ({} read)'.format(
                blend_frame_written_count, rel, blend_frames_read))
        if blend_frame_written_count:
            try:
                os.remove(blend_filename)
            except:
                pass
            os.rename('new-blended-temp.mp4', blend_filename)
            utils.success_print(
                'Blended file updated in {}'.format(blend_filename))
        else:
            utils.success_print(
                'No frames written this round, not updating blend file')
        g.json_out.append(set_frames)

    return False
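
utils.hist_match, used above to balance intensity between the new and blended frames, is not shown in this example. A minimal NumPy-only histogram-matching sketch along the lines of what such a helper typically does:

import numpy as np

def hist_match(source, template):
    # Map the pixel-intensity distribution of `source` onto that of `template`
    # (classic histogram matching). Illustrative only; the project's utils.hist_match
    # may differ in detail.
    old_shape = source.shape
    source = source.ravel()
    template = template.ravel()

    s_values, bin_idx, s_counts = np.unique(source, return_inverse=True, return_counts=True)
    t_values, t_counts = np.unique(template, return_counts=True)

    # Normalised cumulative distribution functions of both images
    s_quantiles = np.cumsum(s_counts).astype(np.float64)
    s_quantiles /= s_quantiles[-1]
    t_quantiles = np.cumsum(t_counts).astype(np.float64)
    t_quantiles /= t_quantiles[-1]

    # For every source quantile, find the template value with the nearest quantile
    interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
    return interp_t_values[bin_idx].reshape(old_shape).astype(source.dtype)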
Example #4
def process_timeline():

    try:
        os.remove('blended.mp4')
    except:
        pass

    url = g.args['portal'] + '/api/events/index/StartTime >=:' + g.args[
        'from'] + '/EndTime <=:' + g.args['to']
    if g.args['objectonly']:
        url = url + '/Notes REGEXP:detected:'
    if g.args['alarmonly']:
        url = url + '/AlarmFrames >=:' + str(g.args['minalarmframes'])
    if g.args['blend'] and len(g.mon_list) > 1:
        utils.bold_print(
            'You have chosen to blend events from multiple monitors. Results may be poor. Blending should typically be done on a fixed view (single monitor)'
        )
    for mon in g.mon_list:
        #   print (mon)
        url = url + '/MonitorId =:' + str(mon)

    url = url + '.json?sort=StartTime&direction=asc&username=' + g.args[
        'username'] + '&password=' + g.args['password']
    print('Getting list of events using: {}'.format(url))
    resp = requests.get(url)
    #print (resp.json())
    events = resp.json()['events']

    cnt = 0
    delay = 0
    for event in events:
        cnt = cnt + 1
        #print (event['Event']['Id'])
        url_download = g.args[
            'portal'] + '/index.php?view=view_video&eid=' + event['Event'][
                'Id'] + '&username=' + g.args['username'] + '&password=' + g.args['password']
        in_file = url_download

        print(
            '\n==============| Processing Event: {} Monitor: {} ({} of {})|============='
            .format(event['Event']['Id'], event['Event']['MonitorId'], cnt,
                    len(events)))

        #print ("VIDEO ID IS:",event['Event']['DefaultVideo'])
        if event['Event']['DefaultVideo'] == "":
            utils.fail_print("ERROR: only mp4 events supported, skipping")
            continue
        if g.args['download']:
            in_file = event['Event']['Id'] + '.mp4'
            utils.dim_print('downloading {}'.format(url_download))
            try:
                urllib.request.urlretrieve(url_download, in_file)
            except IOError as e:
                utils.fail_print('ERROR:{}'.format(e))
            except:  #handle other exceptions such as attribute errors
                utils.fail_print("Unexpected error:" + sys.exc_info()[0])

        g.out_file = 'analyzed-' + event['Event']['Id'] + '.mp4'

        #print (in_file, out_file)
        try:
            if g.args['blend']:
                res = zmm_blend.blend_video(
                    input_file=in_file,
                    out_file=g.out_file,
                    eid=event['Event']['Id'],
                    mid=event['Event']['MonitorId'],
                    starttime=event['Event']['StartTime'],
                    delay=delay)
                delay = delay + g.args['blenddelay']
            elif g.args['annotate']:
                res = zmm_annotate.annotate_video(
                    input_file=in_file,
                    out_file=g.out_file,
                    eid=event['Event']['Id'],
                    mid=event['Event']['MonitorId'],
                    starttime=event['Event']['StartTime'])

            elif g.args['find']:
                res = zmm_search.search_video(input_file=in_file,
                                              out_file=g.out_file,
                                              eid=event['Event']['Id'],
                                              mid=event['Event']['MonitorId'])
            else:
                raise ValueError('Unknown mode?')
            if not g.args['all'] and res:
                break
        except IOError as e:
            utils.fail_print('ERROR:{}'.format(e))

        if g.args['download']:
            try:
                os.remove(in_file)
            except:
                pass
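
For reference, with a from/to range, a single monitor and hypothetical portal/credentials, the event-listing URL assembled above comes out roughly like this:

# Hypothetical values for illustration only; this just shows the ZoneMinder API
# filter syntax that process_timeline() concatenates.
url = ('https://zm.example.com/zm/api/events/index/'
       'StartTime >=:2020-01-01 00:00:00/'
       'EndTime <=:2020-01-02 00:00:00/'
       'MonitorId =:3'
       '.json?sort=StartTime&direction=asc&username=admin&password=secret')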
Example #5
                help='look for frames where image in --match is present')
ap.add_argument(
    '--gpu',
    nargs='?',
    default=True,
    const=True,
    type=utils.str2bool,
    help='enable GPU processing. Needs libdarknet.so compiled in GPU mode')

g.args = vars(ap.parse_args())
utils.process_config()

if g.args['blend']: zmm_blend.blend_init()
if g.args['annotate']: zmm_annotate.annotate_init()

utils.dim_print('-----| Arguments to be used:')
for k, v in g.args.items():
    utils.dim_print('{}={}'.format(k, v))
print('\n')

start_time = time.time()
if g.args['from'] or g.args['to']:
    # if its a time range, ignore event/input
    process_timeline()

else:
    if g.args['eventid']:

        # we need to construct the url
        g.args['input'] = g.args[
            'portal'] + '/index.php?view=view_video&eid=' + g.args['eventid']
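
utils.str2bool, used as the argparse type for --gpu above, is not shown in this example. A common implementation of such a converter (not necessarily the project's exact version) is:

import argparse

def str2bool(v):
    # Accept the usual truthy/falsy spellings on the command line
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected')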
Example #6
def annotate_video(input_file=None, out_file=None, eid=None, mid=None, starttime=None):

    global det, det2

    set_frames = {
        'eventid': eid,
        'monitorid': mid,
        'type': 'object',
        'frames':[]
        }

    print ('annotating: {}'.format(utils.secure_string(input_file)))
    
    #vid = cv2.VideoCapture(input_file)
    vid = FVS.FileVideoStream(input_file)
    time.sleep(1)
    cvobj = vid.get_stream_object()
    vid.start()
    if not cvobj.isOpened(): 
        raise ValueError('Error reading video {}'.format(utils.secure_string(input_file)))

    if not g.orig_fps:
        orig_fps = max(1, (g.args['fps'] or int(cvobj.get(cv2.CAP_PROP_FPS))))
        g.orig_fps = orig_fps
    else:
        orig_fps = g.orig_fps

    
    width  = int(cvobj.get(3))
    height = int(cvobj.get(4))
    
    if g.args['resize']:
        resize = g.args['resize']
       # print (width,height, resize)
        width = int(width * resize)
        height = int(height * resize)
    
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    outf = cv2.VideoWriter(annotate_filename, fourcc, orig_fps, (width,height), True) 
    utils.bold_print('Output video will be {}px*{}px @ {}fps'.format(width, height, orig_fps))

    if g.args['skipframes']:
        fps_skip = g.args['skipframes']
    else:
        fps_skip = max(1,int(cvobj.get(cv2.CAP_PROP_FPS)/2))


    total_frames =  int(cvobj.get(cv2.CAP_PROP_FRAME_COUNT)) 
   

    start_time = time.time()
    utils.dim_print ('fps={}, skipping {} frames'.format(orig_fps, fps_skip))
    

    bar_annotate_video = tqdm (total=total_frames, desc='annotating')

    frame_cnt = 0
    while True:
        if vid.more():
            frame = vid.read()
            if frame is None:
                succ = False
            else:
                succ = True
        else:
            frame = None
            succ = False
        #succ, frame = vid.read()
        if not succ: break
       
        frame_cnt = frame_cnt + 1

        if not frame_cnt % 10:
            bar_annotate_video.update(10)
            

        if frame_cnt % fps_skip:
            continue
      
        if succ and g.args['resize']:
            resize = g.args['resize']
            rh, rw, rl = frame.shape
            frame = cv2.resize(frame, (int(rw*resize), int(rh*resize)))

        frame_b = frame.copy()
        merged_frame, foreground_a, frame_mask, relevant, boxed_frame = det.detect(frame, frame_b, frame_cnt, orig_fps, starttime, set_frames)
        if relevant and g.args['detection_type'] == 'mixed':
            bar_annotate_video.set_description('YOLO running')
            #utils.dim_print('Adding YOLO, found relevance in background motion')
            merged_frame, foreground_a, frame_mask, relevant, boxed_frame = det2.detect(frame, frame_b, frame_cnt, orig_fps, starttime, set_frames)  
            bar_annotate_video.set_description('annotating')        
      
        if g.args['display']:
            x = 320
            y = 240
            r_frame_b = cv2.resize(frame_b, (x, y))
            r_frame = cv2.resize(boxed_frame, (x, y))
            r_fga = cv2.resize(foreground_a, (x, y))
            r_frame_mask = cv2.resize(frame_mask, (x, y))
            r_frame_mask = cv2.cvtColor(r_frame_mask, cv2.COLOR_GRAY2BGR)
            r_merged_frame = cv2.resize(merged_frame, (x, y))
            h1 = np.hstack((r_frame, r_frame_mask))
            h2 = np.hstack((r_fga, r_merged_frame))
            f = np.vstack((h1, h2))
            cv2.imshow('display', f)
            #cv2.imshow('merged_frame',cv2.resize(merged_frame, (640,480)))
            #cv2.imshow('frame_mask',cv2.resize(frame_mask, (640,480)))

            #cv2.imshow('frame_mask',frame_mask)

        if g.args['interactive']:
            key = cv2.waitKey(0)
        
        else:
            key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            exit(1)
        if key & 0xFF == ord('c'):
            g.args['interactive'] = False

        if relevant or not g.args['relevantonly']:
            #print ("WRITING FRAME")
            outf.write (merged_frame)

        else:
            #print ('irrelevant frame {}'.format(frame_cnt))
            pass
        
   

    bar_annotate_video.close()
    vid.stop()
    outf.release()

    utils.success_print('annotated file updated in {}'.format(annotate_filename))

    g.json_out.append(set_frames)

    return False
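
The det/det2 detectors used above are created elsewhere (annotate_init()). To illustrate the detect() contract they follow, here is a minimal background-subtraction detector that returns the same five-tuple; the class name, thresholds and the exact contents of the frame records are illustrative, not the project's own code.

import cv2
import numpy as np

class SimpleMotionDetector:
    # Illustrative stand-in for the detectors used by annotate_video()/blend_video()
    def __init__(self, min_area=500):
        self.bg = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
        self.min_area = min_area

    def detect(self, frame, frame_b, frame_cnt, orig_fps, starttime, set_frames):
        frame_mask = self.bg.apply(frame)
        frame_mask = cv2.threshold(frame_mask, 127, 255, cv2.THRESH_BINARY)[1]
        # OpenCV 4.x returns (contours, hierarchy)
        contours, _ = cv2.findContours(frame_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        boxed_frame = frame.copy()
        relevant = False
        for c in contours:
            if cv2.contourArea(c) < self.min_area:
                continue
            relevant = True
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(boxed_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            set_frames['frames'].append({'frame': frame_cnt,
                                         'time': int(frame_cnt / orig_fps),
                                         'location': ((x, y), (x + w, y + h))})

        foreground_a = cv2.bitwise_and(frame, frame, mask=frame_mask)
        # Paste the moving foreground from the new frame onto the blend frame
        merged_frame = frame_b.copy()
        merged_frame[frame_mask > 0] = frame[frame_mask > 0]
        return merged_frame, foreground_a, frame_mask, relevant, boxed_frame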