def __init__(self, report_file_path, class_names_file_path,
             smooth_probs=False, smoothing_factor=16):
    """Construct a Trip from a previously written analysis report.

    Args:
        report_file_path: path to the CSV report produced by a prior
            analysis run (read via IO.read_report).
        class_names_file_path: path to the file mapping class ids to
            human-readable class names (read via IO.read_class_names).
        smooth_probs: if True, smooth the per-class probability columns
            with IO.smooth_probs before constructing the Trip.
        smoothing_factor: smoothing window/strength forwarded to
            IO.smooth_probs; only used when smooth_probs is True.
    """
    class_name_map = IO.read_class_names(class_names_file_path)

    # The report's probability columns are named '<class_name>_probability'.
    class_header_names = [class_name + '_probability'
                          for class_name in class_name_map.values()]

    header_mask = ['frame_number', 'frame_timestamp', 'qa_flag']
    header_mask.extend(class_header_names)

    report_header, report_data, data_col_range = IO.read_report(
        report_file_path, frame_col_num=1, timestamp_col_num=2,
        qa_flag_col_num=3, header_mask=header_mask,
        return_data_col_range=True)

    report_frame_numbers = report_data['frame_numbers']
    report_frame_numbers = report_frame_numbers.astype(np.int32)

    # Timestamps and QA flags are optional report columns: older or partial
    # reports may omit them, and non-numeric timestamps cannot be cast.
    # Catch only the exceptions those situations can raise -- the original
    # bare `except:` also swallowed SystemExit/KeyboardInterrupt and could
    # hide genuine bugs.
    try:
        report_timestamps = report_data['frame_timestamps']
        report_timestamps = report_timestamps.astype(np.int32)
        qa_flags = report_data['qa_flag']
    except (KeyError, TypeError, ValueError):
        report_timestamps = None
        qa_flags = None

    report_probs = report_data['probabilities']
    report_probs = report_probs.astype(np.float32)

    if smooth_probs:
        report_probs = IO.smooth_probs(report_probs, smoothing_factor)

    Trip.__init__(self, report_frame_numbers, report_timestamps, qa_flags,
                  report_probs, class_name_map)
async def main():
    """Worker-node entry point (control-node mode).

    Registers with a control node over a websocket, requests videos to
    analyze, and launches one child process per video. Reconnects on
    transient connection failures. Relies on module-level state defined
    elsewhere in this file (args, snva_home, log_queue, logger_thread,
    main_interrupt_queue, snva_version_string, ws, process_video,
    process_video_signalstate, child_logger_fn, log_level).
    """
    logging.info('entering snva {} main process'.format(snva_version_string))

    # total_num_video_to_process = None

    def interrupt_handler(signal_number, _):
        """SIGINT handler: signal the main loop to stop and shut logging down."""
        logging.warning('Main process received interrupt signal '
                        '{}.'.format(signal_number))
        main_interrupt_queue.put_nowait('_')

        # if total_num_video_to_process is None \
        #     or total_num_video_to_process == len(video_file_paths):
        # Signal the logging thread to finish up
        logging.debug('signaling logger thread to end service.')
        log_queue.put_nowait(None)
        logger_thread.join()
        logging.shutdown()

    signal.signal(signal.SIGINT, interrupt_handler)

    # Locate ffmpeg: honor FFMPEG_HOME, else fall back to platform defaults.
    try:
        ffmpeg_path = os.environ['FFMPEG_HOME']
    except KeyError:
        logging.warning('Environment variable FFMPEG_HOME not set. Attempting '
                        'to use default ffmpeg binary location.')
        if platform.system() == 'Windows':
            ffmpeg_path = 'ffmpeg.exe'
        else:
            ffmpeg_path = '/usr/local/bin/ffmpeg'
            if not path.exists(ffmpeg_path):
                ffmpeg_path = '/usr/bin/ffmpeg'

    logging.debug('FFMPEG path set to: {}'.format(ffmpeg_path))

    # Locate ffprobe the same way via FFPROBE_HOME.
    try:
        ffprobe_path = os.environ['FFPROBE_HOME']
    except KeyError:
        logging.warning('Environment variable FFPROBE_HOME not set. '
                        'Attempting to use default ffprobe binary location.')
        if platform.system() == 'Windows':
            ffprobe_path = 'ffprobe.exe'
        else:
            ffprobe_path = '/usr/local/bin/ffprobe'
            if not path.exists(ffprobe_path):
                ffprobe_path = '/usr/bin/ffprobe'

    logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))

    # In control-node mode video paths come from the control node, so the
    # local input-path discovery used by the batch main() stays disabled.
    # # TODO validate all video file paths in the provided text file if args.inputpath is a text file
    # if path.isdir(args.inputpath):
    #   video_file_names = set(IO.read_video_file_names(args.inputpath))
    #   video_file_paths = [path.join(args.inputpath, video_file_name)
    #                       for video_file_name in video_file_names]
    # elif path.isfile(args.inputpath):
    #   if args.inputpath[-3:] == 'txt':
    #     if args.inputlistrootdirpath is None:
    #       raise ValueError('--inputlistrootdirpath must be specified when using a'
    #                        ' text file as the input.')
    #     with open(args.inputpath, newline='') as input_file:
    #       video_file_paths = []
    #
    #       for line in input_file.readlines():
    #         line = line.rstrip()
    #         video_file_path = line.lstrip(args.inputlistrootdirpath)
    #         video_file_path = path.join('/media/root', video_file_path)
    #
    #         if path.isfile(video_file_path):
    #           video_file_paths.append(video_file_path)
    #         else:
    #           logging.warning('The video file at host path {} could not be found '
    #                           'at mapped path {} and will not be processed'.
    #                           format(line, video_file_path))
    #   else:
    #     video_file_paths = [args.inputpath]
    # else:
    #   raise ValueError('The video file/folder specified at the path {} could '
    #                    'not be found.'.format(args.inputpath))

    models_root_dir_path = path.join(snva_home, args.modelsdirpath)
    models_dir_path = path.join(models_root_dir_path, args.modelname)

    logging.debug('models_dir_path set to {}'.format(models_dir_path))

    # The model itself is served remotely (args.modelserverhost), so the
    # local protobuf check used by the batch main() stays disabled.
    # model_file_path = path.join(models_dir_path, args.protobuffilename)
    #
    # if not path.isfile(model_file_path):
    #   raise ValueError('The model specified at the path {} could not be '
    #                    'found.'.format(model_file_path))
    #
    # logging.debug('model_file_path set to {}'.format(model_file_path))

    model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')

    if not path.isfile(model_input_size_file_path):
        raise ValueError(
            'The model input size file specified at the path {} '
            'could not be found.'.format(model_input_size_file_path))

    logging.debug('model_input_size_file_path set to {}'.format(
        model_input_size_file_path))

    with open(model_input_size_file_path) as file:
        model_input_size_string = file.readline().rstrip()

    valid_size_set = ['224', '299']

    if model_input_size_string not in valid_size_set:
        raise ValueError(
            'The model input size is not in the set {}.'.format(
                valid_size_set))

    model_input_size = int(model_input_size_string)

    # if logpath is the default value, expand it using the SNVA_HOME prefix,
    # otherwise, use the value explicitly passed by the user
    if args.outputpath == 'reports':
        output_dir_path = path.join(snva_home, args.outputpath)
    else:
        output_dir_path = args.outputpath

    logging.info("Output path set to: {}".format(output_dir_path))

    if not path.isdir(output_dir_path):
        os.makedirs(output_dir_path)

    if args.classnamesfilepath is None \
            or not path.isfile(args.classnamesfilepath):
        class_names_path = path.join(models_root_dir_path, 'class_names.txt')
    else:
        class_names_path = args.classnamesfilepath

    logging.debug('labels path set to: {}'.format(class_names_path))

    num_processes = args.numprocesses

    class_name_map = IO.read_class_names(class_names_path)

    # Per-video bookkeeping, keyed by video file path.
    return_code_queue_map = {}
    child_logger_thread_map = {}
    child_process_map = {}

    total_num_processed_videos = 0
    total_num_processed_frames = 0
    total_analysis_duration = 0

    def start_video_processor(video_file_path):
        """Spawn a child process (plus its logger thread) for one video."""
        # Before popping the next video off of the list and creating a process to
        # scan it, check to see if fewer than logical_device_count + 1 processes are
        # active. If not, Wait for a child process to release its semaphore
        # acquisition. If so, acquire the semaphore, pop the next video name,
        # create the next child process, and pass the semaphore to it
        return_code_queue = Queue()
        return_code_queue_map[video_file_path] = return_code_queue

        logging.debug('creating new child process.')

        # Forward the child's log records into the main log queue.
        child_log_queue = Queue()
        child_logger_thread = Thread(target=child_logger_fn,
                                     args=(log_queue, child_log_queue))
        child_logger_thread.start()
        child_logger_thread_map[video_file_path] = child_logger_thread

        if 'signalstate' == args.processormode:
            child_process = Process(
                target=process_video_signalstate,
                name=path.splitext(path.split(video_file_path)[1])[0],
                args=(video_file_path, output_dir_path, class_name_map,
                      args.modelname, args.modelsignaturename,
                      args.modelserverhost, model_input_size,
                      return_code_queue, child_log_queue, log_level,
                      ffmpeg_path, ffprobe_path, args.crop, args.cropwidth,
                      args.cropheight, args.cropx, args.cropy,
                      args.extracttimestamps, args.timestampmaxwidth,
                      args.timestampheight, args.timestampx, args.timestampy,
                      args.deinterlace, args.numchannels, args.batchsize,
                      args.smoothprobs, args.smoothingfactor,
                      args.binarizeprobs, args.writebbox,
                      args.writeeventreports, args.maxanalyzerthreads,
                      args.processormode))
        else:
            child_process = Process(
                target=process_video,
                name=path.splitext(path.split(video_file_path)[1])[0],
                args=(video_file_path, output_dir_path, class_name_map,
                      args.modelname, args.modelsignaturename,
                      args.modelserverhost, model_input_size,
                      return_code_queue, child_log_queue, log_level,
                      ffmpeg_path, ffprobe_path, args.crop, args.cropwidth,
                      args.cropheight, args.cropx, args.cropy,
                      args.extracttimestamps, args.timestampmaxwidth,
                      args.timestampheight, args.timestampx, args.timestampy,
                      args.deinterlace, args.numchannels, args.batchsize,
                      args.smoothprobs, args.smoothingfactor,
                      args.binarizeprobs, args.writeinferencereports,
                      args.writeeventreports, args.maxanalyzerthreads,
                      args.processormode))

        logging.debug('starting child process.')
        child_process.start()

        child_process_map[video_file_path] = child_process

    async def close_completed_video_processors(total_num_processed_videos,
                                               total_num_processed_frames,
                                               total_analysis_duration,
                                               websocket_conn):
        """Reap finished children, notify the control node, return new totals."""
        for video_file_path in list(return_code_queue_map.keys()):
            return_code_queue = return_code_queue_map[video_file_path]

            try:
                # Non-blocking: raises Empty if this child has not reported yet.
                return_code_map = return_code_queue.get_nowait()
                return_code = return_code_map['return_code']
                return_value = return_code_map['return_value']

                child_process = child_process_map[video_file_path]

                logging.debug(
                    'child process {} returned with exit code {} and exit value '
                    '{}'.format(child_process.pid, return_code, return_value))

                if return_code == 'success':
                    total_num_processed_videos += 1
                    total_num_processed_frames += return_value
                    total_analysis_duration += return_code_map[
                        'analysis_duration']

                    logging.info('notifying control node of completion')
                    complete_request = json.dumps({
                        'action': 'COMPLETE',
                        'video': os.path.basename(video_file_path),
                        'output': return_code_map['output_locations']
                    })
                    await websocket_conn.send(complete_request)

                child_logger_thread = child_logger_thread_map[video_file_path]

                logging.debug(
                    'joining logger thread for child process {}'.format(
                        child_process.pid))
                child_logger_thread.join(timeout=15)

                if child_logger_thread.is_alive():
                    logging.warning(
                        'logger thread for child process {} remained alive following join '
                        'timeout'.format(child_process.pid))

                logging.debug('joining child process {}'.format(
                    child_process.pid))
                child_process.join(timeout=15)

                # if the child process has not yet terminated, kill the child process at
                # the risk of losing any log message not yet buffered by the main logger
                try:
                    os.kill(child_process.pid, signal.SIGKILL)
                    logging.warning(
                        'child process {} remained alive following join timeout and had to '
                        'be killed'.format(child_process.pid))
                except:
                    # NOTE(review): kill is attempted unconditionally; the bare
                    # except absorbs the (expected) failure when the process
                    # already exited.
                    pass

                return_code_queue.close()
                return_code_queue_map.pop(video_file_path)
                child_logger_thread_map.pop(video_file_path)
                child_process_map.pop(video_file_path)
            except Empty:
                pass

        return total_num_processed_videos, total_num_processed_frames, \
            total_analysis_duration

    start = time()

    sleep_duration = 1
    breakLoop = False
    connectionId = None
    isIdle = False

    # Outer loop: (re)connect to the control node until shutdown.
    while True:
        try:
            if breakLoop:
                break
            wsUrl = 'ws://' + args.controlnodehost + '/registerProcess'
            if connectionId is not None:
                # Re-register under the previously assigned id on reconnect.
                wsUrl = wsUrl + '?id=' + connectionId
            logging.debug("Connecting with URL {}".format(wsUrl))
            async with ws.connect(wsUrl) as conn:
                response = await conn.recv()
                response = json.loads(response)

                logging.info(response)
                if response['action'] != 'CONNECTION_SUCCESS':
                    raise ConnectionError(
                        'control node connection failed with response: {}'.
                        format(response))
                if connectionId is None:
                    connectionId = response['id']
                    logging.debug("Assigned id {}".format(connectionId))

                # Inner loop: one iteration per control-node message.
                while True:
                    # block if num_processes child processes are active
                    while len(return_code_queue_map) >= num_processes:
                        total_num_processed_videos, total_num_processed_frames, \
                            total_analysis_duration = await close_completed_video_processors(
                                total_num_processed_videos,
                                total_num_processed_frames,
                                total_analysis_duration, conn)
                        # NOTE(review): time.sleep() inside a coroutine blocks
                        # the event loop; presumably acceptable because this is
                        # the only task — confirm.
                        sleep(sleep_duration)

                    try:
                        # todo poll for termination signal from control node
                        _ = main_interrupt_queue.get_nowait()
                        logging.debug(
                            'breaking out of child process generation following interrupt signal'
                        )
                        break
                    except:
                        pass

                    if not isIdle:
                        logging.info('requesting video')
                        request = json.dumps({'action': 'REQUEST_VIDEO'})
                        await conn.send(request)
                        logging.info('reading response')
                        response = await conn.recv()
                    else:
                        # If idle, we will try to close completed processors until all are done
                        while len(return_code_queue_map) > 0:
                            # Before checking for completed processes, check for a new message
                            logging.info('Checking for new message')
                            try:
                                # If we get a response quickly, break our waiting loop and process the command
                                response = await asyncio.wait_for(
                                    conn.recv(), 1)
                                break
                            except asyncio.TimeoutError:
                                # Otherwise, go back to finishing our current tasks
                                logging.debug(
                                    'No new message from control node, continuing...'
                                )
                                pass
                            total_num_processed_videos, total_num_processed_frames, \
                                total_analysis_duration = await close_completed_video_processors(
                                    total_num_processed_videos,
                                    total_num_processed_frames,
                                    total_analysis_duration, conn)
                            # by now, the last device_id_queue_len videos are being processed,
                            # so we can afford to poll for their completion infrequently
                            if len(return_code_queue_map) > 0:
                                sleep(sleep_duration)
                        # Once all are complete, if still idle we have no work left to do - we just wait for a new message
                        # NOTE(review): a response obtained via the `break`
                        # above appears to be overwritten by this blocking
                        # recv — verify intended.
                        response = await conn.recv()

                    response = json.loads(response)

                    # Dispatch on the control node's requested action.
                    if response['action'] == 'STATUS_REQUEST':
                        logging.info('control node requested status request')
                        pass
                    elif response['action'] == 'CEASE_REQUESTS':
                        logging.info(
                            'control node has no more videos to process')
                        isIdle = True
                        pass
                    elif response['action'] == 'RESUME_REQUESTS':
                        logging.info(
                            'control node has instructed to resume requests')
                        isIdle = False
                        pass
                    elif response['action'] == 'SHUTDOWN':
                        logging.info('control node requested shutdown')
                        breakLoop = True
                        break
                    elif response['action'] == 'PROCESS':
                        # TODO Prepend input path
                        video_file_path = os.path.join(args.inputpath,
                                                       response['path'])
                        request_received = json.dumps({
                            'action': 'REQUEST_RECEIVED',
                            'video': response['path']
                        })
                        await conn.send(request_received)
                        try:
                            start_video_processor(video_file_path)
                        except Exception as e:
                            logging.error(
                                'an unknown error has occured while processing {}'
                                .format(video_file_path))
                            logging.error(e)
                    else:
                        raise ConnectionError(
                            'control node replied with unexpected response: {}'
                            .format(response))

                logging.debug('{} child processes remain enqueued'.format(
                    len(return_code_queue_map)))

                # Drain any children still running after the message loop ends.
                while len(return_code_queue_map) > 0:
                    #logging.debug('waiting for the final {} child processes to '
                    #              'terminate'.format(len(return_code_queue_map)))
                    total_num_processed_videos, total_num_processed_frames, \
                        total_analysis_duration = await close_completed_video_processors(
                            total_num_processed_videos,
                            total_num_processed_frames,
                            total_analysis_duration, conn)

                    # by now, the last device_id_queue_len videos are being processed,
                    # so we can afford to poll for their completion infrequently
                    if len(return_code_queue_map) > 0:
                        #logging.debug('sleeping for {} seconds'.format(sleep_duration))
                        sleep(sleep_duration)

                end = time() - start

                processing_duration = IO.get_processing_duration(
                    end,
                    'snva {} processed a total of {} videos and {} frames in:'.
                    format(snva_version_string, total_num_processed_videos,
                           total_num_processed_frames))
                logging.info(processing_duration)

                logging.info(
                    'Video analysis alone spanned a cumulative {:.02f} '
                    'seconds'.format(total_analysis_duration))

                logging.info(
                    'exiting snva {} main process'.format(snva_version_string))
                breakLoop = True
        except socket.gaierror:
            # log something
            logging.info('gaierror')
            continue
        except ConnectionRefusedError:
            # log something else
            logging.info('connection refused')
            break
        except ws.exceptions.ConnectionClosed:
            # Transient websocket drop: retry registration with the same id.
            logging.info('Connection lost. Attempting reconnect...')
            continue
        except Exception as e:
            logging.error("Unknown Exception")
            logging.error(e)
            raise e
        if breakLoop:
            break
def main():
    """Batch-mode entry point: discover videos locally and analyze them all.

    Discovers video files from args.inputpath (directory, single file, or
    text file listing paths), then launches up to logical_device_count + 1
    concurrent child processes, one per video. Relies on module-level state
    defined elsewhere in this file (args, snva_home, log_queue,
    logger_thread, main_interrupt_queue, snva_version_string,
    process_video, child_logger_fn, log_level,
    get_valid_num_processes_per_device).
    """
    logging.info('entering snva {} main process'.format(snva_version_string))

    total_num_video_to_process = None

    def interrupt_handler(signal_number, _):
        """SIGINT handler: request a stop; shut logging down if no work remains."""
        logging.warning('Main process received interrupt signal '
                        '{}.'.format(signal_number))
        main_interrupt_queue.put_nowait('_')

        if total_num_video_to_process is None \
                or total_num_video_to_process == len(video_file_paths):
            # Signal the logging thread to finish up
            logging.debug('signaling logger thread to end service.')
            log_queue.put_nowait(None)
            logger_thread.join()
            logging.shutdown()

    signal.signal(signal.SIGINT, interrupt_handler)

    # Locate ffmpeg: honor FFMPEG_HOME, else fall back to platform defaults.
    try:
        ffmpeg_path = os.environ['FFMPEG_HOME']
    except KeyError:
        logging.warning('Environment variable FFMPEG_HOME not set. Attempting '
                        'to use default ffmpeg binary location.')
        if platform.system() == 'Windows':
            ffmpeg_path = 'ffmpeg.exe'
        else:
            ffmpeg_path = '/usr/local/bin/ffmpeg'
            if not path.exists(ffmpeg_path):
                ffmpeg_path = '/usr/bin/ffmpeg'

    logging.debug('FFMPEG path set to: {}'.format(ffmpeg_path))

    # Locate ffprobe the same way via FFPROBE_HOME.
    try:
        ffprobe_path = os.environ['FFPROBE_HOME']
    except KeyError:
        logging.warning('Environment variable FFPROBE_HOME not set. '
                        'Attempting to use default ffprobe binary location.')
        if platform.system() == 'Windows':
            ffprobe_path = 'ffprobe.exe'
        else:
            ffprobe_path = '/usr/local/bin/ffprobe'
            if not path.exists(ffprobe_path):
                ffprobe_path = '/usr/bin/ffprobe'

    logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))

    # TODO validate all video file paths in the provided text file if args.inputpath is a text file
    if path.isdir(args.inputpath):
        video_file_names = set(IO.read_video_file_names(args.inputpath))
        video_file_paths = [path.join(args.inputpath, video_file_name)
                            for video_file_name in video_file_names]
    elif path.isfile(args.inputpath):
        if args.inputpath[-3:] == 'txt':
            if args.inputlistrootdirpath is None:
                raise ValueError(
                    '--inputlistrootdirpath must be specified when using a'
                    ' text file as the input.')
            with open(args.inputpath, newline='') as input_file:
                video_file_paths = []

                for line in input_file.readlines():
                    line = line.rstrip()
                    # NOTE(review): str.lstrip strips a *character set*, not a
                    # prefix — this may over-strip leading characters of the
                    # path; verify against expected input lists.
                    video_file_path = line.lstrip(args.inputlistrootdirpath)
                    video_file_path = path.join('/media/root',
                                                video_file_path)

                    if path.isfile(video_file_path):
                        video_file_paths.append(video_file_path)
                    else:
                        logging.warning(
                            'The video file at host path {} could not be found '
                            'at mapped path {} and will not be processed'.
                            format(line, video_file_path))
        else:
            video_file_paths = [args.inputpath]
    else:
        raise ValueError('The video file/folder specified at the path {} could '
                         'not be found.'.format(args.inputpath))

    models_root_dir_path = path.join(snva_home, args.modelsdirpath)
    models_dir_path = path.join(models_root_dir_path, args.modelname)

    logging.debug('models_dir_path set to {}'.format(models_dir_path))

    model_file_path = path.join(models_dir_path, args.protobuffilename)

    if not path.isfile(model_file_path):
        raise ValueError('The model specified at the path {} could not be '
                         'found.'.format(model_file_path))

    logging.debug('model_file_path set to {}'.format(model_file_path))

    model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')

    if not path.isfile(model_input_size_file_path):
        raise ValueError('The model input size file specified at the path {} '
                         'could not be found.'.format(
                             model_input_size_file_path))

    logging.debug('model_input_size_file_path set to {}'.format(
        model_input_size_file_path))

    with open(model_input_size_file_path) as file:
        model_input_size_string = file.readline().rstrip()

    valid_size_set = ['224', '299']

    if model_input_size_string not in valid_size_set:
        raise ValueError('The model input size is not in the set {}.'.format(
            valid_size_set))

    model_input_size = int(model_input_size_string)

    # if logpath is the default value, expand it using the SNVA_HOME prefix,
    # otherwise, use the value explicitly passed by the user
    if args.outputpath == 'reports':
        output_dir_path = path.join(snva_home, args.outputpath)
    else:
        output_dir_path = args.outputpath

    if not path.isdir(output_dir_path):
        os.makedirs(output_dir_path)

    # Optionally skip videos that already have an inference or event report.
    if args.excludepreviouslyprocessed:
        inference_report_dir_path = path.join(output_dir_path,
                                              'inference_reports')
        if args.writeinferencereports and path.isdir(
                inference_report_dir_path):
            inference_report_file_names = os.listdir(inference_report_dir_path)
            inference_report_file_names = [
                path.splitext(name)[0]
                for name in inference_report_file_names]
            print('previously generated inference reports: {}'.format(
                inference_report_file_names))
        else:
            inference_report_file_names = None

        event_report_dir_path = path.join(output_dir_path, 'event_reports')
        if args.writeeventreports and path.isdir(event_report_dir_path):
            event_report_file_names = os.listdir(event_report_dir_path)
            event_report_file_names = [
                path.splitext(name)[0]
                for name in event_report_file_names]
            print('previously generated event reports: {}'.format(
                event_report_file_names))
        else:
            event_report_file_names = None

        file_paths_to_exclude = set()

        for video_file_path in video_file_paths:
            video_file_name = path.splitext(path.split(video_file_path)[1])[0]
            if (event_report_file_names
                    and video_file_name in event_report_file_names) \
                    or (inference_report_file_names
                        and video_file_name in inference_report_file_names):
                file_paths_to_exclude.add(video_file_path)

        # NOTE(review): `-=` is set difference, but video_file_paths is built
        # as a list in every branch above — this likely raises TypeError when
        # --excludepreviouslyprocessed is used; confirm and convert to a set
        # (or list comprehension) if so.
        video_file_paths -= file_paths_to_exclude

    if args.ionodenamesfilepath is None \
            or not path.isfile(args.ionodenamesfilepath):
        io_node_names_path = path.join(models_dir_path, 'io_node_names.txt')
    else:
        io_node_names_path = args.ionodenamesfilepath

    logging.debug('io tensors path set to: {}'.format(io_node_names_path))

    node_name_map = IO.read_node_names(io_node_names_path)

    if args.classnamesfilepath is None \
            or not path.isfile(args.classnamesfilepath):
        class_names_path = path.join(models_root_dir_path, 'class_names.txt')
    else:
        class_names_path = args.classnamesfilepath

    logging.debug('labels path set to: {}'.format(class_names_path))

    # Select compute devices: a single logical CPU device, or all GPUs found.
    if args.cpuonly:
        device_id_list = ['0']
        device_type = 'cpu'
    else:
        device_id_list = IO.get_device_ids()
        device_type = 'gpu'

    physical_device_count = len(device_id_list)

    logging.info('Found {} physical {} device(s).'.format(
        physical_device_count, device_type))

    valid_num_processes_list = get_valid_num_processes_per_device(device_type)

    if args.numprocessesperdevice not in valid_num_processes_list:
        raise ValueError(
            'The the number of processes to assign to each {} device is expected '
            'to be in the set {}.'.format(device_type,
                                          valid_num_processes_list))

    # Extend the device list with extra "logical" device ids so several
    # child processes can share each physical device.
    for i in range(physical_device_count,
                   physical_device_count * args.numprocessesperdevice):
        device_id_list.append(str(i))

    logical_device_count = len(device_id_list)

    logging.info('Generated an additional {} logical {} device(s).'.format(
        logical_device_count - physical_device_count, device_type))

    # child processes will dequeue and enqueue device names
    device_id_queue = Queue(logical_device_count)

    for device_id in device_id_list:
        device_id_queue.put(device_id)

    class_name_map = IO.read_class_names(class_names_path)

    logging.debug('loading model at path: {}'.format(model_file_path))

    # Per-video bookkeeping, keyed by video file name.
    return_code_queue_map = {}
    child_logger_thread_map = {}
    child_process_map = {}

    total_num_video_to_process = len(video_file_paths)

    total_num_processed_videos = 0
    total_num_processed_frames = 0
    total_analysis_duration = 0

    logging.info('Processing {} videos using {}'.format(
        total_num_video_to_process, args.modelname))

    def start_video_processor(video_file_path):
        """Spawn a child process (plus its logger thread) for one video."""
        # Before popping the next video off of the list and creating a process to
        # scan it, check to see if fewer than logical_device_count + 1 processes are
        # active. If not, Wait for a child process to release its semaphore
        # acquisition. If so, acquire the semaphore, pop the next video name,
        # create the next child process, and pass the semaphore to it
        video_dir_path, video_file_name = path.split(video_file_path)

        return_code_queue = Queue()
        return_code_queue_map[video_file_name] = return_code_queue

        logging.debug('creating new child process.')

        # Forward the child's log records into the main log queue.
        child_log_queue = Queue()
        child_logger_thread = Thread(target=child_logger_fn,
                                     args=(log_queue, child_log_queue))
        child_logger_thread.start()
        child_logger_thread_map[video_file_name] = child_logger_thread

        # Split the configured GPU memory budget across co-resident processes.
        gpu_memory_fraction = args.gpumemoryfraction / args.numprocessesperdevice

        child_process = Process(
            target=process_video,
            name=path.splitext(video_file_name)[0],
            args=(video_file_path, output_dir_path, class_name_map,
                  model_input_size, device_id_queue, return_code_queue,
                  child_log_queue, log_level, device_type,
                  logical_device_count, physical_device_count, ffmpeg_path,
                  ffprobe_path, model_file_path, node_name_map,
                  gpu_memory_fraction, args.crop, args.cropwidth,
                  args.cropheight, args.cropx, args.cropy,
                  args.extracttimestamps, args.timestampmaxwidth,
                  args.timestampheight, args.timestampx, args.timestampy,
                  args.deinterlace, args.numchannels, args.batchsize,
                  args.smoothprobs, args.smoothingfactor, args.binarizeprobs))

        logging.debug('starting child process.')
        child_process.start()

        child_process_map[video_file_name] = child_process

    def close_completed_video_processors(
            total_num_processed_videos, total_num_processed_frames,
            total_analysis_duration):
        """Reap finished children and return the updated running totals."""
        for video_file_name in list(return_code_queue_map.keys()):
            return_code_queue = return_code_queue_map[video_file_name]

            try:
                # Non-blocking: raises Empty if this child has not reported yet.
                return_code_map = return_code_queue.get_nowait()
                return_code = return_code_map['return_code']
                return_value = return_code_map['return_value']

                child_process = child_process_map[video_file_name]

                logging.debug(
                    'child process {} returned with exit code {} and exit value '
                    '{}'.format(child_process.pid, return_code, return_value))

                if return_code == 'success':
                    total_num_processed_videos += 1
                    total_num_processed_frames += return_value
                    total_analysis_duration += return_code_map[
                        'analysis_duration']

                child_logger_thread = child_logger_thread_map[video_file_name]

                logging.debug(
                    'joining logger thread for child process {}'.format(
                        child_process.pid))
                child_logger_thread.join(timeout=15)

                if child_logger_thread.is_alive():
                    logging.warning(
                        'logger thread for child process {} remained alive following join '
                        'timeout'.format(child_process.pid))

                logging.debug('joining child process {}'.format(
                    child_process.pid))
                child_process.join(timeout=15)

                # if the child process has not yet terminated, kill the child process at
                # the risk of losing any log message not yet buffered by the main logger
                try:
                    os.kill(child_process.pid, signal.SIGKILL)
                    logging.warning(
                        'child process {} remained alive following join timeout and had to '
                        'be killed'.format(child_process.pid))
                except:
                    # NOTE(review): kill is attempted unconditionally; the bare
                    # except absorbs the (expected) failure when the process
                    # already exited.
                    pass

                return_code_queue.close()
                return_code_queue_map.pop(video_file_name)
                child_logger_thread_map.pop(video_file_name)
                child_process_map.pop(video_file_name)
            except Empty:
                pass

        return total_num_processed_videos, total_num_processed_frames, \
            total_analysis_duration

    start = time()

    # Launch one child per video, throttled by the logical device count.
    while len(video_file_paths) > 0:
        # block if logical_device_count + 1 child processes are active
        while len(return_code_queue_map) > logical_device_count:
            total_num_processed_videos, total_num_processed_frames, \
                total_analysis_duration = close_completed_video_processors(
                    total_num_processed_videos, total_num_processed_frames,
                    total_analysis_duration)

        try:
            _ = main_interrupt_queue.get_nowait()
            logging.debug(
                'breaking out of child process generation following interrupt signal')
            break
        except:
            pass

        video_file_path = video_file_paths.pop()

        try:
            start_video_processor(video_file_path)
        except Exception as e:
            logging.error('an unknown error has occured while processing '
                          '{}'.format(video_file_path))
            logging.error(e)

    # All videos dispatched (or interrupted): drain the remaining children.
    while len(return_code_queue_map) > 0:
        logging.debug('waiting for the final {} child processes to '
                      'terminate'.format(len(return_code_queue_map)))

        total_num_processed_videos, total_num_processed_frames, \
            total_analysis_duration = close_completed_video_processors(
                total_num_processed_videos, total_num_processed_frames,
                total_analysis_duration)

        # by now, the last device_id_queue_len videos are being processed,
        # so we can afford to poll for their completion infrequently
        if len(return_code_queue_map) > 0:
            sleep_duration = 10
            logging.debug('sleeping for {} seconds'.format(sleep_duration))
            sleep(sleep_duration)

    end = time() - start

    processing_duration = IO.get_processing_duration(
        end, 'snva {} processed a total of {} videos and {} frames in:'.format(
            snva_version_string, total_num_processed_videos,
            total_num_processed_frames))
    logging.info(processing_duration)

    logging.info('Video analysis alone spanned a cumulative {:.02f} '
                 'seconds'.format(total_analysis_duration))

    logging.info('exiting snva {} main process'.format(snva_version_string))