def process_video(video_file_path, output_dir_path, class_name_map,
                  model_name, model_signature_name, model_server_host,
                  model_input_size, return_code_queue, log_queue, log_level,
                  ffmpeg_path, ffprobe_path, do_crop, crop_width, crop_height,
                  crop_x, crop_y, do_extract_timestamps, timestamp_max_width,
                  timestamp_height, timestamp_x, timestamp_y, do_deinterlace,
                  num_channels, batch_size, do_smooth_probs, smoothing_factor,
                  do_binarize_probs, do_write_inference_reports,
                  do_write_event_reports, max_threads, processor_mode):
    """Analyze one video in a worker process and report the outcome.

    Decodes the video to raw RGB frames with FFmpeg, runs inference via
    ``VideoAnalyzer``, optionally OCRs burned-in timestamps, and writes
    inference and event reports under ``output_dir_path/processor_mode``.

    This function never raises to its caller: every exit path (success or
    failure) pushes a ``None`` sentinel onto ``log_queue`` and then a dict
    of the form ``{'return_code': ..., 'return_value': ...}`` onto
    ``return_code_queue`` before returning.  On success the dict also
    carries ``analysis_duration`` and ``output_locations``.
    """
    configure_logger(log_level, log_queue)

    interrupt_queue = Queue()

    # Create a output subdirectory for the current mode
    output_dir_path = path.join(output_dir_path, processor_mode)

    def interrupt_handler(signal_number, _):
        # Record the interrupt; the analysis try-block below inspects
        # interrupt_queue to distinguish an interrupt from a real frame-count
        # mismatch.  The pipeline itself is not halted here.
        logging.warning('received interrupt signal {}.'.format(signal_number))

        interrupt_queue.put_nowait('_')

        # TODO: cancel timestamp/report generation when an interrupt is signalled
        # logging.debug('instructing inference pipeline to halt.')
        # child_interrupt_queue.put_nowait('_')

    signal.signal(signal.SIGINT, interrupt_handler)

    video_file_name = path.basename(video_file_path)
    video_file_name, _ = path.splitext(video_file_name)

    logging.info('preparing to analyze {}'.format(video_file_path))

    # Paths of every report written for this video; stringified into the
    # success message at the end.
    output_files = []

    try:
        start = time()

        frame_width, frame_height, num_frames, _ = IO.get_video_dimensions(
            video_file_path, ffprobe_path)

        end = time() - start

        processing_duration = IO.get_processing_duration(
            end, 'read video dimensions in')
        logging.info(processing_duration)
    except Exception as e:
        logging.error('encountered an unexpected error while fetching video '
                      'dimensions')
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value get_video_dimensions')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'get_video_dimensions'
        })
        return_code_queue.close()

        return

    try:
        # should_crop may veto or confirm cropping based on the actual frame size.
        do_crop = should_crop(frame_width, frame_height, do_crop, crop_width,
                              crop_height, crop_x, crop_y)
    except Exception as e:
        logging.error(e)

        logging.debug('will exit with code: exception and value should_crop')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'should_crop'
        })
        return_code_queue.close()

        return

    logging.debug('Constructing ffmpeg command')

    ffmpeg_command = [ffmpeg_path, '-i', video_file_path]

    if do_deinterlace:
        ffmpeg_command.append('-deinterlace')

    # Raw RGB24 frames are streamed over stdout (image2pipe) for the analyzer
    # to consume.
    ffmpeg_command.extend([
        '-vcodec', 'rawvideo', '-pix_fmt', 'rgb24', '-vsync', 'vfr',
        '-hide_banner', '-loglevel', '0', '-f', 'image2pipe', 'pipe:1'
    ])

    try:
        do_extract_timestamps = should_extract_timestamps(
            frame_width, frame_height, do_extract_timestamps,
            timestamp_max_width, timestamp_height, timestamp_x, timestamp_y)
    except Exception as e:
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value should_extract_timestamps'
        )
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'should_extract_timestamps'
        })
        return_code_queue.close()

        return

    frame_shape = [frame_height, frame_width, num_channels]

    logging.debug('FFmpeg output frame shape == {}'.format(frame_shape))

    #TODO parameterize tf serving values
    analyzer = VideoAnalyzer(frame_shape, num_frames, len(class_name_map),
                             batch_size, model_name, model_signature_name,
                             model_server_host, model_input_size,
                             do_extract_timestamps, timestamp_x, timestamp_y,
                             timestamp_height, timestamp_max_width, do_crop,
                             crop_x, crop_y, crop_width, crop_height,
                             ffmpeg_command, max_threads)

    try:
        start = time()

        num_analyzed_frames, probability_array, timestamp_array = analyzer.run()

        end = time()

        analysis_duration = end - start

        processing_duration = IO.get_processing_duration(
            analysis_duration,
            'processed {} frames in'.format(num_analyzed_frames))

        logging.info(processing_duration)

        if num_analyzed_frames != num_frames:
            # A frame-count mismatch after an interrupt is expected (the run
            # was cut short); otherwise it indicates a real pipeline fault.
            if interrupt_queue.empty():
                raise AssertionError('num_analyzed_frames ({}) != num_frames '
                                     '({})'.format(num_analyzed_frames,
                                                   num_frames))
            else:
                raise InterruptedError('num_analyzed_frames ({}) != num_frames '
                                       '({})'.format(num_analyzed_frames,
                                                     num_frames))
    except InterruptedError as ae:
        logging.error(ae)

        logging.debug(
            'will exit with code: interrupt and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'interrupt',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return
    except AssertionError as ae:
        logging.error(ae)

        logging.debug(
            'will exit with code: assertion error and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'assertion error',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return
    except Exception as e:
        logging.error(
            'encountered an unexpected error while analyzing {}'.format(
                video_file_name))
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return

    logging.debug('converting timestamp images to strings')

    if do_extract_timestamps:
        try:
            start = time()

            timestamp_object = Timestamp(timestamp_height, timestamp_max_width)
            timestamp_strings, qa_flags = \
                timestamp_object.stringify_timestamps(timestamp_array)

            end = time() - start

            processing_duration = IO.get_processing_duration(
                end, 'timestamp strings converted in')

            logging.info(processing_duration)
        except Exception as e:
            # NOTE(review): the .format(os.getpid()) below is a no-op — the
            # message contains no placeholder for it to fill.
            logging.error('encountered an unexpected error while converting '
                          'timestamp image crops to strings'.format(
                              os.getpid()))
            logging.error(e)

            logging.debug(
                'will exit with code: exception and value: stringify_timestamps'
            )
            log_queue.put(None)
            log_queue.close()

            return_code_queue.put({
                'return_code': 'exception',
                'return_value': 'stringify_timestamps'
            })
            return_code_queue.close()

            return
    else:
        timestamp_strings = None
        qa_flags = None

    logging.debug('attempting to generate reports')

    if do_write_inference_reports:
        try:
            start = time()

            # NOTE(review): this reads analyzer.prob_array rather than the
            # probability_array returned by analyzer.run() — presumably the
            # same data; confirm against VideoAnalyzer.
            inf_report = IO.write_inference_report(
                video_file_name, output_dir_path, analyzer.prob_array,
                class_name_map, timestamp_strings, qa_flags, do_smooth_probs,
                smoothing_factor, do_binarize_probs)

            output_files.append(inf_report)

            end = time() - start

            processing_duration = IO.get_processing_duration(
                end, 'generated inference reports in')
            logging.info(processing_duration)
        except Exception as e:
            logging.error(
                'encountered an unexpected error while generating inference report.'
            )
            logging.error(e)

            logging.debug(
                'will exit with code: exception and value: write_inference_report'
            )
            log_queue.put(None)
            log_queue.close()

            return_code_queue.put({
                'return_code': 'exception',
                'return_value': 'write_inference_report'
            })
            return_code_queue.close()

            return

    try:
        start = time()

        if do_smooth_probs:
            probability_array = IO.smooth_probs(probability_array,
                                                smoothing_factor)

        # Frame numbers are 1-based for reporting.
        frame_numbers = list(range(1, len(probability_array) + 1))

        if timestamp_strings is not None:
            timestamp_strings = timestamp_strings.astype(np.int32)

        trip = Trip(frame_numbers, timestamp_strings, qa_flags,
                    probability_array, class_name_map)

        if processor_mode == "weather":
            if len(trip.feature_sequence) > 0:
                logging.info('{} weather events were found in {}'.format(
                    len(trip.feature_sequence), video_file_name))

                if do_write_event_reports:
                    weather_rep = IO.write_weather_report(
                        video_file_name, output_dir_path,
                        trip.feature_sequence)
                    output_files.append(weather_rep)
        else:
            events = trip.find_work_zone_events()

            if len(events) > 0:
                logging.info('{} work zone events were found in {}'.format(
                    len(events), video_file_name))

                if do_write_event_reports:
                    event_rep = IO.write_event_report(video_file_name,
                                                      output_dir_path, events)
                    output_files.append(event_rep)
            else:
                logging.info('No work zone events were found in {}'.format(
                    video_file_name))

        end = time() - start

        processing_duration = IO.get_processing_duration(
            end, 'generated event reports in')
        logging.info(processing_duration)
    except Exception as e:
        logging.error(
            'encountered an unexpected error while generating event report.')
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value: write_event_report')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'write_event_report'
        })
        return_code_queue.close()

        return

    logging.debug('will exit with code: success and value: {}'.format(
        num_analyzed_frames))
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'success',
        'return_value': num_analyzed_frames,
        'analysis_duration': analysis_duration,
        'output_locations': str(output_files)
    })
    return_code_queue.close()
def process_video_signalstate(
        video_file_path, output_dir_path, class_name_map, model_name,
        model_signature_name, model_server_host, model_input_size,
        return_code_queue, log_queue, log_level, ffmpeg_path, ffprobe_path,
        do_crop, crop_width, crop_height, crop_x, crop_y,
        do_extract_timestamps, timestamp_max_width, timestamp_height,
        timestamp_x, timestamp_y, do_deinterlace, num_channels, batch_size,
        do_smooth_probs, smoothing_factor, do_binarize_probs,
        do_write_bbox_reports, do_write_event_reports, max_threads,
        processor_mode):
    """Analyze one video for signal state detections in a worker process.

    Samples one frame per second (FFmpeg ``-r 1``, with the video duration
    used as the expected frame count), runs detection via
    ``SignalVideoAnalyzer``, optionally OCRs burned-in timestamps, and
    writes bounding-box and event reports under
    ``output_dir_path/processor_mode``.

    Never raises to its caller: every exit path pushes a ``None`` sentinel
    onto ``log_queue`` and a ``{'return_code': ..., 'return_value': ...}``
    dict onto ``return_code_queue`` before returning.  On success the dict
    also carries ``analysis_duration`` and ``output_locations``.

    Fixes vs. previous revision:
      * bbox report no longer crashes with ``int(None)`` when timestamp
        extraction is disabled — the timestamp field is emitted as null.
      * removed a no-op ``.format(os.getpid())`` on a placeholder-free log
        message, and an unused ``frame_numbers`` local.
    """
    configure_logger(log_level, log_queue)

    interrupt_queue = Queue()

    # Create a output subdirectory for the current mode
    output_dir_path = path.join(output_dir_path, processor_mode)

    output_files = []

    def interrupt_handler(signal_number, _):
        # Only record the interrupt; report generation is not yet cancellable.
        logging.warning('received interrupt signal {}.'.format(signal_number))

        interrupt_queue.put_nowait('_')

        # TODO: cancel timestamp/report generation when an interrupt is signalled
        # logging.debug('instructing inference pipeline to halt.')
        # child_interrupt_queue.put_nowait('_')

    signal.signal(signal.SIGINT, interrupt_handler)

    video_file_name = path.basename(video_file_path)
    video_file_name, _ = path.splitext(video_file_name)

    logging.info('preparing to signalstate analyze {}'.format(video_file_path))

    try:
        start = time()

        # For signal state, we use duration as num_frames, as we will only
        # grab one frame per second
        frame_width, frame_height, num_frames, duration = \
            IO.get_video_dimensions(video_file_path, ffprobe_path)
        num_frames = duration

        end = time() - start

        processing_duration = IO.get_processing_duration(
            end, 'read video dimensions in')
        logging.info(processing_duration)
    except Exception as e:
        logging.error('encountered an unexpected error while fetching video '
                      'dimensions')
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value get_video_dimensions')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'get_video_dimensions'
        })
        return_code_queue.close()

        return

    try:
        # should_crop may veto or confirm cropping based on the actual frame size.
        do_crop = should_crop(frame_width, frame_height, do_crop, crop_width,
                              crop_height, crop_x, crop_y)
    except Exception as e:
        logging.error(e)

        logging.debug('will exit with code: exception and value should_crop')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'should_crop'
        })
        return_code_queue.close()

        return

    logging.debug('Constructing ffmpeg command')

    ffmpeg_command = [ffmpeg_path, '-i', video_file_path]

    if do_deinterlace:
        ffmpeg_command.append('-deinterlace')

    # '-r 1' downsamples the stream to one frame per second, matching the
    # duration-based num_frames estimate above.
    ffmpeg_command.extend([
        '-vcodec', 'rawvideo', '-pix_fmt', 'rgb24', '-vsync', 'vfr',
        '-hide_banner', '-loglevel', '0', '-r', '1', '-f', 'image2pipe',
        'pipe:1'
    ])

    try:
        do_extract_timestamps = should_extract_timestamps(
            frame_width, frame_height, do_extract_timestamps,
            timestamp_max_width, timestamp_height, timestamp_x, timestamp_y)
    except Exception as e:
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value should_extract_timestamps'
        )
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'should_extract_timestamps'
        })
        return_code_queue.close()

        return

    frame_shape = [frame_height, frame_width, num_channels]

    logging.debug('FFmpeg output frame shape == {}'.format(frame_shape))

    analyzer = SignalVideoAnalyzer(
        frame_shape, num_frames, len(class_name_map), batch_size, model_name,
        model_signature_name, model_server_host, model_input_size,
        do_extract_timestamps, timestamp_x, timestamp_y, timestamp_height,
        timestamp_max_width, do_crop, crop_x, crop_y, crop_width, crop_height,
        ffmpeg_command, max_threads)

    try:
        start = time()

        num_analyzed_frames, frame_map_array, timestamp_array = analyzer.run()

        end = time()

        analysis_duration = end - start

        processing_duration = IO.get_processing_duration(
            analysis_duration,
            'processed {} frames in'.format(num_analyzed_frames))

        logging.info(processing_duration)
    except InterruptedError as ae:
        logging.error(ae)

        logging.debug(
            'will exit with code: interrupt and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'interrupt',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return
    except AssertionError as ae:
        logging.error(ae)

        logging.debug(
            'will exit with code: assertion error and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'assertion error',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return
    except Exception as e:
        logging.error(
            'encountered an unexpected error while analyzing {}'.format(
                video_file_name))
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return

    logging.debug('converting timestamp images to strings')

    if do_extract_timestamps:
        try:
            start = time()

            timestamp_object = Timestamp(timestamp_height, timestamp_max_width)
            timestamp_strings, qa_flags = \
                timestamp_object.stringify_timestamps(timestamp_array)

            end = time() - start

            processing_duration = IO.get_processing_duration(
                end, 'timestamp strings converted in')

            logging.info(processing_duration)
        except Exception as e:
            logging.error('encountered an unexpected error while converting '
                          'timestamp image crops to strings')
            logging.error(e)

            logging.debug(
                'will exit with code: exception and value: stringify_timestamps'
            )
            log_queue.put(None)
            log_queue.close()

            return_code_queue.put({
                'return_code': 'exception',
                'return_value': 'stringify_timestamps'
            })
            return_code_queue.close()

            return
    else:
        timestamp_strings = None
        qa_flags = None

    logging.debug('attempting to generate reports')

    if do_write_bbox_reports:
        json_data = []

        for frame_num, frame_map in enumerate(frame_map_array, start=0):
            if timestamp_strings is not None:
                timestamp = timestamp_strings[frame_num]
            else:
                timestamp = None

            for i in range(0, frame_map['num_detections']):
                class_name = class_name_map[frame_map['detection_classes'][i]]
                bbox = frame_map['detection_boxes'][i]

                json_data.append({
                    'frame_num': int(frame_num),
                    'video_name': video_file_name,
                    # timestamp is None when extraction is disabled; emit
                    # null instead of crashing on int(None).
                    'timestamp':
                        int(timestamp) if timestamp is not None else None,
                    'class_name': class_name,
                    'detection_boxes': bbox.tolist(),
                    'detection_score': float(frame_map['detection_scores'][i])
                })

        bbox_rep = IO.write_json(video_file_name + 'BBOX', output_dir_path,
                                 json_data)
        output_files.append(bbox_rep)

    try:
        start = time()

        if timestamp_strings is not None:
            timestamp_strings = timestamp_strings.astype(np.int32)

        # Process our raw predictions into a list of bounding boxes and frame data
        detections = []

        for frame_num, frame_map in enumerate(frame_map_array, start=0):
            if timestamp_strings is not None:
                timestamp = timestamp_strings[frame_num]
            else:
                timestamp = None

            for i in range(0, frame_map['num_detections']):
                class_name = class_name_map[frame_map['detection_classes'][i]]
                bbox = frame_map['detection_boxes'][i]

                # boxes are scaled by frame dims here, so they are assumed to
                # be normalized [ymin, xmin, ymax, xmax] — TODO confirm
                # against SignalVideoAnalyzer's output format.
                xtl = bbox[1] * frame_width
                ytl = bbox[0] * frame_height
                xbr = bbox[3] * frame_width
                ybr = bbox[2] * frame_height

                detections.append({
                    'frame_num': frame_num,
                    'timestamp': timestamp,
                    'classification': class_name,
                    'xtl': xtl,
                    'ytl': ytl,
                    'xbr': xbr,
                    'ybr': ybr
                })

        if len(detections) > 0:
            logging.info('{} signal state detections were found in {}'.format(
                len(detections), video_file_name))

            if do_write_event_reports:
                evt_rep = IO.write_signalstate_report(
                    video_file_name, output_dir_path, detections)
                output_files.append(evt_rep)
        else:
            logging.info('No signal state events were found in {}'.format(
                video_file_name))

        end = time() - start

        processing_duration = IO.get_processing_duration(
            end, 'generated event reports in')
        logging.info(processing_duration)
    except Exception as e:
        logging.error(
            'encountered an unexpected error while generating event report.')
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value: write_event_report')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'write_event_report'
        })
        return_code_queue.close()

        return

    logging.debug('will exit with code: success and value: {}'.format(
        num_analyzed_frames))
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'success',
        'return_value': num_analyzed_frames,
        'analysis_duration': analysis_duration,
        'output_locations': str(output_files)
    })
    return_code_queue.close()
async def main():
    """Top-level orchestration loop for the SNVA worker node.

    Resolves FFmpeg/FFprobe binaries and model configuration, then connects
    to the control node over a websocket (``ws.connect``) and runs a
    request/dispatch loop: it requests videos, spawns one child process per
    video (``process_video`` or ``process_video_signalstate``), forwards
    per-video completion back to the control node, and reconnects on
    transient connection failures.
    """
    logging.info('entering snva {} main process'.format(snva_version_string))

    # total_num_video_to_process = None

    def interrupt_handler(signal_number, _):
        logging.warning('Main process received interrupt signal '
                        '{}.'.format(signal_number))
        main_interrupt_queue.put_nowait('_')

        # if total_num_video_to_process is None \
        #     or total_num_video_to_process == len(video_file_paths):

        # Signal the logging thread to finish up
        logging.debug('signaling logger thread to end service.')

        log_queue.put_nowait(None)

        logger_thread.join()

        logging.shutdown()

    signal.signal(signal.SIGINT, interrupt_handler)

    # Locate the ffmpeg binary: env var first, then platform defaults.
    try:
        ffmpeg_path = os.environ['FFMPEG_HOME']
    except KeyError:
        logging.warning('Environment variable FFMPEG_HOME not set. Attempting '
                        'to use default ffmpeg binary location.')

        if platform.system() == 'Windows':
            ffmpeg_path = 'ffmpeg.exe'
        else:
            ffmpeg_path = '/usr/local/bin/ffmpeg'

            if not path.exists(ffmpeg_path):
                ffmpeg_path = '/usr/bin/ffmpeg'

    logging.debug('FFMPEG path set to: {}'.format(ffmpeg_path))

    # Locate the ffprobe binary the same way.
    try:
        ffprobe_path = os.environ['FFPROBE_HOME']
    except KeyError:
        logging.warning('Environment variable FFPROBE_HOME not set. '
                        'Attempting to use default ffprobe binary location.')

        if platform.system() == 'Windows':
            ffprobe_path = 'ffprobe.exe'
        else:
            ffprobe_path = '/usr/local/bin/ffprobe'

            if not path.exists(ffprobe_path):
                ffprobe_path = '/usr/bin/ffprobe'

    logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))

    # # TODO validate all video file paths in the provided text file if args.inputpath is a text file
    # if path.isdir(args.inputpath):
    #   video_file_names = set(IO.read_video_file_names(args.inputpath))
    #   video_file_paths = [path.join(args.inputpath, video_file_name)
    #                       for video_file_name in video_file_names]
    # elif path.isfile(args.inputpath):
    #   if args.inputpath[-3:] == 'txt':
    #     if args.inputlistrootdirpath is None:
    #       raise ValueError('--inputlistrootdirpath must be specified when using a'
    #                        ' text file as the input.')
    #     with open(args.inputpath, newline='') as input_file:
    #       video_file_paths = []
    #
    #       for line in input_file.readlines():
    #         line = line.rstrip()
    #         video_file_path = line.lstrip(args.inputlistrootdirpath)
    #         video_file_path = path.join('/media/root', video_file_path)
    #
    #         if path.isfile(video_file_path):
    #           video_file_paths.append(video_file_path)
    #         else:
    #           logging.warning('The video file at host path {} could not be found '
    #                           'at mapped path {} and will not be processed'.
    #                           format(line, video_file_path))
    #   else:
    #     video_file_paths = [args.inputpath]
    # else:
    #   raise ValueError('The video file/folder specified at the path {} could '
    #                    'not be found.'.format(args.inputpath))

    models_root_dir_path = path.join(snva_home, args.modelsdirpath)

    models_dir_path = path.join(models_root_dir_path, args.modelname)

    logging.debug('models_dir_path set to {}'.format(models_dir_path))

    # model_file_path = path.join(models_dir_path, args.protobuffilename)
    #
    # if not path.isfile(model_file_path):
    #   raise ValueError('The model specified at the path {} could not be '
    #                    'found.'.format(model_file_path))
    #
    # logging.debug('model_file_path set to {}'.format(model_file_path))

    model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')

    if not path.isfile(model_input_size_file_path):
        raise ValueError('The model input size file specified at the path {} '
                         'could not be found.'.format(
                             model_input_size_file_path))

    logging.debug('model_input_size_file_path set to {}'.format(
        model_input_size_file_path))

    # The model's expected square input edge length is stored on the first
    # line of input_size.txt; only 224 and 299 are supported.
    with open(model_input_size_file_path) as file:
        model_input_size_string = file.readline().rstrip()

        valid_size_set = ['224', '299']

        if model_input_size_string not in valid_size_set:
            raise ValueError('The model input size is not in the set {}.'.format(
                valid_size_set))

        model_input_size = int(model_input_size_string)

    # if logpath is the default value, expand it using the SNVA_HOME prefix,
    # otherwise, use the value explicitly passed by the user
    if args.outputpath == 'reports':
        output_dir_path = path.join(snva_home, args.outputpath)
    else:
        output_dir_path = args.outputpath

    logging.info("Output path set to: {}".format(output_dir_path))

    if not path.isdir(output_dir_path):
        os.makedirs(output_dir_path)

    if args.classnamesfilepath is None \
            or not path.isfile(args.classnamesfilepath):
        class_names_path = path.join(models_root_dir_path, 'class_names.txt')
    else:
        class_names_path = args.classnamesfilepath

    logging.debug('labels path set to: {}'.format(class_names_path))

    num_processes = args.numprocesses

    class_name_map = IO.read_class_names(class_names_path)

    # Per-video bookkeeping, all keyed by video_file_path.
    return_code_queue_map = {}
    child_logger_thread_map = {}
    child_process_map = {}

    total_num_processed_videos = 0
    total_num_processed_frames = 0
    total_analysis_duration = 0

    def start_video_processor(video_file_path):
        # Spawn one child process (plus a logger-forwarding thread) to
        # analyze a single video, and register both in the bookkeeping maps.
        #
        # Before popping the next video off of the list and creating a process to
        # scan it, check to see if fewer than logical_device_count + 1 processes are
        # active. If not, Wait for a child process to release its semaphore
        # acquisition. If so, acquire the semaphore, pop the next video name,
        # create the next child process, and pass the semaphore to it
        return_code_queue = Queue()

        return_code_queue_map[video_file_path] = return_code_queue

        logging.debug('creating new child process.')

        child_log_queue = Queue()

        child_logger_thread = Thread(target=child_logger_fn,
                                     args=(log_queue, child_log_queue))

        child_logger_thread.start()

        child_logger_thread_map[video_file_path] = child_logger_thread

        if 'signalstate' == args.processormode:
            child_process = Process(
                target=process_video_signalstate,
                name=path.splitext(path.split(video_file_path)[1])[0],
                args=(video_file_path, output_dir_path, class_name_map,
                      args.modelname, args.modelsignaturename,
                      args.modelserverhost, model_input_size,
                      return_code_queue, child_log_queue, log_level,
                      ffmpeg_path, ffprobe_path, args.crop, args.cropwidth,
                      args.cropheight, args.cropx, args.cropy,
                      args.extracttimestamps, args.timestampmaxwidth,
                      args.timestampheight, args.timestampx, args.timestampy,
                      args.deinterlace, args.numchannels, args.batchsize,
                      args.smoothprobs, args.smoothingfactor,
                      args.binarizeprobs, args.writebbox,
                      args.writeeventreports, args.maxanalyzerthreads,
                      args.processormode))
        else:
            child_process = Process(
                target=process_video,
                name=path.splitext(path.split(video_file_path)[1])[0],
                args=(video_file_path, output_dir_path, class_name_map,
                      args.modelname, args.modelsignaturename,
                      args.modelserverhost, model_input_size,
                      return_code_queue, child_log_queue, log_level,
                      ffmpeg_path, ffprobe_path, args.crop, args.cropwidth,
                      args.cropheight, args.cropx, args.cropy,
                      args.extracttimestamps, args.timestampmaxwidth,
                      args.timestampheight, args.timestampx, args.timestampy,
                      args.deinterlace, args.numchannels, args.batchsize,
                      args.smoothprobs, args.smoothingfactor,
                      args.binarizeprobs, args.writeinferencereports,
                      args.writeeventreports, args.maxanalyzerthreads,
                      args.processormode))

        logging.debug('starting child process.')

        child_process.start()

        child_process_map[video_file_path] = child_process

    async def close_completed_video_processors(total_num_processed_videos,
                                               total_num_processed_frames,
                                               total_analysis_duration,
                                               websocket_conn):
        # Poll every registered child's return-code queue without blocking;
        # reap any that have finished and notify the control node of
        # successes. Returns the updated running totals.
        for video_file_path in list(return_code_queue_map.keys()):
            return_code_queue = return_code_queue_map[video_file_path]

            try:
                return_code_map = return_code_queue.get_nowait()

                return_code = return_code_map['return_code']
                return_value = return_code_map['return_value']

                child_process = child_process_map[video_file_path]

                logging.debug(
                    'child process {} returned with exit code {} and exit value '
                    '{}'.format(child_process.pid, return_code, return_value))

                if return_code == 'success':
                    total_num_processed_videos += 1
                    total_num_processed_frames += return_value
                    total_analysis_duration += return_code_map[
                        'analysis_duration']

                    logging.info('notifying control node of completion')

                    complete_request = json.dumps({
                        'action': 'COMPLETE',
                        'video': os.path.basename(video_file_path),
                        'output': return_code_map['output_locations']
                    })

                    await websocket_conn.send(complete_request)

                child_logger_thread = child_logger_thread_map[video_file_path]

                logging.debug(
                    'joining logger thread for child process {}'.format(
                        child_process.pid))

                child_logger_thread.join(timeout=15)

                if child_logger_thread.is_alive():
                    logging.warning(
                        'logger thread for child process {} remained alive following join '
                        'timeout'.format(child_process.pid))

                logging.debug('joining child process {}'.format(
                    child_process.pid))

                child_process.join(timeout=15)

                # if the child process has not yet terminated, kill the child process at
                # the risk of losing any log message not yet buffered by the main logger
                # NOTE(review): SIGKILL is attempted unconditionally — if the
                # child already exited the kill raises and is swallowed by the
                # bare except below, which also hides any other error here.
                try:
                    os.kill(child_process.pid, signal.SIGKILL)
                    logging.warning(
                        'child process {} remained alive following join timeout and had to '
                        'be killed'.format(child_process.pid))
                except:
                    pass

                return_code_queue.close()

                return_code_queue_map.pop(video_file_path)
                child_logger_thread_map.pop(video_file_path)
                child_process_map.pop(video_file_path)
            except Empty:
                # This child has not reported yet; check it again next poll.
                pass

        return total_num_processed_videos, total_num_processed_frames, \
            total_analysis_duration

    start = time()

    sleep_duration = 1
    breakLoop = False
    connectionId = None
    isIdle = False

    # Outer loop: (re)establish the websocket connection to the control node.
    while True:
        try:
            if breakLoop:
                break
            wsUrl = 'ws://' + args.controlnodehost + '/registerProcess'
            if connectionId is not None:
                # Reconnect under the previously assigned id.
                wsUrl = wsUrl + '?id=' + connectionId
            logging.debug("Connecting with URL {}".format(wsUrl))
            async with ws.connect(wsUrl) as conn:
                response = await conn.recv()
                response = json.loads(response)
                logging.info(response)
                if response['action'] != 'CONNECTION_SUCCESS':
                    raise ConnectionError(
                        'control node connection failed with response: {}'.
                        format(response))
                if connectionId is None:
                    connectionId = response['id']
                    logging.debug("Assigned id {}".format(connectionId))
                # Inner loop: request and dispatch work until shutdown.
                while True:
                    # block if num_processes child processes are active
                    while len(return_code_queue_map) >= num_processes:
                        total_num_processed_videos, total_num_processed_frames, \
                            total_analysis_duration = await close_completed_video_processors(
                                total_num_processed_videos,
                                total_num_processed_frames,
                                total_analysis_duration, conn)
                        sleep(sleep_duration)

                    try:  # todo poll for termination signal from control node
                        _ = main_interrupt_queue.get_nowait()
                        logging.debug(
                            'breaking out of child process generation following interrupt signal'
                        )
                        break
                    except:
                        # NOTE(review): bare except — presumably only
                        # queue.Empty is expected here; consider narrowing.
                        pass

                    if not isIdle:
                        logging.info('requesting video')
                        request = json.dumps({'action': 'REQUEST_VIDEO'})
                        await conn.send(request)
                        logging.info('reading response')
                        response = await conn.recv()
                    else:
                        # If idle, we will try to close completed processors until all are done
                        while len(return_code_queue_map) > 0:
                            # Before checking for completed processes, check for a new message
                            logging.info('Checking for new message')
                            try:
                                # If we get a response quickly, break our waiting loop and process the command
                                response = await asyncio.wait_for(
                                    conn.recv(), 1)
                                break
                            except asyncio.TimeoutError:
                                # Otherwise, go back to finishing our current tasks
                                logging.debug(
                                    'No new message from control node, continuing...'
                                )
                                pass
                            total_num_processed_videos, total_num_processed_frames, \
                                total_analysis_duration = await close_completed_video_processors(
                                    total_num_processed_videos,
                                    total_num_processed_frames,
                                    total_analysis_duration, conn)
                            # by now, the last device_id_queue_len videos are being processed,
                            # so we can afford to poll for their completion infrequently
                            if len(return_code_queue_map) > 0:
                                sleep(sleep_duration)
                        # Once all are complete, if still idle we have no work left to do - we just wait for a new message
                        response = await conn.recv()
                    response = json.loads(response)
                    if response['action'] == 'STATUS_REQUEST':
                        logging.info('control node requested status request')
                        pass
                    elif response['action'] == 'CEASE_REQUESTS':
                        logging.info(
                            'control node has no more videos to process')
                        isIdle = True
                        pass
                    elif response['action'] == 'RESUME_REQUESTS':
                        logging.info(
                            'control node has instructed to resume requests')
                        isIdle = False
                        pass
                    elif response['action'] == 'SHUTDOWN':
                        logging.info('control node requested shutdown')
                        breakLoop = True
                        break
                    elif response['action'] == 'PROCESS':
                        # TODO Prepend input path
                        video_file_path = os.path.join(args.inputpath,
                                                       response['path'])
                        request_received = json.dumps({
                            'action': 'REQUEST_RECEIVED',
                            'video': response['path']
                        })
                        await conn.send(request_received)
                        try:
                            start_video_processor(video_file_path)
                        except Exception as e:
                            logging.error(
                                'an unknown error has occured while processing {}'
                                .format(video_file_path))
                            logging.error(e)
                    else:
                        raise ConnectionError(
                            'control node replied with unexpected response: {}'
                            .format(response))
                logging.debug('{} child processes remain enqueued'.format(
                    len(return_code_queue_map)))
                # Drain any still-running children before reporting totals.
                while len(return_code_queue_map) > 0:
                    #logging.debug('waiting for the final {} child processes to '
                    #              'terminate'.format(len(return_code_queue_map)))
                    total_num_processed_videos, total_num_processed_frames, \
                        total_analysis_duration = await close_completed_video_processors(
                            total_num_processed_videos,
                            total_num_processed_frames,
                            total_analysis_duration, conn)
                    # by now, the last device_id_queue_len videos are being processed,
                    # so we can afford to poll for their completion infrequently
                    if len(return_code_queue_map) > 0:
                        #logging.debug('sleeping for {} seconds'.format(sleep_duration))
                        sleep(sleep_duration)
                end = time() - start
                processing_duration = IO.get_processing_duration(
                    end,
                    'snva {} processed a total of {} videos and {} frames in:'.
                    format(snva_version_string, total_num_processed_videos,
                           total_num_processed_frames))
                logging.info(processing_duration)
                logging.info(
                    'Video analysis alone spanned a cumulative {:.02f} '
                    'seconds'.format(total_analysis_duration))
                logging.info(
                    'exiting snva {} main process'.format(snva_version_string))
                breakLoop = True
        except socket.gaierror:
            # log something
            logging.info('gaierror')
            continue
        except ConnectionRefusedError:
            # log something else
            logging.info('connection refused')
            break
        except ws.exceptions.ConnectionClosed:
            logging.info('Connection lost. Attempting reconnect...')
            continue
        except Exception as e:
            logging.error("Unknown Exception")
            logging.error(e)
            raise e
        if breakLoop:
            break
def process_video(video_file_path, output_dir_path, class_name_map,
                  model_input_size, device_id_queue, return_code_queue,
                  log_queue, log_level, device_type, logical_device_count,
                  physical_device_count, ffmpeg_path, ffprobe_path,
                  model_path, node_name_map, gpu_memory_fraction, do_crop,
                  crop_width, crop_height, crop_x, crop_y,
                  do_extract_timestamps, timestamp_max_width,
                  timestamp_height, timestamp_x, timestamp_y,
                  do_deinterlace, num_channels, batch_size, do_smooth_probs,
                  smoothing_factor, do_binarize_probs):
  """Analyze one video end-to-end inside a dedicated child process.

  Pipeline: probe the video's dimensions with ffprobe, validate the crop
  and timestamp-extraction windows, construct an ffmpeg raw-RGB decode
  command, acquire a device id from the shared pool, run a VideoAnalyzer
  process to produce per-frame class probabilities (and optionally
  timestamp image crops), convert timestamp crops to strings, and finally
  write inference and event reports via IO.

  Every exit path — success, interrupt, or exception — does two things so
  the parent can always make progress:
    1. puts a None sentinel on log_queue and closes it (lets the parent's
       per-child logger thread finish), and
    2. puts exactly one {'return_code': ..., 'return_value': ...} dict on
       return_code_queue and closes it. On success the dict also carries
       'analysis_duration' (seconds spent inside the analyzer).

  Args:
    video_file_path: path of the video to analyze.
    output_dir_path: directory that receives the generated reports.
    class_name_map: class-id -> name map; len() gives the class count.
    model_input_size: model input edge length (e.g. 224 or 299).
    device_id_queue: multiprocessing.Queue of free logical device id strings.
    return_code_queue: queue on which the final status dict is reported.
    log_queue: queue consumed by the parent's logger thread for this child.
    log_level: level forwarded to configure_logger.
    device_type: 'gpu' or 'cpu'; controls CUDA_VISIBLE_DEVICES handling.
    logical_device_count: number of logical device slots.
    physical_device_count: number of physical devices; logical ids wrap
      onto physical ids modulo this count.
    ffmpeg_path: ffmpeg binary location.
    ffprobe_path: ffprobe binary location.
    model_path: model file forwarded to VideoAnalyzer.
    node_name_map: model io tensor names forwarded to VideoAnalyzer.
    gpu_memory_fraction: per-process GPU memory fraction.
    do_crop, crop_width, crop_height, crop_x, crop_y: optional crop window.
    do_extract_timestamps, timestamp_max_width, timestamp_height,
      timestamp_x, timestamp_y: optional on-frame timestamp crop window.
    do_deinterlace: if true, adds '-deinterlace' to the ffmpeg command.
    num_channels: channels per decoded frame (frames are decoded as rgb24).
    batch_size: inference batch size.
    do_smooth_probs, smoothing_factor, do_binarize_probs: report-time
      probability post-processing options.

  Returns:
    None. All results are communicated through queues and report files.
  """
  configure_logger(log_level, log_queue)

  # interrupt_queue records that SIGINT was seen (consulted after analysis);
  # child_interrupt_queue tells the VideoAnalyzer subprocess to halt early.
  interrupt_queue = Queue()
  child_interrupt_queue = Queue()

  def interrupt_handler(signal_number, _):
    # SIGINT handler: remember the interrupt and forward it to the analyzer.
    logging.warning('received interrupt signal {}.'.format(signal_number))

    interrupt_queue.put_nowait('_')

    # TODO: cancel timestamp/report generation when an interrupt is signalled
    logging.debug('instructing inference pipeline to halt.')
    child_interrupt_queue.put_nowait('_')

  signal.signal(signal.SIGINT, interrupt_handler)

  # Reports are named after the video file, without its extension.
  video_file_name = path.basename(video_file_path)
  video_file_name, _ = path.splitext(video_file_name)

  logging.info('preparing to analyze {}'.format(video_file_path))

  try:
    start = time()

    frame_width, frame_height, num_frames = IO.get_video_dimensions(
        video_file_path, ffprobe_path)

    end = time() - start

    processing_duration = IO.get_processing_duration(
        end, 'read video dimensions in')
    logging.info(processing_duration)
  except Exception as e:
    logging.error('encountered an unexpected error while fetching video '
                  'dimensions')
    logging.error(e)

    logging.debug(
        'will exit with code: exception and value get_video_dimensions')
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'exception',
        'return_value': 'get_video_dimensions'
    })
    return_code_queue.close()

    return

  try:
    # should_crop validates the requested crop window against the actual
    # frame size; it may disable cropping (or raise on invalid input).
    do_crop = should_crop(frame_width, frame_height, do_crop, crop_width,
                          crop_height, crop_x, crop_y)
  except Exception as e:
    logging.error(e)

    logging.debug('will exit with code: exception and value should_crop')
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'exception',
        'return_value': 'should_crop'
    })
    return_code_queue.close()

    return

  logging.debug('Constructing ffmpeg command')

  # Decode to raw rgb24 frames streamed over stdout ('pipe:1') so the
  # analyzer can read fixed-size frame buffers directly from the pipe.
  ffmpeg_command = [ffmpeg_path, '-i', video_file_path]

  if do_deinterlace:
    ffmpeg_command.append('-deinterlace')

  ffmpeg_command.extend([
      '-vcodec', 'rawvideo', '-pix_fmt', 'rgb24', '-vsync', 'vfr',
      '-hide_banner', '-loglevel', '0', '-f', 'image2pipe', 'pipe:1'
  ])

  try:
    # Likewise validate the timestamp crop window; may disable extraction.
    do_extract_timestamps = should_extract_timestamps(
        frame_width, frame_height, do_extract_timestamps,
        timestamp_max_width, timestamp_height, timestamp_x, timestamp_y)
  except Exception as e:
    logging.error(e)

    logging.debug(
        'will exit with code: exception and value should_extract_timestamps'
    )
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'exception',
        'return_value': 'should_extract_timestamps'
    })
    return_code_queue.close()

    return

  # Height-first shape matches the raw rgb24 byte layout ffmpeg emits.
  frame_shape = [frame_height, frame_width, num_channels]

  logging.debug('FFmpeg output frame shape == {}'.format(frame_shape))

  def release_device_id(device_id, device_id_queue):
    # Return a device id to the shared pool and clear CUDA_VISIBLE_DEVICES
    # so this process no longer pins a GPU.
    try:
      logging.debug(
          'attempting to unset CUDA_VISIBLE_DEVICES environment variable.'
      )
      os.environ.pop('CUDA_VISIBLE_DEVICES')
    except KeyError as ke:
      # The variable is absent on the cpu path before first assignment.
      logging.warning(ke)

    logging.debug('released {} device with id {}'.format(
        device_type, device_id))

    device_id_queue.put(device_id)
    device_id_queue.close()

  # maxsize=1: the analyzer publishes exactly one result tuple.
  result_queue = Queue(1)

  analyzer = VideoAnalyzer(
      frame_shape, num_frames, len(class_name_map), batch_size,
      model_input_size, model_path, device_type, logical_device_count,
      os.cpu_count(), node_name_map, gpu_memory_fraction,
      do_extract_timestamps, timestamp_x, timestamp_y, timestamp_height,
      timestamp_max_width, do_crop, crop_x, crop_y, crop_width, crop_height,
      ffmpeg_command, child_interrupt_queue, result_queue, video_file_name)

  # Blocks until a device id is free; ids are strings (enqueued by main()).
  device_id = device_id_queue.get()

  logging.debug('acquired {} device with id {}'.format(
      device_type, device_id))

  try:
    # If an interrupt arrived while waiting for a device, bail out before
    # doing any expensive work.
    _ = child_interrupt_queue.get_nowait()

    release_device_id(device_id, device_id_queue)

    logging.debug(
        'will exit with code: interrupt and value: process_video')
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'interrupt',
        'return_value': 'process_video'
    })
    return_code_queue.close()

    return
  except:
    # An empty queue means no interrupt was signalled; proceed.
    # NOTE(review): bare except also swallows unrelated errors — consider
    # narrowing to 'except Empty'.
    pass

  if device_type == 'gpu':
    # Logical ids beyond the physical count wrap onto physical GPUs so
    # several child processes can share one device.
    mapped_device_id = str(int(device_id) % physical_device_count)
    logging.debug(
        'mapped logical device_id {} to physical device_id {}'.format(
            device_id, mapped_device_id))
    logging.debug('setting CUDA_VISIBLE_DEVICES environment variable to '
                  '{}.'.format(mapped_device_id))
    os.environ['CUDA_VISIBLE_DEVICES'] = mapped_device_id
  else:
    logging.debug(
        'Setting CUDA_VISIBLE_DEVICES environment variable to None.')
    # An empty string hides all GPUs, forcing CPU execution.
    os.environ['CUDA_VISIBLE_DEVICES'] = ''

  try:
    start = time()

    analyzer.start()
    # Blocks until the analyzer publishes its single result tuple.
    num_analyzed_frames, probability_array, timestamp_array = result_queue.get(
    )
    analyzer.terminate()
    result_queue.close()

    end = time()

    analysis_duration = end - start

    processing_duration = IO.get_processing_duration(
        analysis_duration,
        'processed {} frames in'.format(num_analyzed_frames))
    logging.info(processing_duration)

    analyzer.join(timeout=15)

    try:
      # If join timed out, the analyzer is still alive and must be killed;
      # os.kill raising here is the normal (already-terminated) case.
      os.kill(analyzer.pid, signal.SIGKILL)
      logging.debug(
          'analyzer process {} remained alive following join timeout '
          'and had to be killed'.format(analyzer.pid))
    except:
      pass

    release_device_id(device_id, device_id_queue)

    # A frame-count shortfall is an interrupt if one was signalled,
    # otherwise it is treated as a pipeline bug.
    # NOTE(review): the except blocks below call release_device_id again,
    # so the id is enqueued twice on these two paths — verify intent.
    if num_analyzed_frames != num_frames:
      if interrupt_queue.empty():
        raise AssertionError('num_analyzed_frames ({}) != num_frames '
                             '({})'.format(num_analyzed_frames, num_frames))
      else:
        raise InterruptedError(
            'num_analyzed_frames ({}) != num_frames '
            '({})'.format(num_analyzed_frames, num_frames))
  except InterruptedError as ae:
    logging.error(ae)

    release_device_id(device_id, device_id_queue)

    logging.debug(
        'will exit with code: interrupt and value: analyze_video')
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'interrupt',
        'return_value': 'analyze_video'
    })
    return_code_queue.close()

    return
  except AssertionError as ae:
    logging.error(ae)

    release_device_id(device_id, device_id_queue)

    logging.debug(
        'will exit with code: assertion error and value: analyze_video')
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'assertion error',
        'return_value': 'analyze_video'
    })
    return_code_queue.close()

    return
  except Exception as e:
    logging.error(
        'encountered an unexpected error while analyzing {}'.format(
            video_file_name))
    logging.error(e)

    release_device_id(device_id, device_id_queue)

    logging.debug(
        'will exit with code: exception and value: analyze_video')
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'exception',
        'return_value': 'analyze_video'
    })
    return_code_queue.close()

    return

  logging.debug('converting timestamp images to strings')

  if do_extract_timestamps:
    try:
      start = time()

      timestamp_object = Timestamp(timestamp_height, timestamp_max_width)
      timestamp_strings, qa_flags = \
          timestamp_object.stringify_timestamps(timestamp_array)

      end = time() - start

      processing_duration = IO.get_processing_duration(
          end, 'timestamp strings converted in')
      logging.info(processing_duration)
    except Exception as e:
      # NOTE(review): the '.format(os.getpid())' below is a no-op — the
      # message contains no placeholder.
      logging.error('encountered an unexpected error while converting '
                    'timestamp image crops to strings'.format(
                        os.getpid()))
      logging.error(e)

      logging.debug(
          'will exit with code: exception and value: stringify_timestamps'
      )
      log_queue.put(None)
      log_queue.close()

      return_code_queue.put({
          'return_code': 'exception',
          'return_value': 'stringify_timestamps'
      })
      return_code_queue.close()

      return
  else:
    # No extraction: downstream report writers accept None for both.
    timestamp_strings = None
    qa_flags = None

  logging.debug('attempting to generate reports')

  try:
    start = time()

    IO.write_inference_report(video_file_name, output_dir_path,
                              probability_array, class_name_map,
                              timestamp_strings, qa_flags, do_smooth_probs,
                              smoothing_factor, do_binarize_probs)

    end = time() - start

    processing_duration = IO.get_processing_duration(
        end, 'generated inference reports in')
    logging.info(processing_duration)
  except Exception as e:
    logging.error(
        'encountered an unexpected error while generating inference report.'
    )
    logging.error(e)

    logging.debug(
        'will exit with code: exception and value: write_inference_report')
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'exception',
        'return_value': 'write_inference_report'
    })
    return_code_queue.close()

    return

  try:
    start = time()

    if do_smooth_probs:
      probability_array = IO.smooth_probs(probability_array,
                                          smoothing_factor)

    # Frame numbers are 1-based in the event reports.
    frame_numbers = [i + 1 for i in range(len(probability_array))]

    if timestamp_strings is not None:
      # assumes timestamp_strings is a numpy array of digit strings —
      # TODO confirm against Timestamp.stringify_timestamps
      timestamp_strings = timestamp_strings.astype(np.int32)

    trip = Trip(frame_numbers, timestamp_strings, qa_flags,
                probability_array, class_name_map)

    work_zone_events = trip.find_work_zone_events()

    if len(work_zone_events) > 0:
      logging.info('{} work zone events were found in {}'.format(
          len(work_zone_events), video_file_name))

      IO.write_event_report(video_file_name, output_dir_path,
                            work_zone_events)
    else:
      logging.info(
          'No work zone events were found in {}'.format(video_file_name))

    end = time() - start

    processing_duration = IO.get_processing_duration(
        end, 'generated event reports in')
    logging.info(processing_duration)
  except Exception as e:
    logging.error(
        'encountered an unexpected error while generating event report.')
    logging.error(e)

    logging.debug(
        'will exit with code: exception and value: write_event_report')
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'exception',
        'return_value': 'write_event_report'
    })
    return_code_queue.close()

    return

  logging.debug('will exit with code: success and value: {}'.format(
      num_analyzed_frames))
  log_queue.put(None)
  log_queue.close()

  return_code_queue.put({
      'return_code': 'success',
      'return_value': num_analyzed_frames,
      'analysis_duration': analysis_duration
  })
  return_code_queue.close()
def main():
  """Drive the batch analysis of all input videos across available devices.

  Resolves the ffmpeg/ffprobe binaries, enumerates the input videos,
  validates the model files, builds a pool of logical device ids, then
  launches one process_video child per video (at most logical_device_count
  concurrently), harvesting results via per-child return-code queues.

  Reads module-level state: args, snva_home, snva_version_string, log_queue,
  log_level, logger_thread, main_interrupt_queue, child_logger_fn, IO,
  process_video, get_valid_num_processes_per_device.

  Raises:
    ValueError: on a missing input path, missing model file, invalid model
      input size, or an invalid processes-per-device count.
  """
  logging.info('entering snva {} main process'.format(snva_version_string))

  total_num_video_to_process = None

  def interrupt_handler(signal_number, _):
    # SIGINT handler: note the interrupt so the dispatch loop stops, and if
    # no videos have been dispatched yet, shut the logger down directly.
    logging.warning('Main process received interrupt signal '
                    '{}.'.format(signal_number))
    main_interrupt_queue.put_nowait('_')

    # 'is None' short-circuits, so video_file_paths is only consulted after
    # it has been assigned below.
    if total_num_video_to_process is None \
        or total_num_video_to_process == len(video_file_paths):
      # Signal the logging thread to finish up
      logging.debug('signaling logger thread to end service.')

      log_queue.put_nowait(None)

      logger_thread.join()

      logging.shutdown()

  signal.signal(signal.SIGINT, interrupt_handler)

  # Prefer an explicitly configured ffmpeg; otherwise fall back to
  # platform-conventional install locations.
  try:
    ffmpeg_path = os.environ['FFMPEG_HOME']
  except KeyError:
    logging.warning('Environment variable FFMPEG_HOME not set. Attempting '
                    'to use default ffmpeg binary location.')

    if platform.system() == 'Windows':
      ffmpeg_path = 'ffmpeg.exe'
    else:
      ffmpeg_path = '/usr/local/bin/ffmpeg'

      if not path.exists(ffmpeg_path):
        ffmpeg_path = '/usr/bin/ffmpeg'

  logging.debug('FFMPEG path set to: {}'.format(ffmpeg_path))

  try:
    ffprobe_path = os.environ['FFPROBE_HOME']
  except KeyError:
    logging.warning('Environment variable FFPROBE_HOME not set. '
                    'Attempting to use default ffprobe binary location.')

    if platform.system() == 'Windows':
      ffprobe_path = 'ffprobe.exe'
    else:
      ffprobe_path = '/usr/local/bin/ffprobe'

      if not path.exists(ffprobe_path):
        ffprobe_path = '/usr/bin/ffprobe'

  logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))

  # TODO validate all video file paths in the provided text file if
  # args.inputpath is a text file
  if path.isdir(args.inputpath):
    video_file_names = set(IO.read_video_file_names(args.inputpath))
    video_file_paths = [path.join(args.inputpath, video_file_name)
                        for video_file_name in video_file_names]
  elif path.isfile(args.inputpath):
    if args.inputpath[-3:] == 'txt':
      if args.inputlistrootdirpath is None:
        raise ValueError('--inputlistrootdirpath must be specified when using a'
                         ' text file as the input.')
      with open(args.inputpath, newline='') as input_file:
        video_file_paths = []

        for line in input_file.readlines():
          line = line.rstrip()
          # Remap a host path onto the container mount point: remove the
          # configured root prefix, then join under /media/root.
          # Fixed: str.lstrip(prefix) strips a *character set*, not a
          # prefix, and could eat leading characters of the relative path.
          root_dir_path = args.inputlistrootdirpath
          if line.startswith(root_dir_path):
            video_file_path = line[len(root_dir_path):]
          else:
            video_file_path = line
          # Drop any leading separator so path.join does not discard the
          # /media/root prefix.
          video_file_path = path.join('/media/root',
                                      video_file_path.lstrip('/'))

          if path.isfile(video_file_path):
            video_file_paths.append(video_file_path)
          else:
            logging.warning('The video file at host path {} could not be found '
                            'at mapped path {} and will not be processed'.
                            format(line, video_file_path))
    else:
      video_file_paths = [args.inputpath]
  else:
    raise ValueError('The video file/folder specified at the path {} could '
                     'not be found.'.format(args.inputpath))

  models_root_dir_path = path.join(snva_home, args.modelsdirpath)

  models_dir_path = path.join(models_root_dir_path, args.modelname)

  logging.debug('models_dir_path set to {}'.format(models_dir_path))

  model_file_path = path.join(models_dir_path, args.protobuffilename)

  if not path.isfile(model_file_path):
    raise ValueError('The model specified at the path {} could not be '
                     'found.'.format(model_file_path))

  logging.debug('model_file_path set to {}'.format(model_file_path))

  model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')

  if not path.isfile(model_input_size_file_path):
    raise ValueError('The model input size file specified at the path {} '
                     'could not be found.'.format(model_input_size_file_path))

  logging.debug('model_input_size_file_path set to {}'.format(
      model_input_size_file_path))

  with open(model_input_size_file_path) as file:
    model_input_size_string = file.readline().rstrip()

    valid_size_set = ['224', '299']

    if model_input_size_string not in valid_size_set:
      raise ValueError('The model input size is not in the set {}.'.format(
          valid_size_set))

    model_input_size = int(model_input_size_string)

  # if logpath is the default value, expand it using the SNVA_HOME prefix,
  # otherwise, use the value explicitly passed by the user
  if args.outputpath == 'reports':
    output_dir_path = path.join(snva_home, args.outputpath)
  else:
    output_dir_path = args.outputpath

  if not path.isdir(output_dir_path):
    os.makedirs(output_dir_path)

  if args.excludepreviouslyprocessed:
    # Skip videos that already have an inference or event report on disk.
    inference_report_dir_path = path.join(output_dir_path,
                                          'inference_reports')

    if args.writeinferencereports and path.isdir(inference_report_dir_path):
      inference_report_file_names = os.listdir(inference_report_dir_path)
      inference_report_file_names = [path.splitext(name)[0]
                                     for name in inference_report_file_names]
      print('previously generated inference reports: {}'.format(
          inference_report_file_names))
    else:
      inference_report_file_names = None

    event_report_dir_path = path.join(output_dir_path, 'event_reports')

    if args.writeeventreports and path.isdir(event_report_dir_path):
      event_report_file_names = os.listdir(event_report_dir_path)
      event_report_file_names = [path.splitext(name)[0]
                                 for name in event_report_file_names]
      print('previously generated event reports: {}'.format(
          event_report_file_names))
    else:
      event_report_file_names = None

    file_paths_to_exclude = set()

    for video_file_path in video_file_paths:
      video_file_name = path.splitext(path.split(video_file_path)[1])[0]

      if (event_report_file_names and video_file_name
          in event_report_file_names) \
          or (inference_report_file_names and video_file_name
              in inference_report_file_names):
        file_paths_to_exclude.add(video_file_path)

    # Fixed: video_file_paths is a list, so the original
    # 'video_file_paths -= file_paths_to_exclude' raised a TypeError.
    video_file_paths = [video_file_path
                        for video_file_path in video_file_paths
                        if video_file_path not in file_paths_to_exclude]

  if args.ionodenamesfilepath is None \
      or not path.isfile(args.ionodenamesfilepath):
    io_node_names_path = path.join(models_dir_path, 'io_node_names.txt')
  else:
    io_node_names_path = args.ionodenamesfilepath

  logging.debug('io tensors path set to: {}'.format(io_node_names_path))

  node_name_map = IO.read_node_names(io_node_names_path)

  if args.classnamesfilepath is None \
      or not path.isfile(args.classnamesfilepath):
    class_names_path = path.join(models_root_dir_path, 'class_names.txt')
  else:
    class_names_path = args.classnamesfilepath

  logging.debug('labels path set to: {}'.format(class_names_path))

  if args.cpuonly:
    device_id_list = ['0']
    device_type = 'cpu'
  else:
    device_id_list = IO.get_device_ids()
    device_type = 'gpu'

  physical_device_count = len(device_id_list)

  logging.info('Found {} physical {} device(s).'.format(
      physical_device_count, device_type))

  valid_num_processes_list = get_valid_num_processes_per_device(device_type)

  if args.numprocessesperdevice not in valid_num_processes_list:
    raise ValueError(
        'The number of processes to assign to each {} device is expected '
        'to be in the set {}.'.format(device_type, valid_num_processes_list))

  # Extend the id list with logical ids; process_video maps them back onto
  # physical devices modulo physical_device_count.
  for i in range(physical_device_count,
                 physical_device_count * args.numprocessesperdevice):
    device_id_list.append(str(i))

  logical_device_count = len(device_id_list)

  logging.info('Generated an additional {} logical {} device(s).'.format(
      logical_device_count - physical_device_count, device_type))

  # child processes will dequeue and enqueue device names
  device_id_queue = Queue(logical_device_count)

  for device_id in device_id_list:
    device_id_queue.put(device_id)

  class_name_map = IO.read_class_names(class_names_path)

  logging.debug('loading model at path: {}'.format(model_file_path))

  return_code_queue_map = {}
  child_logger_thread_map = {}
  child_process_map = {}

  total_num_video_to_process = len(video_file_paths)

  total_num_processed_videos = 0
  total_num_processed_frames = 0
  total_analysis_duration = 0

  logging.info('Processing {} videos using {}'.format(
      total_num_video_to_process, args.modelname))

  def start_video_processor(video_file_path):
    # Spawn one process_video child plus a logger-relay thread for it,
    # registering both (keyed by file name) so they can be joined later.
    video_dir_path, video_file_name = path.split(video_file_path)

    return_code_queue = Queue()

    return_code_queue_map[video_file_name] = return_code_queue

    logging.debug('creating new child process.')

    child_log_queue = Queue()

    child_logger_thread = Thread(target=child_logger_fn,
                                 args=(log_queue, child_log_queue))

    child_logger_thread.start()

    child_logger_thread_map[video_file_name] = child_logger_thread

    # The configured GPU memory budget is split across the processes that
    # share each device.
    gpu_memory_fraction = args.gpumemoryfraction / args.numprocessesperdevice

    child_process = Process(
        target=process_video, name=path.splitext(video_file_name)[0],
        args=(video_file_path, output_dir_path, class_name_map,
              model_input_size, device_id_queue, return_code_queue,
              child_log_queue, log_level, device_type, logical_device_count,
              physical_device_count, ffmpeg_path, ffprobe_path,
              model_file_path, node_name_map, gpu_memory_fraction,
              args.crop, args.cropwidth, args.cropheight, args.cropx,
              args.cropy, args.extracttimestamps, args.timestampmaxwidth,
              args.timestampheight, args.timestampx, args.timestampy,
              args.deinterlace, args.numchannels, args.batchsize,
              args.smoothprobs, args.smoothingfactor, args.binarizeprobs))

    logging.debug('starting child process.')

    child_process.start()

    child_process_map[video_file_name] = child_process

  def close_completed_video_processors(
      total_num_processed_videos, total_num_processed_frames,
      total_analysis_duration):
    # Poll each child's return-code queue; for every child that has
    # reported, fold its stats into the totals, join its logger thread and
    # process (killing on timeout), and drop all of its bookkeeping.
    for video_file_name in list(return_code_queue_map.keys()):
      return_code_queue = return_code_queue_map[video_file_name]

      try:
        return_code_map = return_code_queue.get_nowait()

        return_code = return_code_map['return_code']
        return_value = return_code_map['return_value']

        child_process = child_process_map[video_file_name]

        logging.debug(
            'child process {} returned with exit code {} and exit value '
            '{}'.format(child_process.pid, return_code, return_value))

        if return_code == 'success':
          total_num_processed_videos += 1
          total_num_processed_frames += return_value
          total_analysis_duration += return_code_map['analysis_duration']

        child_logger_thread = child_logger_thread_map[video_file_name]

        logging.debug('joining logger thread for child process {}'.format(
            child_process.pid))

        child_logger_thread.join(timeout=15)

        if child_logger_thread.is_alive():
          logging.warning(
              'logger thread for child process {} remained alive following '
              'join timeout'.format(child_process.pid))

        logging.debug('joining child process {}'.format(child_process.pid))

        child_process.join(timeout=15)

        # if the child process has not yet terminated, kill the child
        # process at the risk of losing any log message not yet buffered
        # by the main logger
        try:
          os.kill(child_process.pid, signal.SIGKILL)
          logging.warning(
              'child process {} remained alive following join timeout and '
              'had to be killed'.format(child_process.pid))
        except OSError:
          # Normal case: the process already exited, so the kill fails.
          pass

        return_code_queue.close()

        return_code_queue_map.pop(video_file_name)
        child_logger_thread_map.pop(video_file_name)
        child_process_map.pop(video_file_name)
      except Empty:
        # This child has not reported yet; check it again next pass.
        pass

    return total_num_processed_videos, total_num_processed_frames, \
        total_analysis_duration

  start = time()

  while len(video_file_paths) > 0:
    # block if logical_device_count + 1 child processes are active
    while len(return_code_queue_map) > logical_device_count:
      total_num_processed_videos, total_num_processed_frames, \
          total_analysis_duration = close_completed_video_processors(
              total_num_processed_videos, total_num_processed_frames,
              total_analysis_duration)

    try:
      _ = main_interrupt_queue.get_nowait()
      logging.debug(
          'breaking out of child process generation following interrupt '
          'signal')
      break
    except Empty:
      # No interrupt pending; keep dispatching.
      pass

    video_file_path = video_file_paths.pop()

    try:
      start_video_processor(video_file_path)
    except Exception as e:
      logging.error('an unknown error has occured while processing '
                    '{}'.format(video_file_path))
      logging.error(e)

  while len(return_code_queue_map) > 0:
    logging.debug('waiting for the final {} child processes to '
                  'terminate'.format(len(return_code_queue_map)))

    total_num_processed_videos, total_num_processed_frames, \
        total_analysis_duration = close_completed_video_processors(
            total_num_processed_videos, total_num_processed_frames,
            total_analysis_duration)

    # by now, the last device_id_queue_len videos are being processed,
    # so we can afford to poll for their completion infrequently
    if len(return_code_queue_map) > 0:
      sleep_duration = 10
      logging.debug('sleeping for {} seconds'.format(sleep_duration))
      sleep(sleep_duration)

  end = time() - start

  processing_duration = IO.get_processing_duration(
      end, 'snva {} processed a total of {} videos and {} frames in:'.format(
          snva_version_string, total_num_processed_videos,
          total_num_processed_frames))
  logging.info(processing_duration)

  logging.info('Video analysis alone spanned a cumulative {:.02f} '
               'seconds'.format(total_analysis_duration))

  logging.info('exiting snva {} main process'.format(snva_version_string))