def save_raw(p_object):
    #http://chriskiehl.com/article/parallelism-in-one-line/
    #https://stackoverflow.com/questions/22411424/python-multiprocessing-pool-map-typeerror-string-indices-must-be-integers-n
    """Persist one work item to the database, dispatching on p_object.type.

    Runs inside a multiprocessing pool worker (see links above), so each call
    opens its own database session via database(camera_id).
    """
    # NOTE(review): 'r_file' is never assigned on the objects built elsewhere in
    # this file (they use 'rawfile') — confirm the attribute name is intended.
    if hasattr(p_object, 'r_file'):
        if p_object.type == 'Stream_Details_Raw':
            db = database(camera_id)
            id = p_object.stream_details_id
            rawfile = p_object.rawfile
            start_time = p_object.start_time
            # BUG FIX: the original did `p1_object = Stream_Details_Raw`, which
            # binds the CLASS and mutates class-level attributes shared across
            # the process; a fresh instance is what the DB layer must receive.
            p1_object = Stream_Details_Raw()
            p1_object.stream_details_id = id
            p1_object.rawfilename = rawfile
            p1_object.server_time = start_time
            db.put_stream_details_raw(p1_object)
            print('finished Stream_Details_Raw', os.getpid(), start_time, rawfile)
        elif p_object.type == 'Stream_Details':
            db = database(camera_id)
            # Only logs the intended operation today; the actual update/insert
            # is not implemented in this branch.
            if p_object.operation == 'Update':
                print('update')
            else:
                print('insert')
            print('finished Stream_Details', os.getpid())
        elif p_object.type == 'Analytics_MetaData':
            # Placeholder branch — logs only, no DB write yet.
            print('update')
            print('finished updating Analytics_MetaData', os.getpid())
def check_done_live(stream_details_id, last_file_name_processed):
    """Return True when the feed has ended.

    The feed is considered done when the Stream_Details row is no longer
    live AND the newest raw filename recorded in the DB equals the last
    file this process handled; otherwise returns False.
    """
    db = database(camera_id)
    details = db.session.query(Stream_Details).get(stream_details_id)
    status = False
    # `live` may be persisted as a real bool or as the string 'False';
    # anything else (True / 'Process') means the producer is still running.
    if details.live in (False, 'False'):
        newest = db.get_stream_details_raw('max_rawfilename', stream_details_id)
        if newest[0] == last_file_name_processed:
            print('End of feed')
            status = True
    db.close()
    return status
def save_meta_data(pobj):
    """Write one metadata row per detection found in pobj.last_frame_results.

    Intended to run in a pool worker, so it creates and closes its own
    database session.
    """
    db = database(camera_id)
    pre = pobj.Mypreprocobj
    seconds = pobj.total_frame_counter / pre.FPS
    record = Object()
    record.stream_details_id = pre.stream_details_id
    record.frame_number = pobj.total_frame_counter
    record.timestamp = pre.start_time + timedelta(seconds=seconds)
    record.seconds = seconds
    # The same `record` object is intentionally reused for every detection,
    # matching the original behavior; put_stream_metadata is assumed to copy
    # the values before the next iteration overwrites them.
    for detection in pobj.last_frame_results:
        record.label = detection['label']
        record.confidence = detection['confidence']
        record.position = {
            'topleft': detection['topleft'],
            'bottomright': detection['bottomright']
        }
        record.group_id = pobj.group_id
        db.put_stream_metadata(record)
    #print('saved ', record.timestamp, ' by ', os.getpid())
    db.close()
    return
def monitor(filename, manifest_name, segment_name, start_number, FPS, pool):
    """Watch the static folder for incoming .mkv fragments, run detection on
    each frame, write annotated frames to an HLS stream via ffmpeg, and save
    detection metadata through the worker pool.

    NOTE(review): indentation below is reconstructed from a whitespace-mangled
    source; nesting of a few statements is inferred and should be confirmed.
    """
    from Image_Recognize import draw_bound_box
    from ffmpeg_writer import FFMPEG_VideoWriter
    # program to monitor static folder for .mkv files as they are being written out
    # by get_media.py
    print('First file to start with', filename.format(start_number), start_number)
    #raw_file = static_dir + filename + '_rawfile' + str(start_number) + '.mkv'
    logfile = open(static_dir + 'logfile' + ".log", 'w+')
    i = 0  # 0 until the first good fragment warms up ffmpeg (one-time init flag)
    h = 0  # frame height
    w = 0  # frame width
    c = 0  # frame channels
    # you need CTRL C to quit this program
    # queue_value = multiprocessing.Queue()
    # queue_value.put(('Start',out_static_dir+segment_name))
    counter = 0
    start_time = time.time()
    last_video_time = 0
    x = 1  # displays the frame rate every 1 second
    total_frame_counter = 0
    group_id = 0  # bumped whenever the detected label set changes
    previous_frame_results = ''
    raw_file = static_dir + filename.format(start_number)
    # Conditional-expression precedence makes this: loop forever when STREAM,
    # otherwise loop while start_number <= TOTAL_ITERATIONS.
    while True and True if STREAM == True else (start_number <= TOTAL_ITERATIONS):
        if os.path.isfile(raw_file) == True:
            if i == 0:
                # do this only once for the entire session!
                # This is to warm up ffmpeg with frame shape
                raw_file = static_dir + filename.format(start_number)
                capture = cv2.VideoCapture(raw_file)
                ret, frame = capture.read()
                # the first mkv file might be a dud. In case all these steps will fail
                # until increment counter sets to next good file
                if ret:
                    h, w, c = frame.shape
                    ffmpegwriter = FFMPEG_VideoWriter(logfile, w, h, static_dir,
                                                      manifest_name, segment_name,
                                                      FPS)
                    capture.release()
                    i = 1
                    db = database(camera_id)
                    instance = db.get_stream_details_raw(
                        'rawfilename', filename.format(start_number))
                    stream_details_raw_start_time = instance[0]
                    stream_details_id = instance[1]
                    stream_details_instance = db.session.query(
                        Stream_Details).get(stream_details_id)
                    stream_details_instance.manifest_file_name = manifest_name
                    db.session.commit()
                    db.close()
                    Mypreprocobj = preprocessor_object(
                        stream_details_raw_start_time, stream_details_id, FPS)
                else:
                    print('-->dud fragment skipping')
            print('Processing', raw_file)
            capture = cv2.VideoCapture(raw_file)
            skip_counter = 0
            while (capture.isOpened()):
                try:
                    ret, frame = capture.read()
                    counter += 1
                    # periodic FPS report (every `x` seconds)
                    if (time.time() - start_time) > x:
                        print("FPS: ", counter / (time.time() - start_time))
                        counter = 0
                        start_time = time.time()
                    if ret:
                        # Run full detection every `skip_frames` frames; in
                        # between, redraw the previous boxes (reprint=True).
                        if skip_counter % skip_frames == 0 or skip_counter == 0:
                            # reprint = False
                            frame, last_frame_results = draw_bound_box(
                                frame, '', False)
                        else:
                            # reprint = True
                            frame, last_frame_results = draw_bound_box(
                                frame, last_frame_results, True)
                        ffmpegwriter.write_frame(frame)
                        skip_counter = skip_counter + 1
                        if total_frame_counter % save_frames == 0:
                            if last_frame_results != None and last_frame_results != '':
                                # write to metadata table only if there are results
                                if compare_labels(previous_frame_results,
                                                  last_frame_results) == False:
                                    group_id = group_id + 1
                                pobj = Object()
                                # deepcopy: loop variables mutate while the pool
                                # worker serializes, so snapshot them here.
                                pobj.total_frame_counter = deepcopy(
                                    total_frame_counter)
                                pobj.last_frame_results = deepcopy(
                                    last_frame_results)
                                pobj.group_id = deepcopy(group_id)
                                pobj.Mypreprocobj = Mypreprocobj
                                #save_meta_data(pobj)
                                pool.map(save_meta_data, (pobj, ))
                            else:
                                # detections just disappeared — close the group
                                if previous_frame_results != None and previous_frame_results != '':
                                    group_id = group_id + 1
                            previous_frame_results = last_frame_results
                        total_frame_counter = total_frame_counter + 1
                    else:
                        # end of this fragment
                        capture.release()
                        cv2.destroyAllWindows()
                        break
                except Exception as e:
                    # NOTE(review): swallows any per-frame error and keeps
                    # reading — confirm this best-effort behavior is intended.
                    print(e)
            #print('-->dud fragment skipping')
            # advance to the next expected fragment
            start_number = start_number + 1
            raw_file = static_dir + filename.format(start_number)
        else:
            if i == 0:
                print('-->Initial run. File not found. Waiting for 10 sec')
                time.sleep(10)
                print('Done Sleeping')
            else:
                print('-->Middle run. File not found. Waiting for .1 sec')
                time.sleep(.1)
                print('Done Sleeping')
                # stream_details_id only exists once i == 1, so this check is
                # placed inside the "middle run" branch — TODO confirm nesting.
                if check_done_live(stream_details_id,
                                   filename.format(start_number - 1)) == True:
                    #break out of the while loop
                    break
    # need to close everything and save one last time
    pool.close()
    pool.join()
    if i == 0:
        print('Need to increase TOTAL_ITERATIONS value!!')
    else:
        capture.release()
        cv2.destroyAllWindows()
        ffmpegwriter.close()
        db = database(camera_id)
        instance = db.get_analytics_metaData_object('manifest_next_value')
        instance.value = str(int(instance.value) + 1)
        db.session.commit()
        instance = db.get_analytics_metaData_object('raw_file_prev_value')
        instance.value = start_number
        db.session.commit()
        db.close()
        print('Saving done!')
    return
    # NOTE(review): this is the tail of a label-comparison helper whose `def`
    # line is not visible in this chunk; indentation reconstructed.
    for rows in current_labels:
        current_label_list.append(rows['label'])
    # True when the label sets of the two frames match exactly (order-sensitive)
    if last_label_list == current_label_list:
        return True
    else:
        return False


if __name__ == "__main__":
    print(
        'Run save media first to warm up the GPUs and then only start stream and get_media'
    )
    # worker pool used by monitor() to persist metadata asynchronously
    pool = multiprocessing.Pool(processes=no_of_processes)
    db = database(camera_id)
    filename = 'test_' + str(camera_id) + '_rawfile{:08d}.mkv'
    # get next value for manifest based on camera id which is instantiated above
    meta_data_instance = db.get_analytics_metaData_object(
        'manifest_next_value')
    i = int(meta_data_instance.value)
    manifest_name = 'best_' + str(camera_id) + '_' + str(i) + '' + '.m3u8'
    segment_name = 'best_' + str(camera_id) + '_' + str(i) + '_'
    # resume from the last raw fragment number recorded in the DB
    start_number = int(
        db.get_analytics_metaData_object('raw_file_prev_value').value)
    FPS = 30
    # Test harness
    monitor(filename, manifest_name, segment_name, start_number, FPS, pool)
def get_kvs_stream(pool, selType, arn=DEFAULT_ARN, date=''):
    """Open an AWS Kinesis Video GetMedia stream selected by `selType`
    ('' = continuation token, 'EARLIEST', 'PRODUCER_TIMESTAMP', 'NOW'),
    then pull and process payload chunks until the feed runs dry.

    Relies on module globals: session, continuation_token, w, h, static_dir,
    filename, process_stream — TODO confirm they are set before this is called.
    """
    # get camera id given arn name
    db = database(camera_id)
    stream_instance = db.get_stream_object('arn', arn)
    kinesis_client = session.client('kinesisvideo')
    #use response object below to find the correct end point
    #response = kinesis_client.list_streams()
    response = kinesis_client.get_data_endpoint(
        StreamARN=arn,
        APIName='GET_MEDIA'
    )
    video_client = session.client('kinesis-video-media',
                                  endpoint_url=response['DataEndpoint'])
    # 'StartSelectorType': 'FRAGMENT_NUMBER'|'SERVER_TIMESTAMP'|'PRODUCER_TIMESTAMP'|'NOW'|'EARLIEST'|'CONTINUATION_TOKEN',
    # stream = video_client.get_media(
    #     StreamARN=DEFAULT_ARN,
    #     StartSelector={'StartSelectorType': selType}
    # )
    # NOTE(review): every get_media call below uses DEFAULT_ARN, ignoring the
    # `arn` parameter used for the endpoint lookup above — confirm intended.
    if selType == '':
        # get stream from last continuation token
        stream = video_client.get_media(
            StreamARN=DEFAULT_ARN,
            StartSelector={'StartSelectorType': 'CONTINUATION_TOKEN', 'ContinuationToken': continuation_token}
        )
    if selType == 'EARLIEST':
        # get stream from last continuation token
        stream = video_client.get_media(
            StreamARN=DEFAULT_ARN,
            StartSelector={'StartSelectorType': 'EARLIEST'}
        )
    if selType == 'PRODUCER_TIMESTAMP':
        # get stream from last time
        # NOTE(review): branch tests PRODUCER_TIMESTAMP but requests
        # SERVER_TIMESTAMP — looks like a mismatch; confirm which is intended.
        stream = video_client.get_media(
            StreamARN=DEFAULT_ARN,
            StartSelector={'StartSelectorType': 'SERVER_TIMESTAMP', 'StartTimestamp': date}
        )
    if selType == 'NOW':
        stream = video_client.get_media(
            StreamARN=DEFAULT_ARN,
            StartSelector={'StartSelectorType': 'NOW'}
        )
        # live session: build a fresh details object
        stream_details_instance = Object()
        stream_details_instance.stream_id = stream_instance.id
        stream_details_instance.live = True
        stream_details_instance.resolution = str(w) + 'x' + str(h) + 'x3'
    else:
        # old stream. Check if stream details record exists or not
        p_object = Object()
        p_object.id = stream_instance.id
        p_object.start_time = date
        stream_details_instance = db.get_stream_details_object('start_time',
                                                               p_object)
        if stream_details_instance is None:
            # new stream details instance
            # NOTE(review): this binds the CLASS (no parentheses) and mutates
            # class attributes — likely should be Stream_Details(); confirm.
            stream_details_instance = Stream_Details
            stream_details_instance.stream_id = stream_instance.id
            stream_details_instance.live = False
            stream_details_instance.resolution = str(w) + 'x' + str(h) + 'x3'
        else:
            print('Session details exist with same timestamp!!!! Exiting!')
            exit()
    # Note this amount might not be exactly correct because the data is already compressed
    read_amt = h*w*3*1*1  #(h*w*no. of pixels*fps*1 seconds worth)
    #TODO need i to be in db otherwise will continue to overwrite files
    meta_data_instance = db.get_analytics_metaData_object('raw_file_next_value')
    i = int(meta_data_instance.value)  # next raw-file sequence number
    #j = 0
    write_buffer = b''
    # get some time variables
    onesecond = 1
    counter = 1
    start_time = time.time()
    # end of timing variables
    first_time = True
    while True:
        datafeedstreamBody = stream['Payload'].read(amt=read_amt)
        write_buffer, last_c_token, i, s_time, e_time = process_stream(
            datafeedstreamBody, static_dir, filename, i, write_buffer, db,
            stream_details_instance, pool)
        if first_time == True:
            # record the session start time once, on the first chunk
            first_time = False
            stream_details_instance.start_time = s_time
            db.put_stream_details(stream_details_instance)
        #print(sys.getsizeof(datafeedstreamBody),j)
        #j = j +1
        counter += 1
        # throughput report once per second
        if (time.time() - start_time) > onesecond:
            #print('Last token found', last_c_token)
            #print("Bytes processed per second: ", read_amt / (counter / (time.time() - start_time)), end="", flush=True)
            print("MB processed per second: ", (read_amt/1024/1024) / (counter / (time.time() - start_time)))
            counter = 0
            start_time = time.time()
        # a short read means the feed is (for now) exhausted
        if sys.getsizeof(datafeedstreamBody) < read_amt:
            print('Exiting with total bytes pulled =', read_amt*i)
            #TODO need to sleep here if streaming - because program might be pulling faster than ingest
            break
    print('Streaming done!')
    stream_details_instance.end_time = e_time
    db.put_stream_details(stream_details_instance)
    pool.close()