def savedetectedimagetofile(Image, frameid, result, cameraname, display_str, framenameprefix):
    image_np_with_detections = Image.copy()
    visualization_util.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        result['boxes'],
        result['classes'],
        result['scores'],
        category_index,
        use_normalized_coordinates=False,
        max_boxes_to_draw=200,
        min_score_thresh=Threshold,
        agnostic_mode=False)
    #display_str=f'Inference time: {str(elapsed_time*1000)}ms, context_name: {context_name}, timestamp_micros: {frame_timestamp_micros}'
    visualization_util.draw_text_on_image(image_np_with_detections, 0, 0, display_str, color='black')
    #visualization_util.save_image_array_as_png(image_np_with_detections, outputfile)
    name = './output/frames/' + framenameprefix + str(frameid) + '_' + cameraname + '.jpg'
    #print('Creating...' + name)
    cv2.imwrite(name, cv2.cvtColor(image_np_with_detections, cv2.COLOR_RGB2BGR))  # write to image folder
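# Minimal usage sketch for savedetectedimagetofile (illustration only, not part of the
# original pipeline code): it assumes `camera_image` is an RGB numpy array and `detections`
# is the dict returned by wod_latency_submission.run_model with 'boxes'/'classes'/'scores'.
# The ./output/frames/ directory must already exist before cv2.imwrite is called.
#
#   detections = wod_latency_submission.run_model(camera_image)
#   savedetectedimagetofile(camera_image, frameid=0, result=detections,
#                           cameraname='FRONT',
#                           display_str='FRONT camera, frame 0',
#                           framenameprefix='val_')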
def evaluateallframescreatesubmission(frames, outputsubmissionfilepath, outputfile="./output_video1.mp4"):
    array_len = len(frames)  # 4931 frames for validation_0000
    # 20, 200 frames in one file, downsample by 10
    print("Frames length:", array_len)
    print("Final_array type:", type(frames))  # class 'list'
    objects = metrics_pb2.Objects()  # submission objects
    frame_width = 1920
    frame_height = 1280
    out = cv2.VideoWriter(outputfile, cv2.VideoWriter_fourcc('M', 'P', '4', 'V'), 5, (frame_width, frame_height))
    fps = FPS().start()
    wod_latency_submission.initialize_model()
    required_field = wod_latency_submission.DATA_FIELDS
    print(required_field)
    for frameid in range(array_len):
        #frameid = 5
        print("frameid:", frameid)
        # {'key':key, 'context_name':context_name, 'framedict':framedict}
        currentframe = frames[frameid]
        convertedframesdict = convert_frame_to_dict_cameras(currentframe)  # data_array[frameid]
        frame_timestamp_micros = convertedframesdict['TIMESTAMP']  # ['key']
        context_name = currentframe.context.name  # convertedframesdict['context_name']
        #framedict = convertedframesdict['framedict']
        # 10017090168044687777_6380_000_6400_000
        #print('context_name:', context_name)
        #print('frame_timestamp_micros:', frame_timestamp_micros)  # 1550083467346370
        #result = wod_latency_submission.run_model(framedict[required_field[0]], framedict[required_field[1]])
        #result = wod_latency_submission.run_model(**framedict)
        #Front_image = framedict[required_field[0]]
        start_time = time.time()
        #result = wod_latency_submission.run_model(Front_image)
        result = wod_latency_submission.run_model(**convertedframesdict)  # all camera images
        end_time = time.time()
        elapsed_time = end_time - start_time
        print('Inference time: ' + str(elapsed_time) + 's')
        #print(result)
        createsubmisionobject(objects, result['boxes'], result['classes'], result['scores'], context_name, frame_timestamp_micros)
        # Save the original image
        #output_path = "./test.png"
        #visualization_util.save_image_array_as_png(Front_image, output_path)
        Front_image = convertedframesdict[required_field[0]]
        image_np_with_detections = Front_image.copy()
        visualization_util.visualize_boxes_and_labels_on_image_array(
            image_np_with_detections,
            result['boxes'],
            result['classes'],
            result['scores'],
            category_index,
            use_normalized_coordinates=False,
            max_boxes_to_draw=200,
            min_score_thresh=Threshold,
            agnostic_mode=False)
        display_str = f'Inference time: {str(elapsed_time*1000)}ms, context_name: {context_name}, timestamp_micros: {frame_timestamp_micros}'
        visualization_util.draw_text_on_image(image_np_with_detections, 0, 0, display_str, color='black')
        #visualization_util.save_image_array_as_png(image_np_with_detections, outputfile)
        name = './Test_data/frame' + str(frameid) + '.jpg'
        #print('Creating...' + name)
        #cv2.imwrite(name, image_np_with_detections)  # write to image folder
        fps.update()
        #out.write(image_np_with_detections)
        out.write(cv2.cvtColor(image_np_with_detections, cv2.COLOR_RGB2BGR))  # cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    out.release()
    with open('objectsresult.pickle', 'wb') as f:
        pickle.dump(objects, f)
    submission = submission_pb2.Submission()
    submission.task = submission_pb2.Submission.DETECTION_2D
    submission.account_name = '*****@*****.**'
    submission.authors.append('Kaikai Liu')
    submission.affiliation = 'None'
    submission.unique_method_name = 'torchvisionfaster'
    submission.description = 'none'
    submission.method_link = "empty method"
    submission.sensor_type = submission_pb2.Submission.CAMERA_ALL
    submission.number_past_frames_exclude_current = 0
    submission.number_future_frames_exclude_current = 0
    submission.inference_results.CopyFrom(objects)
    with open(outputsubmissionfilepath, 'wb') as f:  # output submission file
        f.write(submission.SerializeToString())
    now = datetime.datetime.now()
    print("Finished validation, current date and time: ")
    print(now.strftime("%Y-%m-%d %H:%M:%S"))
def evaluateallframes(base_dir, filename, outputfile="./output_video1.mp4"):
    Final_array = np.load(base_dir / filename, allow_pickle=True, mmap_mode='r')
    data_array = Final_array['arr_0']
    array_len = len(data_array)  # 20, 200 frames in one file, downsample by 10
    print("Final_array length:", array_len)
    print("Final_array type:", type(data_array))  # numpy.ndarray
    frame_width = 1920
    frame_height = 1280
    out = cv2.VideoWriter(outputfile, cv2.VideoWriter_fourcc('M', 'P', '4', 'V'), 2, (frame_width, frame_height))
    fps = FPS().start()
    wod_latency_submission.initialize_model()
    required_field = wod_latency_submission.DATA_FIELDS
    print(required_field)
    for frameid in range(array_len):
        #frameid = 5
        print("frameid:", frameid)
        # {'key':key, 'context_name':context_name, 'framedict':framedict}
        convertedframesdict = data_array[frameid]
        frame_timestamp_micros = convertedframesdict['key']
        context_name = convertedframesdict['context_name']
        framedict = convertedframesdict['framedict']
        # 10017090168044687777_6380_000_6400_000
        #print('context_name:', context_name)
        #print('frame_timestamp_micros:', frame_timestamp_micros)  # 1550083467346370
        #result = wod_latency_submission.run_model(framedict[required_field[0]], framedict[required_field[1]])
        #result = wod_latency_submission.run_model(**framedict)
        Front_image = framedict[required_field[0]]
        start_time = time.time()
        result = wod_latency_submission.run_model(Front_image)
        end_time = time.time()
        elapsed_time = end_time - start_time
        print('Inference time: ' + str(elapsed_time) + 's')
        #print(result)
        # Save the original image
        #output_path = "./test.png"
        #visualization_util.save_image_array_as_png(Front_image, output_path)
        image_np_with_detections = Front_image.copy()
        visualization_util.visualize_boxes_and_labels_on_image_array(
            image_np_with_detections,
            result['boxes'],
            result['classes'],
            result['scores'],
            category_index,
            use_normalized_coordinates=False,
            max_boxes_to_draw=200,
            min_score_thresh=Threshold,
            agnostic_mode=False)
        #visualization_util.save_image_array_as_png(image_np_with_detections, outputfile)
        name = './Test_data/frame' + str(frameid) + '.jpg'
        #print('Creating...' + name)
        #cv2.imwrite(name, image_np_with_detections)  # write to image folder
        fps.update()
        out.write(image_np_with_detections)
    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    out.release()
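# Hedged usage sketch for evaluateallframes (illustration only): it assumes the .npz
# file was produced by an offline converter that stores a list of
# {'key', 'context_name', 'framedict'} dicts under 'arr_0'; the directory and file
# names below are placeholders, not paths from the original project.
#
#   # from pathlib import Path
#   # evaluateallframes(Path('./waymo_converted'), 'validation_0000.npz',
#   #                   outputfile='./output_video1.mp4')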
def evaluatesingleframe(base_dir, filename, frameid, outputfile="./testresult.png"):
    Final_array = np.load(base_dir / filename, allow_pickle=True, mmap_mode='r')
    data_array = Final_array['arr_0']
    array_len = len(data_array)  # 20, 200 frames in one file, downsample by 10
    print("Final_array length:", array_len)
    print("Final_array type:", type(data_array))  # numpy.ndarray
    # for frameid in range(array_len):
    #frameid = 5
    print("frameid:", frameid)
    # {'key':key, 'context_name':context_name, 'framedict':framedict}
    convertedframesdict = data_array[frameid]
    frame_timestamp_micros = convertedframesdict['key']
    context_name = convertedframesdict['context_name']
    framedict = convertedframesdict['framedict']
    # 10017090168044687777_6380_000_6400_000
    print('context_name:', context_name)
    print('frame_timestamp_micros:', frame_timestamp_micros)  # 1550083467346370
    start_time = time.time()
    wod_latency_submission.initialize_model()
    end_time = time.time()
    elapsed_time = end_time - start_time
    print('Model initialization elapsed time: ' + str(elapsed_time) + 's')
    required_field = wod_latency_submission.DATA_FIELDS
    print(required_field)
    #result = wod_latency_submission.run_model(framedict[required_field[0]], framedict[required_field[1]])
    #result = wod_latency_submission.run_model(**framedict)
    Front_image = framedict[required_field[0]]
    start_time = time.time()
    result = wod_latency_submission.run_model(Front_image)
    end_time = time.time()
    elapsed_time = end_time - start_time
    print('Inference time: ' + str(elapsed_time) + 's')
    #print(result)
    # Save the original image
    #output_path = "./test.png"
    #visualization_util.save_image_array_as_png(Front_image, output_path)
    image_np_with_detections = Front_image.copy()
    # category_index = {1: {'id': 1, 'name': 'VEHICLE'},
    #                   2: {'id': 2, 'name': 'PEDESTRIAN'},
    #                   3: {'id': 3, 'name': 'SIGN'},
    #                   4: {'id': 4, 'name': 'CYCLIST'}}
    # label_map_path = '2DObject/tfobjectdetection/waymo_labelmap.txt'
    # label_map = label_map_util.load_labelmap(label_map_path)
    # categories = label_map_util.convert_label_map_to_categories(
    #     label_map,
    #     max_num_classes=label_map_util.get_max_label_map_index(label_map),
    #     use_display_name=True)
    # category_index = label_map_util.create_category_index(categories)
    visualization_util.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        result['boxes'],
        result['classes'],
        result['scores'],
        category_index,
        use_normalized_coordinates=False,
        max_boxes_to_draw=200,
        min_score_thresh=Threshold,
        agnostic_mode=False)
    visualization_util.save_image_array_as_png(image_np_with_detections, outputfile)
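# Hedged usage sketch for evaluatesingleframe (illustration only), assuming the same
# converted .npz layout as evaluateallframes; frameid indexes into 'arr_0'. File and
# directory names below are placeholders.
#
#   # from pathlib import Path
#   # evaluatesingleframe(Path('./waymo_converted'), 'validation_0000.npz',
#   #                     frameid=5, outputfile='./testresult.png')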
    # data = {
    #     allcameras[0]: np.load(os.path.join(timestamp_dir, f'{imagename}.npy'))
    # }
    inputimage = np.load(os.path.join(timestamp_dir, f'{imagename}.npy'))
    resultdict = allcameraresult[imagename]  # one camera
    #print(f'imagename:{imagename}, resultdict:{resultdict}')
    boxes = resultdict['boxes']
    classes = resultdict['classes']
    scores = resultdict['scores']
    visualization_util.visualize_boxes_and_labels_on_image_array(
        inputimage,
        boxes,
        classes,
        scores,
        category_index,
        use_normalized_coordinates=False,
        max_boxes_to_draw=200,
        min_score_thresh=0.1,
        agnostic_mode=False)
    display_str = f'context_name: {context_name}, timestamp_micros: {timestamp_micros}'
    visualization_util.draw_text_on_image(inputimage, 0, 0, display_str, color='black')
    name = './frame' + nameprefix + str(imagename) + '.jpg'
    print('Creating...' + name)
    cv2.imwrite(name, cv2.cvtColor(inputimage, cv2.COLOR_RGB2BGR))