def run_my_code(self, images):
    sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'src'))
    sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models', 'research'))
    # print(sys.path)
    import object_detection_lib

    # Create the instance of ObjectDetection
    odc = object_detection_lib.ObjectDetection(0.5)
    # print("odc defined")

    predictions = []
    for image in images:
        # image is already a cvimg
        print(image)
        # print(odc.run_inference_for_single_image(image))
        output_dict_filtered = odc.run_inference_for_single_image(image)[1]
        labels = output_dict_filtered["detection_labels"]      # unicode strings
        boxes = output_dict_filtered["detection_boxes"]
        confidences = output_dict_filtered["detection_scores"]

        # Build the list of prediction dictionaries for this image
        prediction = []
        for i in range(len(labels)):
            prediction.append({"label": labels[i],
                               "confidence": confidences[i],
                               "x": boxes[i][0],
                               "y": boxes[i][1],
                               "w": boxes[i][2],
                               "h": boxes[i][3]})
        predictions.append(prediction)

    print(predictions)
    # predictions is a list with one entry per image; each entry is a list of
    # dicts such as [{'confidence': 0.71, 'label': 'person'}, {'label': 'duckie', 'confidence': 0.6}]
    return predictions
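# Hypothetical usage sketch (assumption, not from the original source): it assumes
# run_my_code above is a method of a wrapper class, here called Predictor, and that
# the input images are OpenCV BGR arrays loaded with cv2.imread(). The file names
# are placeholders for illustration only.
import cv2

predictor = Predictor()  # Predictor is a placeholder name for the enclosing class
frames = [cv2.imread('frame0.jpg'), cv2.imread('frame1.jpg')]  # example images
all_predictions = predictor.run_my_code(frames)

# Each entry is a per-image list of dicts like
# {'label': ..., 'confidence': ..., 'x': ..., 'y': ..., 'w': ..., 'h': ...}
for frame_predictions in all_predictions:
    for p in frame_predictions:
        print(p['label'], p['confidence'], p['x'], p['y'], p['w'], p['h'])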
def __init__(self):
    self.__bridge = CvBridge()
    # Publisher to publish the predicted image
    self.__image_pub = rospy.Publisher('/' + os.environ['DUCKIEBOT_NAME'] + '/prediction_images',
                                       Image, queue_size=1)
    # Publisher to publish the result of classes
    # self.__result_pub = rospy.Publisher('/' + os.environ['DUCKIEBOT_NAME'] + "/predicted_result",
    #                                     detection_results, queue_size=1)
    # Subscribe to topic which will kick off object detection in the next image
    # self.__command_sub = rospy.Subscriber('/' + os.environ['DUCKIEBOT_NAME'] + "/start",
    #                                       Empty, self.StartCallback)
    # Subscribe to the topic which will supply the image from the camera
    self.__image_sub = rospy.Subscriber('/' + os.environ['DUCKIEBOT_NAME'] + "/camera_node/image/raw",
                                        Image, self.Imagecallback)
    # Flag to indicate that we have been requested to use the next image
    self.__scan_next = True
    # Read the confidence level; any object with a level below this will not be used.
    # This parameter is set in the config.yaml file
    confidence_level = rospy.get_param('/object_detection/confidence_level', 0.50)
    # Create the object_detection_lib class instance
    self.__odc = object_detection_lib.ObjectDetection(confidence_level)
def __init__(self):
    self.__bridge = CvBridge()
    # Publisher to publish the updated image
    self.__image_pub = rospy.Publisher('tf_object_detection_node/image/compressed',
                                       CompressedImage, queue_size=1)
    # Subscribe to the topic which will supply the image from the camera
    self.__image_sub = rospy.Subscriber('raspicam_node/image/compressed',
                                        CompressedImage, self.Imagecallback)
    # Read the path for the models/research/object_detection directory from the
    # parameter server, or use this default
    object_detection_path = rospy.get_param('/object_detection/path',
                                            '/home/ubuntu/git/models/research/object_detection')
    # Read the confidence level; any object with a level below this will not be used
    confidence_level = rospy.get_param('/object_detection/confidence_level', 0.50)
    # Create the object_detection_lib class instance
    self.__odc = object_detection_lib.ObjectDetection(object_detection_path, confidence_level)
    # Create the Action server
    self.__as = actionlib.SimpleActionServer('object_detection', scan_for_objectsAction,
                                             self.do_action, False)
    self.__as.start()
def __init__(self):
    self.__bridge = CvBridge()
    # Publisher to publish the updated image
    # self.__image_pub = rospy.Publisher("test1", Image, queue_size=1)
    # Subscribe to the topic which will supply the image from the camera
    self.__image_sub = rospy.Subscriber("/bebop/image_raw", Image, self.Imagecallback,
                                        queue_size=None, buff_size=2**24)
    # Flag to indicate that we have been requested to use the next image
    # self.__scan_next = False
    # Read the path for the models/research/object_detection directory from the
    # parameter server, or use this default
    object_detection_path = rospy.get_param('/object_detection/path',
                                            '/home/lee_ming_wei/models/research/object_detection')
    # Read the confidence level; any object with a level below this will not be used
    confidence_level = rospy.get_param('/object_detection/confidence_level', 0.70)
    # Create the object_detection_lib class instance
    self.__odc = object_detection_lib.ObjectDetection(object_detection_path, confidence_level)
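# Hypothetical sketch (assumption, not from the original source) of the Imagecallback
# registered by the subscribers above. It shows one plausible shape for the callback:
# convert the incoming sensor_msgs/Image to an OpenCV image with CvBridge, then run
# the detector. The real callback in these nodes is not shown in this section.
def Imagecallback(self, data):
    # Convert the ROS Image message to an OpenCV BGR image
    cvimg = self.__bridge.imgmsg_to_cv2(data, "bgr8")

    # scan_for_objects returns the names of the detected objects and annotates
    # cvimg in place (see the standalone test scripts below)
    object_names = self.__odc.scan_for_objects(cvimg)
    rospy.loginfo("Detected objects: %s", str(object_names))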
#!/usr/bin/env python
import sys
import os
import cv2

sys.path.append('../src')
sys.path.append('../models/research/')
import object_detection_lib

# Create the instance of ObjectDetection
odc = object_detection_lib.ObjectDetection(0.5)

cvimg = cv2.imread('/Users/zhou/Desktop/duckietown/duckietown_raw_dataset/all_images/good/b_BR_doort_frame00380.jpg')

# Detect the objects
object_names = odc.scan_for_objects(cvimg)
print(object_names)

cv2.imshow('object detection', cvimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('adjusted_test_image.jpg', cvimg)
#!/usr/bin/env python
import sys
import os
import cv2

sys.path.append('/home/ubuntu/git/tf_object_detection/src')
import object_detection_lib

# Create the instance of ObjectDetection
odc = object_detection_lib.ObjectDetection(
    '/home/samuelc/dev/recyclables_detector_wss/models/research/object_detection', 0.5)

cvimg = cv2.imread('../img/images.jpeg')

# Detect the objects
object_names = odc.scan_for_objects(cvimg)
print(object_names)

cv2.imshow('object detection', cvimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('adjusted_test_image.jpg', cvimg)
#!/usr/bin/env python
import sys
import os
import cv2

sys.path.append('/home/ubuntu/git/tf_object_detection/src')
import object_detection_lib

# Create the instance of ObjectDetection
odc = object_detection_lib.ObjectDetection(
    '/home/ubuntu/git/models/research/object_detection', 0.5)

cvimg = cv2.imread('test_image.jpg')

# Detect the objects
object_names = odc.scan_for_objects(cvimg)
print(object_names)

cv2.imshow('object detection', cvimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('adjusted_test_image.jpg', cvimg)