def run(self):
    """Detects objects in each image and returns the objects detected.

    Also measures the time taken to analyze an image (regardless of the
    accuracy of the detection).
    """
    perception_results = {}
    # If no checkpoint is specified, `accurate` is assumed by default. In this
    # case, we want to use our traffic checkpoint. The Detector can also take
    # a config object.
    detector = Detector(self.checkpoint)
    for full_image_path in self.list_of_images:
        if os.path.exists(full_image_path):
            image_path, image_name = os.path.split(full_image_path)
            image_ext = image_name.split('.')[-1]
            image = read_image(full_image_path)
            perception_results.update({image_name: {}})
            # predict() returns a dictionary with the detections; time the call.
            start_time = datetime.datetime.now()
            objects = detector.predict(image)
            end_time = datetime.datetime.now()
            time_to_get_objects = end_time - start_time
            perception_results[image_name].update({"objects": objects})
            perception_results[image_name].update(
                {"detection_time": time_to_get_objects})
            if self.to_save_result:
                self.save_image_and_objects(image, image_name, image_ext, objects)
        else:
            print("ERROR: image not found: %s" % full_image_path)
    del detector
    return perception_results
def validate_perception(img):
    """Runs the detector over a single image and times the predict() call."""
    d = Detector(checkpoint=checkpoint_name)
    image = read_image(img)
    start = datetime.datetime.now()
    p = d.predict(image)
    end = datetime.datetime.now()
    print(p)
    execution_time = end - start
    # The elapsed time (a timedelta) keys the returned detections.
    return {execution_time: p}
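# A minimal usage sketch for validate_perception above. The checkpoint name and
# the image path are illustrative placeholders; checkpoint_name must be defined
# at module level before the call.
checkpoint_name = 'accurate'
result = validate_perception('samples/rainy_street.jpg')
for elapsed, detections in result.items():
    print("predict() took %s and returned %d objects" % (elapsed, len(detections)))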
class ImgDetector(object):
    def __init__(self):
        super(ImgDetector, self).__init__()
        self.detector = Detector()

    def predict_img(self, image_path: str) -> list:
        image_ = self.read_the_image(image_path)
        objects = self.detector.predict(image_)
        self.visual_objects(image_, objects)
        return objects

    @staticmethod
    def visual_objects(image, objects):
        vis_objects(image, objects).save(BaseConfig.IMAGE_OUTPUT)

    @staticmethod
    def read_the_image(image):
        image_ = read_image(image)
        return image_

    @staticmethod
    def find_object(objects: list, obj_label: str):
        # Return the first detection whose label matches, as (Entity, True);
        # otherwise return (None, False).
        for item in objects:
            if item["label"] == obj_label:
                ent = Entity().set_entity(item["bbox"], item["label"], item["prob"])
                return ent, True
        return None, False
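# A minimal usage sketch for the ImgDetector class above. The image path and
# the "person" label are illustrative assumptions; BaseConfig.IMAGE_OUTPUT must
# point to a writable file for visual_objects() to save the visualization.
img_detector = ImgDetector()
detected = img_detector.predict_img("test_images/street.jpg")
entity, found = ImgDetector.find_object(detected, "person")
if found:
    print("Found:", entity.label)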
def detectTableBoundaries(self):
    Helper.print("Before the Checkpoint")
    detector = Detector(checkpoint=checkpointName)
    for folder in self.image_folder_set:
        Helper.print("Detecting table boundaries of file: " + folder)
        book = xlwt.Workbook()
        worksheet = book.add_sheet(folder)
        base_folder_path = os.path.join(self.preprocessed_images_path, folder)
        images = FileHelper.getAllFilesInFolder(base_folder_path)
        current_row = 0
        for image in images:
            page = re.search(r"^.*_(\d*)\..*$", image).group(1)
            tableInfos = self.detect(os.path.join(base_folder_path, image), detector)
            for tableInfo in tableInfos:
                worksheet.write(current_row, 0, page)
                # left = xmin
                worksheet.write(current_row, 1, tableInfo["bbox"][0])
                # top = ymin
                worksheet.write(current_row, 2, tableInfo["bbox"][1])
                # right = xmax
                worksheet.write(current_row, 3, tableInfo["bbox"][2])
                # bottom = ymax
                worksheet.write(current_row, 4, tableInfo["bbox"][3])
                current_row += 1
        Helper.print("Create Excel of table boundaries for file: " + folder)
        book.save(os.path.join(self.output_path, self.output_boundaries_path, folder) + ".xls")
class ValidatePerception(object):
    # The Detector is created once as a class-level attribute and shared by
    # all instances.
    detector = Detector()

    def __init__(self, data_set):
        self.image = data_set

    def get_results(self):
        res = self.detector.predict(self.image)
        return res
def run_module():
    """Runs Luminoth to simulate perception outputs.

    Loads every image of the configured format from the hard-coded test
    directory and runs the `cars` checkpoint over them.

    Returns:
        List of objects detected (bounding box with probability and name of
        object).
    """
    # IMAGE_FORMATS is assumed to be a module-level extension string (e.g. 'jpg').
    images = [
        cv2.imread(file)
        for file in glob.glob(r'E:\CarTests\*.' + IMAGE_FORMATS)
    ]
    detector = Detector(checkpoint='cars')
    # Returns a dictionary with the detections.
    objects = detector.predict(images)
    print(objects)
    vis_objects(images, objects).save(r'E:\CarTests\objects')
    return objects
from pathlib import Path
from datetime import date

from luminoth import Detector, read_image, vis_objects


def predictor_object(checkpoint, image, save_image=False):
    prediction_objects = Detector(checkpoint=checkpoint).predict(read_image(image))
    image_output_path = str(
        Path.cwd() / 'images_output' / 'output-{date}.png').format(date=date.today())
    if save_image:
        vis_objects(read_image(image), prediction_objects).save(image_output_path)
    return prediction_objects
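# Hypothetical call of predictor_object above: 'accurate' is Luminoth's default
# checkpoint alias and the image path is a placeholder. The images_output
# directory must already exist under the current working directory for the
# visualization to be saved.
objects = predictor_object('accurate', 'samples/traffic.jpg', save_image=True)
print(objects)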
def detector():
    return Detector()
def __init__(self):
    super(ImgDetector, self).__init__()
    self.detector = Detector()
def run(self, save_path):
    detector = Detector(self.checkpoint)
    image = read_image(self.image)
    objects = detector.predict(image)
    vis_objects(image, objects).save(save_path)
    return objects
    # Tail of the per-image prediction routine; outlier_rejection and
    # save_image are defined elsewhere in this module.
    # Use only the box coordinates.
    bounding_box = [b['bbox'] for b in predicted_objects]
    bounding_box = outlier_rejection(bounding_box)
    for info, box in zip(predicted_objects, bounding_box):
        vertebra = box
        label = str(info['label'])
        prob = str(info['prob'])
        display_text = "Label:" + label + ",Prob:" + prob
        print(display_text)
        cv2.rectangle(image, (vertebra[0], vertebra[1]),
                      (vertebra[2], vertebra[3]), (0, 255, 0), 5)
        print(vertebra[0], vertebra[1], vertebra[2], vertebra[3])
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(image, display_text, (vertebra[0], vertebra[1]), font, 1,
                    (0, 0, 255), 3, cv2.LINE_AA)
    return image


if __name__ == "__main__":
    checkpoint = "d40a34821081"
    detection = Detector(checkpoint)
    image_path = "data/cropped test/"
    output_path = "pred crop1/"
    save_image(image_path, detection, output_path)
# pip install luminoth
import os

from luminoth import Detector, read_image, vis_objects
from PIL import Image

# Change the current directory to the one containing this .py file.
try:
    os.chdir(os.path.dirname(__file__))
except (NameError, OSError):
    pass

# Read the .jpg image.
image = read_image('Pets.jpg')

# Create the detector.
detector = Detector()

# predict() returns a dictionary with the detections.
objects = detector.predict(image)
print(objects)

# Create a .jpg file with the detections drawn on it.
vis_objects(image, objects).save('Pets-out.jpg')

# Show the image.
image = Image.open('Pets-out.jpg')
image.show()

# Delete the image.
file_path = 'Pets-out.jpg'
os.remove(file_path)
from luminoth import Detector, read_image, vis_objects

# If no checkpoint is specified, `accurate` is assumed by default; here we use
# a specific trained checkpoint. The Detector can also take a config object.
detector = Detector(checkpoint='b9bdfe47f743')


def predict_anchor(
        image_path="/Users/balajidr/Developer/fyp_final/mainapp/functions/RCNN/testimages/slide-Table.jpg"):
    image = read_image(image_path)
    # predict() returns a dictionary with the detections.
    objects = detector.predict(image)
    print(objects)
    vis_objects(image, objects).save('traffic-out.png')
    return objects
from luminoth import Detector, read_image as ri

EXPECTED_RESULTS = [{
    'bbox': [331, 395, 793, 1877],
    'label': 'person',
    'prob': 0.9997
}, {
    'bbox': [728, 408, 1090, 1895],
    'label': 'person',
    'prob': 0.9995
}, {
    'bbox': [325, 404, 618, 1304],
    'label': 'person',
    'prob': 0.9515
}]

QA_test_set = ri('heavy_rain.jpg')
detector = Detector(checkpoint='checkpoint_A1.0.1')
ACTUAL_RESULTS = detector.predict(QA_test_set)
assert ACTUAL_RESULTS == EXPECTED_RESULTS, "Results do not match!"