Example No. 1
def processImage(input_file, output_file):
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    execution_path = os.getcwd()
    # input_file = sys.argv[1]
    # output_file = sys.argv[2]

    detector = ObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
    detector.loadModel()

    custom_objects = detector.CustomObjects(bottle=True,
                                            apple=True,
                                            orange=True,
                                            sandwich=True,
                                            hot_dog=True,
                                            pizza=True,
                                            donut=True,
                                            cake=True)
    detections = detector.detectCustomObjectsFromImage(
        custom_objects=custom_objects,
        input_image=os.path.join(execution_path, input_file),
        output_image_path=os.path.join(execution_path, output_file),
        minimum_percentage_probability=30)

    totalPrice = 0
    for eachObject in detections:
        totalPrice = totalPrice + prices[eachObject["name"]]
        # print(eachObject["name"] , " : ", eachObject["percentage_probability"], " : ", eachObject["box_points"] , " : " , prices[eachObject["name"]] , " Baht" )
        # print("================================")
    return (totalPrice, output_file)
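This function reads a module-level prices dictionary (prices in Baht per COCO class name, judging by the commented-out print) that is not part of the snippet. A minimal sketch of the assumed surrounding module; the price values and file names are placeholders, and yolo.h5 is assumed to be in the working directory:

# Hypothetical setup assumed by processImage(); the prices (in Baht) are placeholders.
import os
from imageai.Detection import ObjectDetection

prices = {
    "bottle": 10, "apple": 15, "orange": 15, "sandwich": 40,
    "hot dog": 35, "pizza": 99, "donut": 25, "cake": 45,
}

total, annotated = processImage("tray.jpg", "tray_detected.jpg")
print(total, "Baht ->", annotated)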
Example No. 2
    def Crop_image(self, params):
        url = ""
        if 'url' in params.keys():
            url = params['url']
            print("herrrrrrrrrrrrrrrrrrrrrrrrr:", url)
        else:
            return "Error: No url field provided. Please specify a url."
        im = self.getimage(params)
        detector = ObjectDetection()
        detector.setModelTypeAsRetinaNet()
        detector.setModelPath(os.path.join(parent, 'Mobilaty\\project\\public\\resnet50_coco_best_v2.0.1.h5'))
        detector.loadModel()
        custom_objects = detector.CustomObjects(cell_phone=True)
        # image_path is presumably set elsewhere in the class (not shown in this excerpt)
        detections = detector.detectCustomObjectsFromImage(input_image=image_path,
                                                           output_image_path=os.path.join(parent, 'Mobilaty\\project\\public\\result.jpeg'),
                                                           custom_objects=custom_objects, minimum_percentage_probability=50)
        objs = ""
        counter = 0
        for eachObject in detections:
            objs += eachObject["name"] + " : " + str(eachObject["percentage_probability"])
            print(eachObject["name"] + " : " + str(eachObject["percentage_probability"]))

            bbox = eachObject["box_points"]
            crop_img = im[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]
            try:
                cv2.imwrite(os.path.join(parent, 'Mobilaty\\project\\public\\' + str(counter) + "image.jpg"), crop_img)
            except:
                return str("Error in write")
            counter += 1
        os.remove(image_path)
        os.remove(os.path.join(parent, 'Mobilaty\\project\\public\\result.jpeg'))
        return str(objs)
Example No. 3
def init_detection():
    global detector, custom_objects
    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath("../resnet50_coco_best_v2.0.1.h5")
    detector.loadModel()
    custom_objects = detector.CustomObjects(person=True)
Example No. 4
class Detector():
    def __init__(self):
        ## Set up detector
        self.detector = ObjectDetection()

        # Load model
        self.detector.setModelTypeAsYOLOv3()
        #self.detector.setModelTypeAsRetinaNet()
        model_name = 'pretrained-yolov3.h5'  # set to the name of the desired model file
        model_path = os.path.join(os.getcwd(), model_name)
        self.detector.setModelPath(model_path)
        self.detector.loadModel()

        # Set custom object detection for dogs
        # For more options, see: https://github.com/OlafenwaMoses/ImageAI/tree/master/imageai/Detection#---custom-object-detection
        self.custom_objects = self.detector.CustomObjects(dog=True)

    def detect(self, img):
        # Detect
        '''
        _ , detections = self.detector.detectCustomObjectsFromImage(input_type = "array",
                                                                    input_image= img,
                                                                    output_type = "array",
                                                                    custom_objects=self.custom_objects,
                                                                    minimum_percentage_probability=80)
        '''
        _, detections = self.detector.detectCustomObjectsFromImage(
            input_type="array",
            input_image=img,
            output_type="array",
            custom_objects=self.custom_objects,
            minimum_percentage_probability=80)
        bounding_boxes = []
        for d in detections:
            bounding_boxes.append(d['box_points'])
        return bounding_boxes
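Since detect() works on an in-memory image, a caller would typically hand it an OpenCV array. A small usage sketch, assuming the Detector class above with pretrained-yolov3.h5 in the working directory; dog.jpg is a placeholder:

import cv2

detector = Detector()
frame = cv2.imread("dog.jpg")      # placeholder input image
boxes = detector.detect(frame)     # list of [x1, y1, x2, y2] box points
for x1, y1, x2, y2 in boxes:
    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imwrite("dog_boxes.jpg", frame)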
Example No. 5
def tell_dire():
    # The paths are hard-coded: the input image is C:\\Users\\skywf\\Desktop\\docker_image.jpg and the output image goes straight to the desktop
    execution_path = os.getcwd()
    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(
        os.path.join(execution_path, 'resnet50_coco_best_v2.0.1.h5'))
    detector.loadModel()
    # a = time.time()
    custom_objects = detector.CustomObjects(book=True, cell_phone=True)
    detections = detector.detectCustomObjectsFromImage(
        custom_objects=custom_objects,
        input_image='C:\\Users\\skywf\\Desktop\\docker_image.jpg',
        output_image_path='C:\\Users\\skywf\\Desktop\\imagenew.jpg',
        minimum_percentage_probability=40,
        box_show=True)
    print(len(detections))
    if (len(detections) == 0):
        return 'forward'
    # b = time.time()
    # print('the time is {}'.format(b-a))
    # print('the direction is {}'.format(detections[0]['direction']))
    else:
        for eachObject in detections:
            print(eachObject['name'] + ':' +
                  str(eachObject['percentage_probability']))
        return detections[0]['direction']
Example No. 6
class CameraCheck:
    _instance = None

    def __new__(cls, *args, **kwargs):
        if not CameraCheck._instance:
            # object.__new__ does not accept extra arguments when __new__ is overridden
            CameraCheck._instance = super(CameraCheck, cls).__new__(cls)
        return CameraCheck._instance

    def __init__(self):
        self.detector = ObjectDetection()
        # self.detector.setModelTypeAsTinyYOLOv3()
        # self.detector.setModelPath("yolo-tiny.h5")
        self.detector.setModelTypeAsYOLOv3()
        self.detector.setModelPath("yolo.h5")
        self.detector.loadModel()
        self.custom = self.detector.CustomObjects(person=True)

    def check(self, image_name):
        execution_path = os.getcwd()
        detections = self.detector.detectCustomObjectsFromImage(
            custom_objects=self.custom,
            input_image=os.path.join(execution_path, image_name),
            output_image_path=os.path.join(execution_path,
                                           "detected_people.jpg"),
            minimum_percentage_probability=20)
        return len(detections)
Example No. 7
class Detector():
    def __init__(self):
        ## Set up detector
        self.detector = ObjectDetection()

        # Load model
        self.detector.setModelTypeAsRetinaNet()
        # self.detector.setModelTypeAsYOLOv3()  # would require YOLOv3 weights (e.g. yolo.h5) instead of the ResNet file below
        model_path = 'resnet50_coco_best_v2.0.1.h5'
        self.detector.setModelPath(os.path.join(os.getcwd(), model_path))
        self.detector.loadModel(detection_speed='fast')

        # Set custom object detection for dogs
        # For more options, see: https://github.com/OlafenwaMoses/ImageAI/tree/master/imageai/Detection#---custom-object-detection
        self.custom_objects = self.detector.CustomObjects(dog=True)

    def detect(self, input_path, output_path):
        # Detect
        detections = self.detector.detectCustomObjectsFromImage(
            input_image=input_path,
            output_image_path=output_path,
            custom_objects=self.custom_objects,
            minimum_percentage_probability=45)
        bounding_boxes = []
        for d in detections:
            bounding_boxes.append(d['box_points'])
        return bounding_boxes
Example No. 8
def init():
    print("Loading models...")
    global RACKDETECTION_MODEL, LAMPDETECTION_MODEL, BASKETDETECTION_MODEL, FRAMEDETECTION_MODEL, COLORSDETECTION_MODEL, graph, detector, custom
    # TODO: read MODELS_PATH from an environment variable
    MODELS_PATH = 'C:/Users/lucasle/Desktop/MP/LHFork_2/MjukvaruProjekt/bfr/models/'
    #models/
    PATH_BIKEDETECTION_MODEL = MODELS_PATH + 'resnet50_coco_best_v2.0.1.h5'
    PATH_RACKDETECTION_MODEL = MODELS_PATH + 'rack/Adam_10_epochs_4layers.h5'  #Adam_10_epochs_4layers.h5
    PATH_LAMPDETECTION_MODEL = MODELS_PATH + 'lamp/Adam_3_epochs_4layers_lamp.h5'
    PATH_FRAMEDETECTION_MODEL = MODELS_PATH + 'frame/Adam_5_epochs_4layers_frame.h5'
    PATH_BASKETDETECTION_MODEL = MODELS_PATH + 'basket/Adam_5_epochs_4layers_bakset.h5'
    PATH_COLORSDETECTION_MODEL = MODELS_PATH + 'colors/Adam_5_epochs_4layers_colors.h5'

    #Models
    RACKDETECTION_MODEL = load_model(PATH_RACKDETECTION_MODEL)
    LAMPDETECTION_MODEL = load_model(PATH_LAMPDETECTION_MODEL)
    BASKETDETECTION_MODEL = load_model(PATH_BASKETDETECTION_MODEL)
    FRAMEDETECTION_MODEL = load_model(PATH_FRAMEDETECTION_MODEL)
    COLORSDETECTION_MODEL = load_model(PATH_COLORSDETECTION_MODEL)

    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(PATH_BIKEDETECTION_MODEL)
    detector.loadModel()
    custom = detector.CustomObjects(bicycle=True)

    graph = tf.get_default_graph()
    print("Loading done!")
Example No. 9
def init_detector():
    detector = ObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath('D:/_assets/models/yolo.h5')
    detector.loadModel()
    custom_objects = detector.CustomObjects(person=True)
    return detector, custom_objects
Example No. 10
def predictions():
    
    """ Description: 
    Object detection using Tensorflow and Resnet50.
    :raises:
    Predicted Objects
    :rtype:
    Dictionary
    """
    execution_path = os.getcwd()
    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(os.path.join(execution_path , "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel()
    custom_objects = detector.CustomObjects(
                                            bicycle=bicycle, 
                                            fire_hydrant=fire_hydrant
                                            )
    detections = detector.detectCustomObjectsFromImage(
                                                       custom_objects = custom_objects, 
                                                       thread_safe = True, 
                                                       minimum_percentage_probability=30, 
                                                       input_image=os.path.join(execution_path , "images/image.png"), 
                                                       output_image_path=os.path.join(execution_path , "images/imagenew.png")
                                                       )
    return detections
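CustomObjects(bicycle=bicycle, fire_hydrant=fire_hydrant) reads two names that are not defined in the snippet, presumably module-level booleans. A minimal sketch of the assumed globals and a call, assuming the model file and images/image.png exist in the working directory:

# Assumed module-level toggles for the classes predictions() should keep.
bicycle = True
fire_hydrant = True

for d in predictions():
    print(d["name"], d["percentage_probability"], d["box_points"])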
Example No. 11
def body_detection(file):
    detector = ObjectDetection()

    # set the model type
    # 1. Retina net
    # 2. Yolov3 (currently used)
    # 3. Yolo-tiny
    detector.setModelTypeAsYOLOv3()

    # provide the location of the h5 file
    detector.setModelPath(os.getcwd() + "/static/models/yolov3/yolo.h5")

    # load the model
    detector.loadModel()

    # 80 objects can be detected, but only focusing on Human Bodies
    custom = detector.CustomObjects(person=True)
    detections, extracted_objects = detector.detectCustomObjectsFromImage(
        custom_objects=custom,
        input_image=file,
        output_image_path=os.getcwd() + "/static"
        "/op_images/" + file,
        display_object_name=False,
        display_percentage_probability=False,
        extract_detected_objects=True,
        minimum_percentage_probability=30)
    return detections, extracted_objects
Example No. 12
def body_detection(file_name):
    try:
        path = make__path(os.path.join(get__date(), get__time()), 'bodies')
        exec_path = os.getcwd()
        detector = ObjectDetection()
        detector.setModelTypeAsRetinaNet()
        detector.setModelPath(
            os.path.join(exec_path, 'models', 'resnet50_coco_best_v2.0.1.h5'))
        detector.loadModel()
        custom_obj = detector.CustomObjects(person=True)

        detections = detector.detectCustomObjectsFromImage(
            custom_objects=custom_obj,
            input_image=file_name,
            output_image_path=os.path.join(exec_path, 'bodies', 'body.jpg'))
        image = face_recognition.load_image_file(
            os.path.join(exec_path, 'bodies', 'body.jpg'))
        counter = 1
        files = []
        for body in detections:
            box_points = body['box_points']
            top = box_points[1]
            bottom = box_points[3]
            left = box_points[0]
            right = box_points[2]

            body_image = image[top:bottom, left:right]
            pil_image = Image.fromarray(body_image)
            pil_image.save(os.path.join(path, 'body{0}.jpg'.format(counter)))
            files.append(os.path.join(path, 'body{0}.jpg'.format(counter)))
            counter += 1
        return files
    except:
        return False
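make__path, get__date and get__time are project helpers that are not shown. A rough sketch of what they appear to do (build a date/time-stamped output directory); the names are taken from the snippet, but the exact formats are assumptions:

import os
from datetime import datetime

def get__date():
    # assumed format, e.g. "2024-01-31"
    return datetime.now().strftime("%Y-%m-%d")

def get__time():
    # assumed format, e.g. "14-05-09"
    return datetime.now().strftime("%H-%M-%S")

def make__path(*parts):
    # join the parts (e.g. <date>/<time> and "bodies") and make sure the directory exists
    path = os.path.join(*parts)
    os.makedirs(path, exist_ok=True)
    return path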
Example No. 13
def crowdCount(input_image):
    '''
    Paths
    execution_path : path of the directory where the models are saved
    input_path : Django media path
    output_path : Django media path
    '''
    execution_path = 'C:\\Users\\Pranav\\Documents\\Projects\\FinalYear\\Models'
    input_path = 'C:\\Users\\Pranav\\Documents\\Projects\\FinalYear\\FrontEnd\\media'
    output_path = 'C:\\Users\\Pranav\\Documents\\Projects\\FinalYear\\FrontEnd\\media'

    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(
        os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel()

    output_image = '_'.join(["output", input_image])
    objects_present = detector.CustomObjects(person=True)
    detections = detector.detectCustomObjectsFromImage(
        custom_objects=objects_present,
        input_image=os.path.join(input_path, input_image),
        output_image_path=os.path.join(output_path, output_image),
        minimum_percentage_probability=29)
    people_count = len(detections)

    return people_count, output_image
Example No. 14
def getCoordsFromImageResnetCalibrate(image_name):
    execution_path = os.getcwd()

    #FIXME adjust image path
    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(
        os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel()

    custom_objects = detector.CustomObjects(car=True,
                                            motorcycle=True,
                                            cell_phone=True,
                                            truck=True,
                                            parking_meter=True,
                                            mouse=True,
                                            bowl=True,
                                            suitcase=True)
    detections = detector.detectCustomObjectsFromImage(
        custom_objects=custom_objects,
        input_image=os.path.join(execution_path, image_name),
        output_image_path=os.path.join(execution_path,
                                       image_name[:-4] + "_processed.jpg"),
        minimum_percentage_probability=MIN_PROBABILITY)
    # detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path ,image_name), output_image_path=os.path.join(execution_path, image_name[:-4] + "_processed.jpg"), minimum_percentage_probability=MIN_PROBABILITY)

    coords = []

    for eachObject in detections:
        coords.append(eachObject['box_points'].tolist())

    return coords
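MIN_PROBABILITY is a module-level constant that is not shown. A minimal sketch of the assumed setup and a call; the threshold value and image name are placeholders:

MIN_PROBABILITY = 30   # placeholder confidence threshold

coords = getCoordsFromImageResnetCalibrate("calibration.jpg")   # placeholder image in the working directory
print(coords)   # [[x1, y1, x2, y2], ...]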
Example No. 15
class AIManager:
    def __init__(self):
        self.detector = ObjectDetection()
        self._init_detector_()
        self.custom_objects = self._init_custom_objects_()

    def _init_detector_(self):
        self.detector.setModelTypeAsRetinaNet()
        self.detector.setModelPath(
            os.path.join(execution_path, resnet_50_layer_model))
        self.detector.loadModel()

    def _init_custom_objects_(self):
        return self.detector.CustomObjects(cup=True,
                                           wine_glass=True,
                                           fork=True,
                                           knife=True,
                                           spoon=True,
                                           bowl=True,
                                           banana=True,
                                           apple=True,
                                           sandwich=True,
                                           person=True,
                                           orange=True,
                                           bottle=True)

    def process_image(self, input_image_folder):
        start = time.time()
        image = input_image_folder.split('/')[-1]
        # rest_path = input_image_folder.split('/')[:-1]

        inp_image_folder = os.path.join(photos_path, input_image_folder)
        detections = self.detector.detectCustomObjectsFromImage(
            custom_objects=self.custom_objects,
            input_image=inp_image_folder,
            output_image_path=f'{output_path}/{image}',
            input_type='file',
            output_type='file',
            minimum_percentage_probability=50)
        objects = []
        # Append the found objects
        for eachObject in detections:
            objects.append(eachObject)

        # If no objects were found, delete the photo
        if not objects:
            # os.remove(output_image_folder)
            os.remove(input_image_folder)
            end = time.time()
            print(end - start)
            return None, None

        # Convert image into base64
        with open(input_image_folder, "rb") as image_file:
            encoded_string = base64.b64encode(image_file.read())
        end = time.time()
        print(end - start)

        return objects, encoded_string
Example No. 16
class WorkerDetector:
    def __init__(
            self,
            modelPath="/home/walker/catkin_ws/src/CapstoneBinTracking/data/resnet50_coco_best_v2.0.1.h5",
            debug=False,
            inspect=False):

        # Set debug flag
        self.debug = debug
        self.inspect = inspect

        # Initialize detector
        self.detector = ObjectDetection()
        self.detector.setModelTypeAsRetinaNet()
        self.detector.setModelPath(modelPath)

        # Set network to only detect people
        self.customObjs = self.detector.CustomObjects(person=True)

        # Available detection speed options: normal, fast, faster, fastest, flash
        self.detector.loadModel(detection_speed='fastest')

        print("Worker Detector ready.")

    def detectFromImage(self, imageRGB):

        # Perform detection
        detImg, detections = self.detector.detectCustomObjectsFromImage(
            custom_objects=self.customObjs,
            input_image=imageRGB,
            input_type="array",
            output_type="array")

        if self.debug:
            print("Detected ", len(detections), " workers.")

        if self.inspect:
            plt.imshow(detImg)
            plt.show()

        # Extract bounding boxes from detection dictionaries
        bboxes = []
        for d in detections:
            if self.debug:
                print(type(d))
                print(d)

            d = d["box_points"]
            bboxes.append({
                "x1": d[0],
                "y1": d[1],
                "x2": d[2],
                "y2": d[3],
                "height": d[3] - d[1],
                "width": d[2] - d[0]
            })

        # Return list of bounding boxes
        return bboxes, detImg
Example No. 17
def check_people(img_path):
    detector = ObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath('yolo.h5')
    detector.loadModel()
    peopleOnly = detector.CustomObjects(person=True)
    detectedImage, detections = detector.detectCustomObjectsFromImage(custom_objects=peopleOnly, output_type="array", input_image=img_path, minimum_percentage_probability=30)
    return len(detections)
Example No. 18
def detection(output_file):
    # keyframe file for detection
    keyframe_file = os.path.join(output_file, "keyframe")

    # save detection results file for keyframe
    output_full_image_file = os.path.join(output_file, "result_keyframe")
    if not os.path.exists(output_full_image_file):
        os.makedirs(output_full_image_file)

    # save detection results file for cropped person
    output_crop_person_file = os.path.join(output_file, "result_crop")
    if not os.path.exists(output_crop_person_file):
        os.makedirs(output_crop_person_file)

    # detection
    detector = ObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
    detector.loadModel()
    videos_list = os.listdir(keyframe_file)
    videos_list.sort()
    for video in videos_list:
        # mkdir files for result
        output_video_keyframe_file = os.path.join(output_full_image_file,
                                                  video)
        if not os.path.exists(output_video_keyframe_file):
            os.makedirs(output_video_keyframe_file)

        output_video_crop_file = os.path.join(output_crop_person_file, video)
        if not os.path.exists(output_video_crop_file):
            os.makedirs(output_video_crop_file)

        # read keyframe and detect
        video_keyframe_dir = os.path.join(keyframe_file, video)
        keyframes_list = os.listdir(video_keyframe_dir)
        keyframes_list.sort()
        for keyframe in keyframes_list:
            keyframe_dir = os.path.join(video_keyframe_dir, keyframe)
            res_keyframe_dir = os.path.join(output_video_keyframe_file,
                                            keyframe)

            # detect and save results for keyframes
            custom_objects = detector.CustomObjects(person=True)
            detections = detector.detectCustomObjectsFromImage(
                custom_objects=custom_objects,
                input_image=keyframe_dir,
                output_image_path=res_keyframe_dir)
            ori_image = cv2.imread(keyframe_dir)
            count = 1
            for eachObject in detections:
                if eachObject["name"] == 'person':
                    bb = eachObject['box_points']
                    person_image = ori_image[bb[1]:bb[3], bb[0]:bb[2]]
                    person_image_dir = os.path.join(
                        output_video_crop_file,
                        keyframe[0:-4] + "_" + str(count).zfill(2) + ".jpg")
                    cv2.imwrite(person_image_dir, person_image)
                    count += 1
Example No. 19
class StartWindows(QMainWindow):
    def __init__(self, camera=None, parent=None):
        super(StartWindows, self).__init__(parent=parent)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.detections = None
        self.frame = None
        self.files = []
        self.tmp = []

        self.update_timer = QTimer()
        self.update_timer.timeout.connect(self.update)
        #button

        #camera
        self.camera = cv2.VideoCapture(0)
        self.update_timer.start(30)
        self.execution_path = os.getcwd()
        #model

        self.detector = ObjectDetection()
        self.detector.setModelTypeAsTinyYOLOv3()
        self.detector.setModelPath(
            os.path.join(self.execution_path, "yolo-tiny.h5"))
        self.detector.loadModel(detection_speed="fast")
        print("###you are use yolo_tiny model###")

    def update(self):

        ret, self.frame = self.camera.read()
        self.frame = cv2.flip(self.frame, 1)
        #detected
        custom = self.ui.comboBox_2.currentText()
        print(custom)
        custom_objects = self.detector.CustomObjects(bottle=True)
        detected_image_array, self.detections = self.detector.detectCustomObjectsFromImage(
            custom_objects=custom_objects,
            input_type="array",
            input_image=self.frame,
            output_type="array")
        #detected_image_array, detections = self.detector.detectCustomObjectsFromImage(custom_objects=custom_objects,output_type="array",input_type="array", input_image= frame,display_percentage_probability=True, display_object_name=True)
        for eachObject in self.detections:
            print(eachObject["name"], " : ",
                  eachObject["percentage_probability"], " : ",
                  eachObject["box_points"])

        #resize
        detected_image_array = cv2.resize(detected_image_array, (851, 471))
        height, width, channel = detected_image_array.shape
        bytesPerLine = 3 * width

        qImg = QImage(detected_image_array.data, width, height, bytesPerLine,
                      QImage.Format_RGB888).rgbSwapped()
        pixmap01 = QPixmap.fromImage(qImg)
        pixmap_image = QPixmap(pixmap01)
        self.ui.label.setPixmap(pixmap_image)

        self.ui.label.show()
Example No. 20
def detectsetting():
    print('hi~3')
    detector = ObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath("/home/wndvlf96/abandog/yolo.h5")
    detector.loadModel()
    custom_objects = detector.CustomObjects(dog=True)
    print('Cropsetting completed')
    return detector, custom_objects
Example No. 21
def object_detector(filenames, filepaths):
    results_df = pd.DataFrame()
    images = {}

    execution_path = os.getcwd()

    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(
        os.path.join(execution_path, "models/resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel()
    custom_objects = detector.CustomObjects(person=True, banana=True)
    for f, fp in zip(filenames, filepaths):

        try:
            detections = detector.detectCustomObjectsFromImage(
                input_image=fp,
                output_type='array',
                custom_objects=custom_objects,
                minimum_percentage_probability=10,
                extract_detected_objects=True)
        except:
            print(f'Error! Image {f} could not be used!')
            continue

        df = pd.DataFrame(detections[1])
        images[f] = Image.fromarray(detections[0])

        if 'banana' not in df.name.values:
            print(f'Error! No banana found in image {f}!')
            continue
        if 'person' not in df.name.values:
            print(f'Error! No person found in image {f}!')
            continue
        else:
            df['image'] = np.asarray(detections[2])
            df = df.sort_values(by=['percentage_probability'],
                                ascending=False).groupby('name').head(1)
            record = {'filename': f}
            record['image_x'] = detections[0].shape[0]
            record['image_y'] = detections[0].shape[1]
            for row in df.values:
                record[f'{row[1]}_box_point1'] = row[0][0]
                record[f'{row[1]}_box_point2'] = row[0][1]
                record[f'{row[1]}_box_point3'] = row[0][2]
                record[f'{row[1]}_box_point4'] = row[0][3]
                record[f'{row[1]}_pred'] = row[2]
                record[f'{row[1]}_x'] = row[3].shape[0]
                record[f'{row[1]}_y'] = row[3].shape[1]
            results_df = results_df.append(record, ignore_index=True)
            print(f'{f} Predicted')

    return images, results_df
Example No. 22
def YoloV3_model(yolov3_model_path, b_tiny_version=False):
    '''
    Creates a YOLOv3 detector using the ImageAI core library.
    :param yolov3_model_path: path to the YOLOv3 (or TinyYOLOv3) weights file
    :param b_tiny_version: if True, use the TinyYOLOv3 model type instead
    :return: the YOLOv3 detector used for prediction and the custom objects (people only)
    to pass to the model during prediction.
    '''
    detector = ObjectDetection()
    if not b_tiny_version:
        detector.setModelTypeAsYOLOv3()
    else:
        detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(yolov3_model_path)
    custom_objects = detector.CustomObjects(person=True)
    detector.loadModel()
    return detector, custom_objects
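The caller still has to run the detection with the returned pair. A minimal usage sketch; the weights and image paths are placeholders:

detector, custom_objects = YoloV3_model("yolo.h5")
detections = detector.detectCustomObjectsFromImage(
    custom_objects=custom_objects,
    input_image="frame.jpg",
    output_image_path="frame_people.jpg",
    minimum_percentage_probability=40)
print(len(detections), "people detected")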
Example No. 23
def detectsetting():
    print('hi~3')
    detector = ObjectDetection()
    # detector.setModelTypeAsYOLOv3()
    # detector.setModelPath("./yolo.h5")
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath("/home/wndvlf96/abandog/yolo.h5")
    #    detector.setModelTypeAsRetinaNet()
    #    detector.setModelPath('./resnet50_coco_best_v2.1.0.h5')
    #    detector.setModelTypeAsRetinaNet()
    #    detector.setModelPath("./yolo.h5")
    #    detector.setModelPath('./resnet50_coco_best_v2.1.0.h5')
    detector.loadModel()
    custom_objects = detector.CustomObjects(dog=True)
    print('Cropsetting completed')
    return detector, custom_objects
Example No. 24
def pic_con(old_path, new_path):
    execution_path = os.getcwd()

    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(
        os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel()
    custom_objects = detector.CustomObjects(person=True, car=False)
    detections = detector.detectCustomObjectsFromImage(
        input_image=os.path.join(execution_path, old_path),
        output_image_path=os.path.join(execution_path, new_path),
        custom_objects=custom_objects)  # , minimum_percentage_probability=65
    for eachObject in detections:
        print(eachObject["name"] + " : " +
              eachObject["percentage_probability"])
Example No. 25
    def identifyHuman(self,
                      inputFolder,
                      outputFolder="./humans/",
                      tempFolder="./temp/",
                      fps=24,
                      probability=30,
                      modelPath="./models/yolo.h5"):
        path = Path(inputFolder)  # To keep path structures tidy

        detector = ObjectDetection()
        detector.setModelTypeAsYOLOv3()

        detector.setModelPath(modelPath)
        detector.loadModel()
        custom = detector.CustomObjects(
            person=True, giraffe=True
        )  # Finding out if humans are in the image. And giraffes, because that would be hilarious.

        files = path.glob("**/*")
        # List of images with a human that will be turned into a video
        human_dict = {}

        for file in [x for x in files if x.is_file()]:
            temp_file = Path(tempFolder + file.name)

            detections = detector.detectCustomObjectsFromImage(
                custom_objects=custom,
                input_image=file,
                #output_type="array",
                output_image_path=str(temp_file),  # Keeping the same filename
                minimum_percentage_probability=probability)
            # Was a human detected?
            if any(d['name'] == "person" for d in detections):
                # Copying the image into the desired human folder
                human_dict[int(file.stem)] = str(
                    file)  # Using the filename as index (should be an integer)
                print("Human found in image:", file.name)

        #  Checking if any humans were found
        if (len(human_dict)):
            print("Found humans! Generating video.")
            # Since the image detection seems to be multi-threaded, the images need to be sorted
            human_list = list(human_dict)
            human_list.sort()
            self.video(human_dict, human_list, outputFolder + "test.avi")
Example No. 26
class ActorDetector:
    def __init__(self, screen: Video):
        self.__detector = ObjectDetection()
        self.__detector.setModelTypeAsRetinaNet()
        self.__detector.setModelPath(const.MODEL_FILE_PATH)
        self.__detector.loadModel(detection_speed="fastest")

        self.__custom_objects = self.__detector.CustomObjects(person=True)

        self.__screen_x: int = screen.width // 2
        self.__screen_y: int = screen.height // 2
        self.__screen_median: np.array = np.array(
            [self.__screen_x, self.__screen_y])

    def get_actor(self, imagePath: str) -> Optional[Person]:
        detections = self.__detector.detectCustomObjectsFromImage(
            self.__custom_objects,
            input_image=imagePath,
            output_type="array",
            minimum_percentage_probability=30,
        )

        persons = self.__extract_persons(detections)
        if 0 == len(persons):
            return None

        return self.__extract_actor(persons)

    def __extract_persons(self, detections) -> List[Person]:
        persons_list = []
        for d in detections[1]:
            persons_list.append(Person(d))
        return persons_list

    def __extract_actor(self, persons: List[Person]) -> Optional[Person]:
        actor = None
        for person in persons:
            # treat the person closest to the centre of the screen as the actor
            distance = np.linalg.norm(self.__screen_median - person.median)
            person.distance = distance
            if (actor is None) or (actor.distance > person.distance):
                actor = person
        return actor
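ActorDetector relies on Person and Video types defined elsewhere in the project. A minimal sketch of the interface the class appears to expect (Person wraps one detection dict and exposes a median point and a distance; Video exposes width and height); any field beyond those used above is a guess:

import numpy as np
from dataclasses import dataclass

@dataclass
class Video:
    width: int
    height: int

class Person:
    def __init__(self, detection: dict):
        x1, y1, x2, y2 = detection["box_points"]
        self.box_points = (x1, y1, x2, y2)
        self.probability = detection["percentage_probability"]
        # centre of the bounding box, compared against the screen centre in __extract_actor
        self.median = np.array([(x1 + x2) // 2, (y1 + y2) // 2])
        self.distance = float("inf")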
Example No. 27
def get_boxes(image_dir):
    execution_path = os.getcwd()

    detector = ObjectDetection()

    # Choose Model type
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(
        os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))

    custom = detector.CustomObjects(handbag=True,
                                    tie=True,
                                    frisbee=True,
                                    bottle=True,
                                    cup=True,
                                    fork=True,
                                    knife=True,
                                    spoon=True,
                                    bowl=True,
                                    banana=True,
                                    apple=True,
                                    sandwich=True,
                                    orange=True,
                                    broccoli=True,
                                    carrot=True,
                                    scissors=True,
                                    toothbrush=True,
                                    book=True,
                                    vase=True)

    detector.loadModel()
    detections = detector.detectCustomObjectsFromImage(
        custom_objects=custom,
        input_image=os.path.join(execution_path, image_dir),
        output_image_path=os.path.join(execution_path, "dontcare.jpg"),
        minimum_percentage_probability=5)

    locs = []
    for eachObject in detections:
        #print(eachObject["name"] , " : ", eachObject["percentage_probability"], " : ", eachObject["box_points"])
        locs.append(eachObject["box_points"])
    return locs
Example No. 28
def controller():
    execution_path = os.getcwd()

    detector = ObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
    detector.loadModel()
    custom_objects = detector.CustomObjects(car=True,
                                            motorcycle=True,
                                            person=True)

    src = r'C:\Users\Rafael\Desktop\TESTE'
    path = os.listdir(src)
    contador = 1
    path_len = len(path)

    for eachImage in path:
        img = os.path.join(src, eachImage)
        model(src, img, custom_objects, detector, eachImage)
        print(f'Image {contador} of {path_len} processed')
        contador += 1
Example No. 29
class BodyDetector:
    def __init__(self, speed='normal'):
        self.detector = ObjectDetection()
        self.detector.setModelTypeAsRetinaNet()
        self.load_model()
        self.detector.loadModel(detection_speed=speed)
        self.custom_objects = self.detector.CustomObjects(person=True)

    def load_model(self, model_path='../downloaded'):
        self.detector.setModelPath(
            os.path.join(model_path, 'resnet50_coco_best_v2.0.1.h5'))

    def process(self, image):
        latest_frame = image.copy()
        regions = []
        tmp_t = time.time()

        # body detection, set min confidence to 0.7
        detected_image_array, detections = self.detector.detectCustomObjectsFromImage(
            custom_objects=self.custom_objects,
            input_type="array",
            input_image=latest_frame,
            output_type="array",
            minimum_percentage_probability=70)

        print(f'image detection time. {time.time() - tmp_t}')

        for person in detections:

            # wrap detected results to Region for following process
            region = Region()
            region.confidence = float(person['percentage_probability'])
            region.set_rect(left=person['box_points'][0],
                            right=person['box_points'][2],
                            top=person['box_points'][1],
                            bottom=person['box_points'][3])

            regions.append(region)

        return regions
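Region is a project type that is not part of the snippet. A minimal stand-in matching the interface used in process(), a confidence attribute plus a set_rect setter; anything beyond that is a guess:

class Region:
    def __init__(self):
        self.confidence = 0.0
        self.left = self.right = self.top = self.bottom = 0

    def set_rect(self, left, right, top, bottom):
        # store the bounding box edges in image coordinates
        self.left, self.right = left, right
        self.top, self.bottom = top, bottom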
Example No. 30
def get_bbox_yolo(path_to_images, path_to_weights='/home/user/aylifind/weights/yolo.h5', output_folder='/home/user/aylifind/output/'):
    detector = ObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(path_to_weights)
    detector.loadModel()
    bboxes = []
    for curr_image in os.listdir(path_to_images):
        image_path = path_to_images + curr_image
        custom = detector.CustomObjects(person=True)
        detections = detector.detectCustomObjectsFromImage(input_image=image_path,
                                                           output_image_path=output_folder,
                                                           custom_objects=custom,
                                                           minimum_percentage_probability=30)
        for eachObject in detections:
            try:
                x1 = int(eachObject['box_points'][0])
                y1 = int(eachObject['box_points'][1])
                x2 = int(eachObject['box_points'][2])
                y2 = int(eachObject['box_points'][3])
                bboxes.append((x1, y1, x2, y2))
            except:
                continue
    return bboxes