Example #1
class Detector:
    def __init__(self, model_path, speed):
        self.detector = ObjectDetection()

        self.detector.setModelTypeAsTinyYOLOv3()
        self.detector.setModelPath(model_path)
        self.detector.loadModel(detection_speed=speed)

    # Run object detection and return the two tags with highest probability.
    # In the case of no objects detected, tag="None"
    def detect(self, input_path):
        _, detection = self.detector.detectObjectsFromImage(
            input_image=input_path,
            output_type="array",
            minimum_percentage_probability=30)

        detection = sorted(detection,
                           key=lambda item: item["percentage_probability"],
                           reverse=True)

        objects = []
        for eachItem in detection:
            if eachItem["name"] not in objects:
                objects.append(eachItem["name"])
                if len(objects) == 2: break

        while len(objects) != 2:
            objects.append("None")

        return objects
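
A minimal usage sketch for the Detector class above, assuming the class definition is in scope; the model and image paths below are hypothetical placeholders:

# Hypothetical paths; adjust to wherever the Tiny YOLOv3 weights and a test image live.
detector = Detector("models/yolo-tiny.h5", "fast")
tag1, tag2 = detector.detect("images/street.jpg")

# detect() always returns exactly two tags, padded with "None" when fewer
# than two distinct objects clear the 30% probability threshold.
print(tag1, tag2)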
Example #2
def setUpNN(model_path):
    detector = ObjectDetection()
    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(model_path)
    detector.loadModel()

    return detector
Example #3
def getYoloTiny(request):
    if request.method == "POST":
        f = request.FILES['sentFile']  # here you get the files needed
        execution_path = os.getcwd()
        detector = ObjectDetection()
        path = os.path.join(settings.MODELS, "yolo-tiny.h5")
        detector.setModelTypeAsTinyYOLOv3()
        detector.setModelPath(path)
        detector.loadModel()
        detections = detector.detectObjectsFromImage(input_image=f,
                                                     output_image_path='media/imagetest.jpg')
        item = []
        for eachObject in detections:
            item.append(eachObject["name"])
        if len(item) == 0:
            return render(request, 'failPredictions.html')
        categoryName = objectCategory.objects.filter(object__in=item).values_list('categoryName', flat=True)
        ac = AdvertisementCategory.objects.filter(id__in=categoryName)
        ads = Advertisement.objects.filter(categoryName__in=ac)
        if len(ads) == 0:
            return render(request, 'failPredictions.html')
        context = {
            'ac':ac,
            'ads': ads,
        }
        return render(request, 'predictions.html', context)
    else:
        return render(request, 'homepage.html')
Example #4
class StartWindows(QMainWindow):
    def __init__(self, camera=None, parent=None):
        super(StartWindows, self).__init__(parent=parent)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.detections = None
        self.frame = None
        self.files = []
        self.tmp = []

        self.update_timer = QTimer()
        self.update_timer.timeout.connect(self.update)
        #button

        #camera
        self.camera = cv2.VideoCapture(0)
        self.update_timer.start(30)
        self.execution_path = os.getcwd()
        #model

        self.detector = ObjectDetection()
        self.detector.setModelTypeAsTinyYOLOv3()
        self.detector.setModelPath(
            os.path.join(self.execution_path, "yolo-tiny.h5"))
        self.detector.loadModel(detection_speed="fast")
        print("###you are use yolo_tiny model###")

    def update(self):

        ret, self.frame = self.camera.read()
        self.frame = cv2.flip(self.frame, 1)
        #detected
        custom = self.ui.comboBox_2.currentText()
        print(custom)
        custom_objects = self.detector.CustomObjects(bottle=True)
        detected_image_array, self.detections = self.detector.detectCustomObjectsFromImage(
            custom_objects=custom_objects,
            input_type="array",
            input_image=self.frame,
            output_type="array")
        #detected_image_array, detections = self.detector.detectCustomObjectsFromImage(custom_objects=custom_objects,output_type="array",input_type="array", input_image= frame,display_percentage_probability=True, display_object_name=True)
        for eachObject in self.detections:
            print(eachObject["name"], " : ",
                  eachObject["percentage_probability"], " : ",
                  eachObject["box_points"])

        #resize
        detected_image_array = cv2.resize(detected_image_array, (851, 471))
        height, width, channel = detected_image_array.shape
        bytesPerLine = 3 * width

        qImg = QImage(detected_image_array.data, width, height, bytesPerLine,
                      QImage.Format_RGB888).rgbSwapped()
        pixmap01 = QPixmap.fromImage(qImg)
        pixmap_image = QPixmap(pixmap01)
        self.ui.label.setPixmap(pixmap_image)

        self.ui.label.show()
Example #5
def tinyYoloImage(image, model, output):
    detector = ObjectDetection()
    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(model)
    detector.loadModel()
    start_time = time.time()
    detections = detector.detectObjectsFromImage(input_image=image,
                                                 output_image_path=output)
    for eachObject in detections:
        print(eachObject["name"], " : ", eachObject["percentage_probability"])
    print("Total time: %s seconds" % (time.time() - start_time))
Example #6
    def ObjectDetect(self):
        execution_path = "C:\Tensorflow\models\Research\object_detection\Engine\customPrediction"
        print(execution_path)
        detector = ObjectDetection()
        detector.setModelTypeAsRetinaNet()
        detector.setModelPath(
            os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
        detector.loadModel()
        detections1 = detector.detectCustomObjectsFromImage(
            input_image=os.path.join(execution_path, "trail1.jpg"),
            output_image_path=os.path.join(execution_path, "example3.jpg"))

        detector2 = ObjectDetection()
        detector2.setModelTypeAsYOLOv3()
        detector2.setModelPath(os.path.join(execution_path, "yolo.h5"))
        detector2.loadModel()
        detections2 = detector2.detectCustomObjectsFromImage(
            input_image=os.path.join(execution_path, "trail1.jpg"),
            output_image_path=os.path.join(execution_path, "example4.jpg"))

        detector3 = ObjectDetection()
        detector3.setModelTypeAsTinyYOLOv3()
        detector3.setModelPath(os.path.join(execution_path, "yolo-tiny.h5"))
        detector3.loadModel()
        detections3 = detector3.detectCustomObjectsFromImage(
            input_image=os.path.join(execution_path, "trail1.jpg"),
            output_image_path=os.path.join(execution_path, "example5.jpg"))

        prediction = CustomImagePrediction()
        prediction.setModelTypeAsResNet()
        prediction.setModelPath(
            os.path.join(execution_path, "model_ex-027_acc-0.843750.h5"))
        prediction.setJsonPath(os.path.join(execution_path,
                                            "model_class.json"))
        prediction.loadModel(num_objects=2)
        predictions, probabilities = prediction.predictImage(os.path.join(
            execution_path, "trail1.jpg"),
                                                             result_count=5)

        detections = detections1 + detections2 + detections3
        List = []
        for i in detections:
            List.append(i["name"])

        for eachPrediction, eachProbability in zip(predictions, probabilities):
            if eachProbability > 50:
                List.append(eachPrediction)
        """for eachObject in detections:
            print(eachObject["name"], " : ", eachObject["percentage_probability"])

        for eachPrediction, eachProbability in zip(predictions, probabilities):
            print(eachPrediction, " : ", eachProbability)"""
        return List
Example #7
def load_detector(model_type, model_path, detection_speed="normal"):
    detector = ObjectDetection()
    if model_type == ModelType.YOLO:
        detector.setModelTypeAsYOLOv3()
    elif model_type == ModelType.YOLO_TINY:
        detector.setModelTypeAsTinyYOLOv3()
    elif model_type == ModelType.RES_NET:
        detector.setModelTypeAsRetinaNet()

    detector.setModelPath(model_path)
    detector.loadModel(detection_speed=detection_speed)
    return detector
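
ModelType is referenced above but not defined in this example; a minimal sketch of what such an enum might look like, with member names assumed from the branches of load_detector:

from enum import Enum, auto

class ModelType(Enum):
    YOLO = auto()       # full YOLOv3 (yolo.h5)
    YOLO_TINY = auto()  # Tiny YOLOv3 (yolo-tiny.h5)
    RES_NET = auto()    # RetinaNet (resnet50_coco_best_v2.0.1.h5)

# e.g. detector = load_detector(ModelType.YOLO_TINY, "models/yolo-tiny.h5", "fast")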
Example #8
    def run(self):
        detector = ObjectDetection(
        ) if self.type_file == "img" else VideoObjectDetection()

        settings = QSettings(CONFIG_FILE_NAME, QSettings.IniFormat)
        model = settings.value(KEY_SAVE_MODEL, MODELS[0], type=str)

        if model == MODELS[0]:
            detector.setModelTypeAsRetinaNet()
            file = MODEL_RETINA_NET
        elif model == MODELS[1]:
            detector.setModelTypeAsYOLOv3()
            file = MODEL_YOLOv3
        else:
            detector.setModelTypeAsTinyYOLOv3()
            file = MODEL_TINY_YOLOv3

        detector.setModelPath(file)
        detector.loadModel(self.window.detection_speed)

        if self.type_file == "img":
            detections = detector.detectObjectsFromImage(
                input_image=self.new_file,
                output_image_path=self.output_file,
                **self.window.getFunctionArg())

            self._print_table_txt({
                name: sum(1 for obj in detections if obj["name"] == name)
                for name in set(obj["name"] for obj in detections)
            })

        elif self.type_file == "video":
            detector.detectObjectsFromVideo(
                input_file_path=self.new_file,
                output_file_path=self.output_file,
                video_complete_function=self.forFull,
                **self.window.getFunctionArg())
        else:
            camera = cv2.VideoCapture(self.window.index)

            detector.detectObjectsFromVideo(
                camera_input=camera,
                output_file_path=self.output_file,
                video_complete_function=self.forFull,
                **self.window.getFunctionArg())
Example #9
def getDetModel(modelName):
    '''Get detector model given name.'''
    detector = ObjectDetection()
    if modelName == "retinanet":
        detector.setModelTypeAsRetinaNet()
        detector.setModelPath(
            os.path.join(pathModel, "resnet50_coco_best_v2.0.1.h5"))
    elif modelName == "yolo":
        detector.setModelTypeAsYOLOv3()
        detector.setModelPath(os.path.join(pathModel, "yolo.h5"))
    elif modelName == "yolo-tiny":
        detector.setModelTypeAsTinyYOLOv3()
        detector.setModelPath(os.path.join(pathModel, "yolo-tiny.h5"))
    detector.loadModel()
    return detector
Example #10
class Vision:
    def __init__(self, model="model/yolo-tiny.h5"):
        self.model_path = model

    def detectImage(self, input_path, output_path):

        self.detector = ObjectDetection()
        self.detector.setModelTypeAsTinyYOLOv3()
        self.detector.setModelPath(self.model_path)
        self.detector.loadModel()

        detection = self.detector.detectObjectsFromImage(
            input_image=input_path, output_image_path=output_path)

        return detection
Example #11
class tinyyolo:
    def __init__(self):  # initializing model
        execution_path = os.getcwd()
        self.detector = ObjectDetection()
        self.detector.setModelTypeAsTinyYOLOv3()
        self.detector.setModelPath(
            os.path.join(execution_path, "models/yolo-tiny.h5"))
        self.detector.loadModel()

    def prediction(self, img_execution_path, pred_img_execution_path, img,
                   pred_img):  # predicting from model
        detections = self.detector.detectObjectsFromImage(
            input_image=os.path.join(img_execution_path, img),
            output_image_path=os.path.join(pred_img_execution_path, pred_img))
        return detections
Example #12
def YoloV3_model(yolov3_model_path, b_tiny_version=False):
    '''
    Method that creates a YoloV3 model, using the config from the ImageAI core library.
    :param yolov3_model_path: the path of the model weights (.h5) file
    :param b_tiny_version: if True, use the Tiny YOLOv3 variant instead of the full YOLOv3
    :return: the YoloV3 model used to predict and the custom objects (only people) to pass to the model
    during prediction.
    '''
    detector = ObjectDetection()
    if not b_tiny_version:
        detector.setModelTypeAsYOLOv3()
    else:
        detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(yolov3_model_path)
    custom_objects = detector.CustomObjects(person=True)
    detector.loadModel()
    return detector, custom_objects
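
A sketch of how the returned pair might be used, assuming the detectCustomObjectsFromImage call shown in other examples here; the input and output paths are hypothetical:

detector, custom_objects = YoloV3_model("models/yolo-tiny.h5", b_tiny_version=True)

# Detection is restricted to people, as configured by CustomObjects(person=True).
detections = detector.detectCustomObjectsFromImage(
    custom_objects=custom_objects,
    input_image="frames/frame_000.jpg",
    output_image_path="frames/frame_000_detected.jpg",
    minimum_percentage_probability=30)

for person in detections:
    print(person["name"], ":", person["percentage_probability"], ":", person["box_points"])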
Example #13
def detect_object(image_name):
    detector = ObjectDetection()

    model_path = "/content/task/models/yolo-tiny.h5"
    input_path = "/content/task/ScreenShots/" + image_name
    output_path = "/content/task/output/new_" + image_name

    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(model_path)

    detector.loadModel()
    detection = detector.detectObjectsFromImage(
        input_image=input_path,
        output_image_path=output_path,
        minimum_percentage_probability=20)
    return detection
Example #14
def detect_photo(model_path="static/other/yolo-tiny.h5",
                 input_path="static/other/ai_photo.jpg",
                 output_path="static/other/ai_photo_after.jpg"):
    detector = ObjectDetection()

    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(model_path)
    detector.loadModel()
    detection = detector.detectObjectsFromImage(
        input_image=input_path,
        output_image_path=output_path,
        minimum_percentage_probability=40)

    names = [d['name'] for d in detection]
    counts = {n: names.count(n) for n in set(names)}

    return counts
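
A short usage sketch for detect_photo; it relies on the default static/other/... paths from the signature and shows the expected shape of the returned per-class counts:

counts = detect_photo()  # e.g. {'person': 2, 'car': 1}, depending on the image
for name, count in sorted(counts.items()):
    print(name, ":", count)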
Example #15
class Detector:
    def __init__(self, model_path, output_path, language, translator):
        self.detector = None
        self.detection = None
        self.detection_items = []
        self.model_path = model_path
        self.output_path = output_path
        self.detection_text = ''
        self.jpg_id = 0
        self.photo_id = 0
        self.language = language
        self.translator = translator
        self.cam = None

    def initialize(self, cam_number):
        self.detector = ObjectDetection()
        self.detector.setModelTypeAsTinyYOLOv3()
        self.detector.setModelPath(self.model_path)
        self.detector.loadModel()

    def get_detection(self, input_path):
        output_image = self.output_path + str(self.jpg_id) + '.jpg'
        self.detection = self.detector.detectObjectsFromImage(
            input_image=input_path, output_image_path=output_image)
        self.detection_items = []
        self.detection_text = ''
        for item in self.detection:
            text = self.translator.translate(item["name"],
                                             dest=self.language).text
            self.detection_items.append(item)
            self.detection_text = self.detection_text + text + '. '
        self.jpg_id += 1
        return self.detection_text, output_image

    def take_picture(self):
        video_capture = cv2.VideoCapture(0)
        if not video_capture.isOpened():
            return False, None
        ret, frame = video_capture.read()
        video_capture.release()
        # im = Image.fromarray(crop_center(frame[:,:,::-1],350,350))
        im = Image.fromarray(frame[:, :, ::-1])
        picture_name = "pictures/{}.jpg".format(self.photo_id)
        im.save(picture_name)
        self.photo_id += 1
        return True, picture_name
Example #16
def test_object_detection_tiny_yolov3_array_io():

    try:
        keras.backend.clear_session()
    except Exception:
        pass

    image_input_array = cv2.imread(image_input)

    detector = ObjectDetection()
    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(
        os.path.join(main_folder, "data-models", "yolo-tiny.h5"))
    detector.loadModel()
    detected_array, results = detector.detectObjectsFromImage(
        input_image=image_input_array,
        input_type="array",
        minimum_percentage_probability=40,
        output_type="array")

    assert isinstance(detected_array, ndarray)
    assert isinstance(results, list)
    for result in results:
        assert isinstance(result["name"], str)
        assert isinstance(result["percentage_probability"], float)
        assert isinstance(result["box_points"], tuple)

    detected_array, results2, extracted_arrays = detector.detectObjectsFromImage(
        input_image=image_input,
        output_image_path=image_output,
        minimum_percentage_probability=40,
        extract_detected_objects=True,
        output_type="array")

    assert isinstance(results2, list)
    assert isinstance(extracted_arrays, list)
    for result2 in results2:
        assert isinstance(result2["name"], str)
        assert isinstance(result2["percentage_probability"], float)
        assert isinstance(result2["box_points"], tuple)

    for extracted_array in extracted_arrays:
        assert isinstance(extracted_array, ndarray)
Example #17
def detect_people():
    # object detection
    detector = ObjectDetection()
    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath('yolo-tiny.h5')
    detector.loadModel()
    detections = detector.detectObjectsFromImage(
        input_image="image.jpeg", output_image_path="imagenew.jpg")

    # keep detected objects labeled "person" with probability above 60%
    detected_people = []
    for eachObject in detections:
        if eachObject["name"] == "person" and eachObject[
                "percentage_probability"] > 60:
            detected_people.append(eachObject)

    # print all detected_people
    for dp in detected_people:
        print(dp)
    return detected_people
Example #18
def test_object_detection_tiny_yolov3():
    try:
        keras.backend.clear_session()
    except Exception:
        pass
    detector = ObjectDetection()
    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(
        os.path.join(main_folder, "data-models", "yolo-tiny.h5"))
    detector.loadModel()
    results = detector.detectObjectsFromImage(
        input_image=image_input,
        output_image_path=image_output,
        minimum_percentage_probability=40)

    assert isinstance(results, list)
    for result in results:
        assert isinstance(result["name"], str)
        assert isinstance(result["percentage_probability"], float)
        assert isinstance(result["box_points"], tuple)
    assert os.path.exists(image_output)
    os.remove(image_output)

    results2, extracted_paths = detector.detectObjectsFromImage(
        input_image=image_input,
        output_image_path=image_output,
        minimum_percentage_probability=40,
        extract_detected_objects=True)

    assert isinstance(results2, list)
    assert isinstance(extracted_paths, list)
    assert os.path.isdir(os.path.join(image_output + "-objects"))
    for result2 in results2:
        assert isinstance(result2["name"], str)
        assert isinstance(result2["percentage_probability"], float)
        assert isinstance(result2["box_points"], tuple)

    for extracted_path in extracted_paths:
        assert os.path.exists(extracted_path)

    shutil.rmtree(os.path.join(image_output + "-objects"))
Example #19
def predict_path():
    if request.method == 'POST':

        path = request.args.get('path')
        import os
        cwd = os.getcwd()
        print(cwd)
        detector = ObjectDetection()

        model_path = "./models/yolo-tiny.h5"
        input_path = path
        output_path = "C:/Users/bgranat/Desktop/finalhtml/final/rest-api/target/classes/static/" + path.rsplit(
            '/', 1)[-1] + "newimage.jpg"

        detector.setModelTypeAsTinyYOLOv3()
        detector.setModelPath(model_path)
        detector.loadModel()
        detection = detector.detectObjectsFromImage(
            input_image=input_path, output_image_path=output_path)
        jsondata = {}

        rf1 = joblib.load("./models/x.pkl")
        rf2 = joblib.load("./models/y.pkl")
        i = 0

        for eachItem in detection:
            if eachItem["name"] == "person":
                x1 = eachItem["box_points"][0]
                y1 = eachItem["box_points"][1]
                x2 = eachItem["box_points"][2]
                y2 = eachItem["box_points"][3]

                # predict() returns an array; take the single predicted value
                x = rf1.predict([[x1, x2]])[0]
                y = rf2.predict([[y1, y2]])[0]
                print(x, y)
                point = makeANewPoint(str(math.floor(x)), str(math.floor(y)),
                                      input_path)
                jsondata[i] = point
                i = i + 1

        return jsonify(jsondata)
Example #20
def hello():
    K.clear_session()

    if request.method == 'POST':
        detector = ObjectDetection()
        detector.setModelTypeAsTinyYOLOv3()
        detector.setModelPath("yolo-tiny.h5")
        detector.loadModel()
        algo = request.form['algo']
        if 'file_input' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file_input']

        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)

        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file_data = file.stream.read()
            nparr = np.frombuffer(file_data, np.uint8)
            img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

            detect_img, result = detector.detectObjectsFromImage(
                input_image=img,
                input_type="array",
                output_type="array",
                minimum_percentage_probability=80)
            K.clear_session()
            detect_img = cv2.cvtColor(detect_img, cv2.COLOR_BGR2RGB)
            cv2.imwrite('detect_img.jpg', detect_img)
            img_str = cv2.imencode('.jpg', detect_img)[1].tobytes()
            encoded = base64.b64encode(img_str).decode("utf-8")
            mime = "image/jpg;"
            out_image = f"data:{mime}base64,{encoded}"
            return render_template('result.html', out_image=out_image)
        else:
            return "File extension not supported"
    return render_template('index.html')
Example #21
    def __init__(self, model):
        execution_path = os.getcwd()
        detector = ObjectDetection()

        if model == 'yolo':
            detector.setModelTypeAsYOLOv3()
        elif model == 'yolo-tiny':
            detector.setModelTypeAsTinyYOLOv3()
        else:
            raise ValueError(
                'Model ' + model +
                ' not found. You should download the model and put it into the "modules" directory.'
            )

        detector.setModelPath(
            os.path.join(execution_path,
                         '../input/models-detection/' + model + '.h5'))
        detector.loadModel()
        custom_objects = detector.CustomObjects(car=True)

        self.detector = detector
        self.custom_objects = custom_objects
        self.execution_path = execution_path
Example #22
def objectDetect():
    input_path = input_path_entry.get()
    input_path_entry.delete(0, tk.END)
    output_path = output_path_entry.get()
    output_path_entry.delete(0, tk.END)

    detector = ObjectDetection()

    model_path = "./models/yolo-tiny.h5"
    output_path = output_path + '/newimage.jpg'

    detector.setModelTypeAsTinyYOLOv3()

    detector.setModelPath(model_path)

    detector.loadModel()

    detections = detector.detectObjectsFromImage(input_image=input_path,
                                                 output_image_path=output_path)

    for eachObject in detections:
        print(eachObject["name"], " : ", eachObject["percentage_probability"],
              " : ", eachObject["box_points"])
        print("--------------------------------")
Example #23
def load_model(model='yolo'):
    '''
    :param model: str, the model name ('yolo', 'resnet' or 'tinyyolo')
    :rtype: detector
    Input a model name and return the corresponding detector
    '''
    # Load object detection model
    execution_path = os.getcwd()
    detector = ObjectDetection()
    if model == 'yolo':
        detector.setModelTypeAsYOLOv3()
        detector.setModelPath(
            os.path.join(execution_path, "pretrained_model/yolo.h5"))
    elif model == 'resnet':
        detector.setModelTypeAsRetinaNet()
        detector.setModelPath(
            os.path.join(execution_path,
                         "pretrained_model/resnet50_coco_best_v2.0.1.h5"))
    elif model == 'tinyyolo':
        detector.setModelTypeAsTinyYOLOv3()
        detector.setModelPath(
            os.path.join(execution_path, "pretrained_model/yolo-tiny.h5"))
    detector.loadModel()
    return detector
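
A usage sketch for load_model, assuming the pretrained_model/ directory from the function above and a hypothetical test image:

detector = load_model('tinyyolo')
detections = detector.detectObjectsFromImage(
    input_image="samples/street.jpg",
    output_image_path="samples/street_detected.jpg")
for obj in detections:
    print(obj["name"], ":", obj["percentage_probability"])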
Example #24
from imageai.Detection import ObjectDetection
import cv2
"""Create instance of Object Detection class"""

det = ObjectDetection()
"""Setting paths for input image, output image and pretrained model weights of tiny yolo"""

model_path = "yolo-tiny.h5"
input_path = "input.jpg"
output_path = "prediction_output.jpg"
"""Setting the model to tiny yolov3 and loading the weights from the specified path"""

det.setModelTypeAsTinyYOLOv3()
det.setModelPath(model_path)
det.loadModel()
"""Detecting objects from the image and displaying the label if the prediction has minimum 0.1 probability."""

detection = det.detectObjectsFromImage(input_image=input_path,
                                       output_image_path=output_path,
                                       minimum_percentage_probability=0.1)
"""Result: Input and output image"""

i1 = cv2.imread("input.jpg")
cv2.imshow("input", i1)
i2 = cv2.imread("prediction_output.jpg")
cv2.imshow("prediction_output", i2)
cv2.waitKey(0)
"""Total objects detected"""

print("Enter object name to get the box location")
print("Available options are:")
count = 1
Example #25
class StartWindows(QMainWindow):
    def __init__(self,camera=None, parent=None):
        super(StartWindows, self).__init__(parent=parent)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.detections = None
        self.frame = None
        self.files = []
        self.tmp = []

        # detector

        # button
        self.ui.pushButton_3.clicked.connect(self.capture)
        self.ui.pushButton_2.clicked.connect(self.start)
        self.ui.pushButton.clicked.connect(self.stop)

        # camera
        self.camera = cv2.VideoCapture(0)
        # timer
        self.update_timer = QTimer()
        self.update_timer.timeout.connect(self.update)

    def start(self):
        model = self.ui.comboBox.currentText()
        print(model)

        if model == "YOLO V3":
            self.yolo()
        elif model == "YOLO TINY":
            self.yolo_tiny()
        elif model == "RESNET":
            self.resnet()
        
    def capture(self):
        d = self.detections
        for eachObject in d:
            ts = time.time()
            txt = str(ts)
            box = eachObject["box_points"]  # box_points are (x1, y1, x2, y2)
            x1, y1, x2, y2 = box
            crop_img = self.frame[y1:y2, x1:x2]
            cv2.imwrite(txt + '.png', crop_img)
        for file in os.listdir("D:/dec/Detection/guifinish/New folder"):
            if file.endswith(".png"):
                self.files.append(os.path.join(os.getcwd(), file))
                self.tmp = self.files

        self.ui.listWidget.clear()
        for x in self.files:
            print(x)
            item = QListWidgetItem()
            item.setIcon(QIcon(x))
            self.ui.listWidget.addItem(item)
        self.files = []
            
            '''
        for file in os.listdir("D:/dec/Detection/guifinish"):
            
            if file.endswith(".png"):
                self.files.append(os.path.join(os.getcwd(), file))
    
        for x in self.files:
            print(x)   
            item = QListWidgetItem()

            item.setIcon(QIcon(x))
            self.ui.listWidget.addItems(item)
            
            '''     
            
       
        
        
        
 
    def yolo(self):
        self.update_timer.start(30)
        self.execution_path = os.getcwd()
        self.detector = ObjectDetection()
        self.detector.setModelTypeAsYOLOv3()
        self.detector.setModelPath(os.path.join(self.execution_path, "yolo.h5"))
        self.detector.loadModel(detection_speed="fast")
        print("### you are using the yolo model ###")

    def yolo_tiny(self):
        self.update_timer.start(30)
        self.execution_path = os.getcwd()
        self.detector = ObjectDetection()
        self.detector.setModelTypeAsTinyYOLOv3()
        self.detector.setModelPath(os.path.join(self.execution_path, "yolo-tiny.h5"))
        self.detector.loadModel(detection_speed="flash")
        print("### you are using the yolo_tiny model ###")

    def resnet(self):
        self.update_timer.start(30)
        self.execution_path = os.getcwd()
        self.detector = ObjectDetection()
        self.detector.setModelTypeAsRetinaNet()
        self.detector.setModelPath(os.path.join(self.execution_path, "resnet50_coco_best_v2.0.1.h5"))
        self.detector.loadModel(detection_speed="fast")
        print("### you are using the resnet model ###")

    def stop(self):
        self.update_timer.stop()
        

    def update(self):
        ret, self.frame = self.camera.read()
        self.frame = cv2.flip(self.frame, 1)
        # Map the UI selection (comboBox_2) to the matching ImageAI CustomObjects keyword.
        custom = self.ui.comboBox_2.currentText()
        print(custom)

        label_to_kwarg = {
            "Person": "person", "Orange": "orange", "Cell Phone": "cell_phone",
            "Bicycle": "bicycle", "Car": "car", "Motorcycle": "motorcycle",
            "Airplane": "airplane", "Bus": "bus", "Train": "train", "Truck": "truck",
            "Boat": "boat", "Traffic Light": "traffic_light", "Fire Hydrant": "fire_hydrant",
            "Stop Sign": "stop_sign", "Giraffe": "giraffe", "Backpack": "backpack",
            "Umbrella": "umbrella", "Handbag": "handbag", "Tie": "tie",
            "Suitcase": "suitcase", "Frisbee": "frisbee", "Skis": "skis",
            "Snowboard": "snowboard", "Sports Ball": "sports_ball", "Kite": "kite",
            "Baseball Bat": "baseball_bat", "Baseball Glove": "baseball_glove",
            "Skateboard": "skateboard", "Surfboard": "surfboard",
            "Tennis Rack": "tennis_racket", "Bottle": "bottle", "Wine Glass": "wine_glass",
            "Cup": "cup", "Fork": "fork", "Knife": "knife", "Spoon": "spoon",
            "Bowl": "bowl", "Banana": "banana", "Apple": "apple", "Sandwich": "sandwich",
            "Broccoli": "broccoli", "Carrot": "carrot", "Hot Dog": "hot_dog",
            "Pizza": "pizza", "Cake": "cake", "Chair": "chair", "Couch": "couch",
            "Potted Plant": "potted_plant", "bed": "bed", "Dining Table": "dining_table",
            "Toilet": "toilet", "Tv": "tv", "Laptop": "laptop", "Mouse": "mouse",
            "Remote": "remote", "Keyboard": "keyboard", "Microwave": "microwave",
            "Oven": "oven", "Toaster": "toaster", "Sink": "sink",
            "Refrigerator": "refrigerator", "Book": "book", "Clock": "clock",
            "Vase": "vase", "Scissors": "scissors", "Teddy Bear": "teddy_bear",
            "Hair Dryer": "hair_dryer", "Toothbrush": "toothbrush",
            "Parking Meter": "parking_meter", "Bench": "bench", "Bird": "bird",
            "Cat": "cat", "Dog": "dog", "Horse": "horse", "Sheep": "sheep",
            "Cow": "cow", "Elephant": "elephant", "Bear": "bear", "Zebra": "zebra",
        }

        if custom == "ALL" or custom not in label_to_kwarg:
            # "ALL" (and any unknown selection) means no filtering: detect every class.
            custom_objects = None
        else:
            custom_objects = self.detector.CustomObjects(**{label_to_kwarg[custom]: True})
        detected_image_array, self.detections = self.detector.detectCustomObjectsFromImage(
            custom_objects=custom_objects,
            input_type="array",
            input_image=self.frame,
            output_type="array")

        for eachObject in self.detections:
            print(eachObject["name"], " : ",
                  eachObject["percentage_probability"], " : ",
                  eachObject["box_points"])

        # resize the annotated frame and display it in the Qt label
        detected_image_array = cv2.resize(detected_image_array, (851, 471))
        height, width, channel = detected_image_array.shape
        bytesPerLine = 3 * width

        qImg = QImage(detected_image_array.data, width, height, bytesPerLine,
                      QImage.Format_RGB888).rgbSwapped()
        pixmap01 = QPixmap.fromImage(qImg)
        pixmap_image = QPixmap(pixmap01)
        self.ui.label.setPixmap(pixmap_image)

        self.ui.label.show()
Example #26
#In this python3 file we use a pretrained model to predict on images

#Import ObjectDetection class from the ImageAI library.
from imageai.Detection import ObjectDetection

#Creating an instance of the  image-ai detector
detector = ObjectDetection()

#Set up constant file paths
model_path = "./models/yolo-tiny.h5"
input_path = "./input/test45.jpg"
output_path = "./output/newimage.jpg"

#Let the computer know that we'll be using the Tiny YOLOv3 model
detector.setModelTypeAsTinyYOLOv3()

#Point the detector at the model weights and load them
detector.setModelPath(model_path)
detector.loadModel()

#Detect
detection = detector.detectObjectsFromImage(input_image=input_path,
                                            output_image_path=output_path)

#Get probabilities for each category
for eachItem in detection:
    print(eachItem["name"], " : ", eachItem["percentage_probability"])
Example #27
    def extractor(self, image_objects):

        # Initialize the list of checker classes to call in order to verify the conditions
        lista_classi = []
        # Extract the list of the different condition types
        lista_condizioni = list(self.condition_list.keys())

        if 'size' in lista_condizioni:
            '''
            Append to 'lista_classi' the different calls to the class
            that handles the 'size' condition (file size).
            In this way two different objects are created, one to satisfy
            the minimum size and one for the maximum size.
            '''
            min_value = self.condition_list['size']['min']
            max_value = self.condition_list['size']['max']
            lista_classi.append(
                SizeChecker(self.condition_list, min_value, max_value))

        if 'time' in lista_condizioni:

            # Append to 'lista_classi' the different calls to the class
            # that handles the 'time' condition (file creation date).

            min_value = self.condition_list['time']['min']
            max_value = self.condition_list['time']['max']
            lista_classi.append(
                TimeChecker(self.condition_list, min_value, max_value))

        if 'wordlist' in lista_condizioni:
            '''
            Append to 'lista_classi' the different calls to the class
            that handles the 'wordlist' condition (list of searched words).
            Create as many 'OccurrenceChecker' objects as there are word-occurrence pairs to search for.
            '''
            for parola in self.condition_list['wordlist'].keys():
                lista_classi.append(
                    OccurrenceChecker(parola,
                                      self.condition_list['wordlist'][parola]))

        if 'objectlist' in lista_condizioni:

            detector = ObjectDetection()

            model_path = "./models/yolo-tiny.h5"
            detector.setModelTypeAsTinyYOLOv3()
            detector.setModelPath(model_path)
            detector.loadModel()
            # Append to 'lista_classi' the different calls to the class
            # that handles the 'objectlist' condition (list of searched objects).

            for obj in self.condition_list['objectlist'].keys():

                # Check whether the object is among those the algorithm can recognize

                if obj in image_objects:

                    lista_classi.append(
                        ImageChecker(self.condition_list, obj,
                                     self.condition_list['objectlist'][obj],
                                     detector))

        return lista_classi  # Return the list containing the call to the checker class for each specific condition
Example #28
class ObjectDetector:

    model_class_labels = [
        "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
        "truck", "boat", "traffic light", "fire hydrant", "stop sign",
        "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep",
        "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
        "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard",
        "sports ball", "kite", "baseball bat", "baseball glove", "skateboard",
        "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork",
        "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
        "broccoli", "carrot", "hot dog", "pizza", "donot", "cake", "chair",
        "couch", "potted plant", "bed", "dining table", "toilet", "tv",
        "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave",
        "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
        "scissors", "teddy bear", "hair dryer", "toothbrush"
    ]

    def __init__(self, model_name, model_path):
        self.model_path = model_path
        self.model_name = model_name
        if model_name == 'other':
            self.object_detector = DetectorAPI(path_to_ckpt=model_path)
        else:
            self.object_detector = ObjectDetection()
            if model_name == 'yolov3':
                self.object_detector.setModelTypeAsYOLOv3()
            elif model_name == 'tinyyolov3':
                self.object_detector.setModelTypeAsTinyYOLOv3()
            elif model_name == 'retinanet':
                self.object_detector.setModelTypeAsRetinaNet()
            self.object_detector.setModelPath(model_path)
            self.object_detector.loadModel()

    def detect(self,
               input_image_path,
               output_image_path,
               extract_detected_objects=True,
               display_percentage_probability=False,
               display_object_name=False,
               threshold=0.7):

        if self.model_name == 'other':
            detected_objects_location = []
            img = cv2.imread(input_image_path)
            boxes, scores, classes, _ = self.object_detector.process_frame(img)
            detections = []
            for i in range(len(boxes)):
                if scores[i] > threshold:
                    detections.append(
                        {'name': self.model_class_labels[classes[i] - 1]})
                    box = boxes[i]
                    cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]),
                                  (255, 0, 0), 2)
            cv2.imwrite(output_image_path, img)
        else:
            detections, detected_objects_location = self.object_detector.detectObjectsFromImage(
                input_image=input_image_path,
                output_image_path=output_image_path,
                extract_detected_objects=extract_detected_objects,
                display_percentage_probability=display_percentage_probability,
                display_object_name=display_object_name)
        return detections, detected_objects_location
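
A sketch of how this wrapper might be driven; the model weights and image paths are hypothetical, and 'tinyyolov3' selects the ImageAI branch of the constructor:

wrapper = ObjectDetector('tinyyolov3', 'models/yolo-tiny.h5')
detections, extracted = wrapper.detect(
    input_image_path='input/scene.jpg',
    output_image_path='output/scene_detected.jpg',
    extract_detected_objects=True)
print([d['name'] for d in detections])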
Example #29
class CountingObject(object):
    """
    A class of counting objects
    """

    algos = {
        "resnet": "resnet50_coco_best_v2.0.1.h5",
        "yolov3": "yolo.h5",
        "yolo_tiny": "yolo-tiny.h5"
    }

    def __init__(self, stream_link):
        self.stream_link = stream_link
        self.streams = streamlink.streams(stream_link)
        if self.streams is None:
            raise ValueError("cannot open the stream link %s" % stream_link)

        q = list(self.streams.keys())[0]
        self.stream = self.streams['%s' % q]

        self.target_img_path = os.getcwd()

        self.detector = ObjectDetection()
        if self.detector is None:
            raise ValueError("Detector of objects is None")

    def detector_init(self, algo="resnet", speed="normal"):
        """
        Must be invoked after instantiation to initialize the object detector.

        Args:
            algo (str): The algorithm for object detection tasks. "resnet" (default), "yolov3", "yolo_tiny".
            speed (str): The detection speed for object detection tasks. "normal" (default), "fast", "faster", "fastest" and "flash".

        Returns:
            void

        """

        if algo == "resnet":
            self.detector.setModelTypeAsRetinaNet()
            self.detector.setModelPath(
                os.path.join(self.target_img_path, self.algos["resnet"]))
        elif algo == "yolov3":
            self.detector.setModelTypeAsYOLOv3()
            self.detector.setModelPath(
                os.path.join(self.target_img_path, self.algos["yolov3"]))
        elif algo == "yolo_tiny":
            self.detector.setModelTypeAsTinyYOLOv3()
            self.detector.setModelPath(
                os.path.join(self.target_img_path, self.algos["yolo_tiny"]))
        else:
            print("Given algorithm of object detection is invalid.")
            return

        self.detector.loadModel(detection_speed=speed)
        self.custom_objects = self.detector.CustomObjects(person=True)

    def put_text_to_img(self,
                        img,
                        text,
                        pos=(50, 50),
                        fontColor=(0, 0, 255),
                        lineType=2):
        """
        Put text to an image.
        
        Args:
            img : An image represented by numpy array. You can use cv2.imread(path_to_iamge) to read an image in the filesystem by
                    giving the image path.
            text (str): The text what you want to put to the image.
            pos (tuple): x and y position relative to the origin (0,0) at the top left.
            fontColor (tuple): R G B channel.
            lineType (int): Type of line.
        
        Returns:
            void
        
        """
        if img is None:
            print("Put text to a none image.")
            return

        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 1

        cv2.putText(img, text, pos, font, fontScale, fontColor, lineType)

    def capture_frame_by_stream_wrapper(self,
                                        image_prefix="stream",
                                        mprob=30,
                                        num_im=6,
                                        time_interval=10,
                                        tz=None):
        """
        A wrapper of the function capture_frame_by_stream.
        
        Args:
            image_prefix (str): Prefix of target images. The postfix is numerated by numbers.
            mprob (int): Minimum probability to be a person.
            num_im (int): How many images will be taken.
            time_interval (int): Time interval of taking next image, the unit is second.
			tz (str): Time zone from package pytz. Default is None, then apply utc time. Use function pytz.all_timezones to get the list of timezones.
        
        Returns:
            void
        
        """
        print(
            "The current conuting function is based on capture frame by stream."
        )

        dir_path = os.path.join(self.target_img_path, image_prefix)
        if not os.path.isdir(dir_path):
            os.makedirs(dir_path)
        frames_res = []
        if num_im <= 0:
            try:
                i = 0
                while True:
                    i = i + 1
                    frame_res = self.capture_frame_by_stream(
                        image_prefix, i, mprob, tz)
                    frames_res.append(frame_res)
                    time.sleep(time_interval)
            except KeyboardInterrupt:
                print('Abort by key interrupt.')
                return frames_res
        else:
            for i in range(num_im):
                frame_res = self.capture_frame_by_stream(
                    image_prefix, i, mprob, tz)
                frames_res.append(frame_res)
                time.sleep(time_interval)

            return frames_res

    def capture_frame_by_stream(self,
                                image_prefix="stream",
                                image_index=0,
                                mprob=30,
                                tz=None):
        """
        Capture a frame from an online stream, namely a webcam.

        Args:
            image_prefix (str): Prefix of target images. The postfix is numerated by numbers.
            image_index (int): The postfix of target images. By default, numerated from 0.
            mprob (int): Minimum probability to be a person.
            tz (str): Time zone from the package pytz. Default is None, which applies UTC time. Use pytz.all_timezones to get the list of timezones.

        Returns:
            tuple: The name of the target image, the number of persons detected in the image by the model, and the current time.
        """

        video_cap = cv2.VideoCapture(self.stream.url)
        dir_path = os.path.join(self.target_img_path, image_prefix)

        if video_cap is None:
            print("Open webcam [%s] failed." % self.stream.url)
            return None
        else:
            ret, frame = video_cap.read()

            if not ret:
                print("Captured frame is broken.")
                video_cap.release()
                return None
            else:
                print("-----------------------------------------------------")

                if tz is None:
                    current_time = datetime.utcnow().strftime(
                        "%a %Y-%m-%d %H:%M:%S")
                    print('### time zone is None, therefore use utc time ###')
                else:
                    current_time = datetime.now(
                        timezone(tz)).strftime("%a %Y-%m-%d %H:%M:%S")

                print("Capturing frame %d." % image_index)
                target_img_name = "{}{}.png".format(image_prefix, image_index)
                # frame = crop_frame(frame, target_img_name)  # comment to unuse the crop function.

                cv2.imwrite(os.path.join(dir_path, target_img_name), frame)

                detections = self.detector.detectCustomObjectsFromImage(
                    custom_objects=self.custom_objects,
                    input_image=os.path.join(dir_path, target_img_name),
                    output_image_path=os.path.join(dir_path, target_img_name),
                    minimum_percentage_probability=mprob)

                print(
                    "The number of person in frame %d (%s):" %
                    (image_index, target_img_name), len(detections))
                print(
                    "The current time in frame %d (%s):" %
                    (image_index, target_img_name), current_time)

                img = cv2.imread(os.path.join(dir_path, target_img_name))
                # put the number of persons to the image and put timestamp to the image
                self.put_text_to_img(
                    img, "The number of person:%s " % str(len(detections)))
                img_height, img_width = img.shape[0:2]
                self.put_text_to_img(img,
                                     "The current time:%s " % current_time,
                                     pos=(int(img_width * 0.1),
                                          int(img_height * 0.9)))

                cv2.imwrite(os.path.join(dir_path, target_img_name), img)
                video_cap.release()

                return target_img_name, len(detections), current_time

    def capture_frame_by_screenshot_wrapper(self,
                                            image_prefix="screenshot",
                                            mprob=30,
                                            num_im=6,
                                            time_interval=10,
                                            tz=None):
        """
        A wrapper of the function capture_frame_by_screenshot.
        
        Args:
            image_prefix (str): Prefix of target images. The postfix is numerated by numbers.
            mprob (int): Minimum probability to be a person.
            num_im (int): How many images will be taken.
            time_interval (int): Time interval of taking next image, the unit is second.
			tz (str): Time zone from package pytz. Default is None, then apply utc time. Use function pytz.all_timezones to get the list of timezones.

        
        Returns:
            void
        
        """
        print(
            "The current conuting function is based on capture frame by screenshot."
        )

        frames_res = []
        dir_path = os.path.join(self.target_img_path, image_prefix)
        if not os.path.isdir(dir_path):
            os.makedirs(dir_path)
        if num_im <= 0:
            try:
                i = 0
                while True:
                    i = i + 1
                    frame_res = self.capture_frame_by_screenshot(
                        image_prefix, i, mprob, tz)
                    frames_res.append(frame_res)
                    time.sleep(time_interval)
            except KeyboardInterrupt:
                if self.driver is not None:
                    self.driver.quit()
                print('Abort by key interrupt.')
                return frames_res
        else:
            for i in range(num_im):
                frame_res = self.capture_frame_by_screenshot(
                    image_prefix, i, mprob, tz)
                frames_res.append(frame_res)
                time.sleep(time_interval)

            if self.driver is not None:
                self.driver.quit()

            return frames_res

    def capture_frame_by_screenshot(self,
                                    image_prefix="screenshot",
                                    image_index=0,
                                    mprob=30,
                                    num_im=6,
                                    tz=None):
        """
        Capture an image by taking a screenshot of a website opened in the browser.

        Args:
            image_prefix (str): Prefix of target images. The postfix is numerated by numbers.
            image_index (int): The postfix of target images. By default, numerated from 0.
            mprob (int): Minimum probability to be a person.
            tz (str): Time zone from the package pytz. Default is None, which applies UTC time. Use pytz.all_timezones to get the list of timezones.

        Returns:
            tuple: The name of the target image, the number of persons detected in the image by the model, and the current time.

        """

        dir_path = os.path.join(self.target_img_path, image_prefix)

        if self.driver is None:
            print("Web driver is none.")
            return None
        else:
            print("-----------------------------------------------------")

            if tz is None:
                current_time = datetime.utcnow().strftime(
                    "%a %Y-%m-%d %H:%M:%S")
                print('### time zone is None, therefore use utc time###')
            else:
                current_time = datetime.now(
                    timezone(tz)).strftime("%a %Y-%m-%d %H:%M:%S")

            target_img_name = "{}{}.png".format(image_prefix, image_index)
            print("Taking screenshot %d..." % image_index)
            self.driver.save_screenshot(os.path.join(dir_path,
                                                     target_img_name))
            detections = self.detector.detectCustomObjectsFromImage(
                custom_objects=self.custom_objects,
                input_image=os.path.join(dir_path, target_img_name),
                output_image_path=os.path.join(dir_path, target_img_name),
                minimum_percentage_probability=mprob)

            print(
                "The number of person in frame %d (%s):" %
                (image_index, target_img_name), len(detections))
            print(
                "The current time in frame %d (%s):" %
                (image_index, target_img_name), current_time)

            img = cv2.imread(os.path.join(dir_path, target_img_name))
            # put the number of persons to the image
            self.put_text_to_img(
                img, "The number of person is:%s" % str(len(detections)))
            img_height, img_width = img.shape[0:2]
            self.put_text_to_img(img,
                                 "The current time:%s " % current_time,
                                 pos=(int(img_width * 0.1),
                                      int(img_height * 0.9)))

            cv2.imwrite(os.path.join(dir_path, target_img_name), img)

            return target_img_name, len(detections), current_time

    def init_webdriver(self):
        """
       Initialize the webdriver of Chrome by using the python lib selenium.
        
        Args:
            Void
        
        Returns:
            Void
        """

        self.driver = webdriver.Chrome(
        )  # Optional argument, if not specified will search path.
        self.driver.get(self.stream_link)
        time.sleep(15)  # Jump over the ads

    def store_info_in_df_csv(
        self,
        infos,
        cvs_filename="counting_person",
    ):
        """
       Collect test dataset by storing the image name and the detected number of persons in a csv file.
        
        Args:
            infos (list): The infos of images contain the image name, the number of detected persons, current time of given time zone
                          and the empty ground-truth.
            cvs_filename (str): The name of csv file.
        
        Returns:
            df (DataFrame): Show the image name, the detected number of persons, current time of given time zone
        """

        df = pd.DataFrame(np.array(infos),
                          columns=['image_name', 'detected_num', 'time'])
        # df["counted_num"] = ""  #only for baseline
        df.to_csv(path_or_buf=os.path.join(self.target_img_path, "%s.csv" %
                                           cvs_filename))
        return df
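
An end-to-end sketch of how the CountingObject class above appears intended to be used: open a stream (the URL below is hypothetical), initialize the person detector, capture a few frames and persist the per-frame counts:

# Hypothetical stream URL; anything resolvable by streamlink should work.
counter = CountingObject("https://www.youtube.com/watch?v=example")
counter.detector_init(algo="yolo_tiny", speed="fast")

# Capture 3 frames, 10 seconds apart, keeping person detections above 30% probability.
infos = counter.capture_frame_by_stream_wrapper(
    image_prefix="stream", mprob=30, num_im=3, time_interval=10, tz="Europe/Berlin")

counter.store_info_in_df_csv(infos, cvs_filename="counting_person")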
Example #30
output_topic = 'output3'
brokers = "G01-01:2181,G01-02:2181,G01-03:2181,G01-04:2181,G01-05:2181,G01-06:2181,G01-07:2181,G01-08:2181," \
          "G01-09:2181,G01-10:2181,G01-11:2181,G01-12:2181,G01-13:2181,G01-14:2181,G01-15:2181,G01-16:2181"


def my_decoder(s):
    return s


kafkaStream = KafkaUtils.createStream(ssc, brokers, 'test-consumer-group-3', {input_topic: 15},
                                      valueDecoder=my_decoder)
producer = KafkaProducer(bootstrap_servers='G01-01:9092', compression_type='gzip', batch_size=163840,
                         buffer_memory=33554432, max_request_size=20485760)

detector = ObjectDetection()
detector.setModelTypeAsTinyYOLOv3()  # !!!tiny
detector.setModelPath('/home/hduser/yolo-tiny.h5')
detector.loadModel(detection_speed="flash")
custom = detector.CustomObjects(person=True, bottle=True, knife=True, cell_phone=True, fork=True)
graph = tf.get_default_graph()

broadcast_detector = sc.broadcast(detector)
broadcast_custom = sc.broadcast(custom)
broadcast_graph = sc.broadcast(graph)
broadcast_producer = sc.broadcast(producer)


def obj_detection(ss):
    key = ss[0]
    value = ss[1]