def detect_image(self, image):
        """Run YOLO on *image* (ndarray), draw boxes and labels, and return results.

        Returns (annotated image, ObjectsList), where each entry is
        [top, left, bottom, right, mid_v, mid_h, label, scores].
        """
        # NOTE(review): image_data is only bound inside this branch; if
        # model_image_size were (None, None) the sess.run below would raise
        # UnboundLocalError — confirm the model always has a fixed input size.
        if self.model_image_size != (None, None):
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            image_data = image_preporcess(np.copy(image),
                                          tuple(reversed(self.model_image_size)))

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.shape[0], image.shape[1]],  # [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        thickness = (image.shape[0] + image.shape[1]) // 600
        fontScale = 1
        ObjectsList = []

        for idx, cls in reversed(list(enumerate(out_classes))):
            name = self.class_names[cls]
            score = out_scores[idx]

            label = '{} {:.2f}'.format(name, score)
            scores = '{:.2f}'.format(score)

            # round the raw box and clamp it to the image bounds
            top, left, bottom, right = out_boxes[idx]
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.shape[0], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.shape[1], np.floor(right + 0.5).astype('int32'))

            # box midpoints (mid_h is the vertical middle, mid_v the horizontal)
            mid_h = top + (bottom - top) / 2
            mid_v = left + (right - left) / 2

            # object rectangle, then a filled strip carrying the label text
            cv2.rectangle(image, (left, top), (right, bottom),
                          self.colors[cls], thickness)
            (test_width, text_height), baseline = cv2.getTextSize(
                label, cv2.FONT_HERSHEY_SIMPLEX, thickness / self.text_size, 1)
            cv2.rectangle(image, (left, top),
                          (left + test_width, top - text_height - baseline),
                          self.colors[cls], thickness=cv2.FILLED)
            cv2.putText(image, label, (left, top - 2), cv2.FONT_HERSHEY_SIMPLEX,
                        thickness / self.text_size, (0, 0, 0), 1)

            ObjectsList.append([top, left, bottom, right, mid_v, mid_h, label, scores])

        return image, ObjectsList
# Esempio n. 2 — scraped example separator (was followed by a stray "0"); commented out so the file parses
    def detect_image_without_drawing(self, image):
        """Run YOLO on *image* and return detections without drawing anything.

        Each entry is [left, top, right, bottom, mid_v, mid_h, class_id,
        score_string] — note the coordinate order differs from detect_image,
        and the raw class id is stored instead of a formatted label.
        """
        if self.model_image_size != (None, None):
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            image_data = image_preporcess(np.copy(image),
                                          tuple(reversed(self.model_image_size)))

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.shape[0], image.shape[1]],  # [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        img_h, img_w = image.shape[0], image.shape[1]
        ObjectsList = []

        for idx, cls in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[cls]  # looked up but unused
            score_text = '{:.2f}'.format(out_scores[idx])

            # round the raw box and clamp it to the image bounds
            top, left, bottom, right = out_boxes[idx]
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(img_h, np.floor(bottom + 0.5).astype('int32'))
            right = min(img_w, np.floor(right + 0.5).astype('int32'))

            # box midpoints (mid_h vertical, mid_v horizontal)
            mid_h = top + (bottom - top) / 2
            mid_v = left + (right - left) / 2

            ObjectsList.append([left, top, right, bottom, mid_v, mid_h,
                                cls, score_text])

        return ObjectsList
    def detect_image(self, image):
        """Detect objects on a 416x416 resize of *image*, OCR licence plates,
        and draw boxes for everything else.

        Class 15 is skipped entirely; class 4 (presumably a licence plate --
        confirm against ``self.class_names``) is cropped, passed to
        ``read_plate.detect`` and the text drawn on the image; all other
        classes get a rectangle plus label.  Returns (annotated image,
        ObjectsList) with entries [top, left, bottom, right, mid_v, mid_h,
        label, scores].
        """
        # if self.model_image_size != (None, None):
        #     assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
        #     assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
        #     boxed_image = image_preporcess(np.copy(image), tuple(reversed(self.model_image_size)))
        #     image_data = boxed_image

        # image_data = image/255
        # image_data = np.expand_dims(image_data,0)
        # inference always runs on a fixed 416x416 copy of the input
        frame = cv2.resize(image, (416, 416))
        if self.model_image_size != (None, None):
            assert self.model_image_size[
                0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[
                1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = image_preporcess(
                np.copy(frame), tuple(reversed(self.model_image_size)))
            image_data = boxed_image

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape:
                [frame.shape[0],
                 frame.shape[1]],  # [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        # print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
        # scale factors from 416-space back to the original image size
        Rx = image.shape[1] / 416
        Ry = image.shape[0] / 416

        thickness = (image.shape[0] + image.shape[1]) // 600
        fontScale = 1
        ObjectsList = []
        # print(out_classes)

        for i, c in reversed(list(enumerate(out_classes))):
            # class 15 is ignored entirely -- TODO confirm which label that is
            if c == 15:
                continue
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            # label = '{}'.format(predicted_class)
            scores = '{:.2f}'.format(score)

            top, left, bottom, right = box
            # NOTE(review): the box is still in 416-space here but is clipped
            # against the full-size image bounds; harmless when the image is
            # larger than 416 px, verify for smaller inputs.
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.shape[0],
                         np.floor(bottom + 0.5).astype('int32'))
            right = min(image.shape[1], np.floor(right + 0.5).astype('int32'))

            # map the box back onto the original image
            top = int(top * Ry)
            left = int(left * Rx)
            bottom = int(bottom * Ry)
            right = int(right * Rx)

            if c == 4:
                try:
                    # padded crop (15 px below, 20 px sideways); a negative
                    # `left - 20` would wrap around via Python slicing and is
                    # only caught by the bare except below
                    img = image[top:bottom + 15, left - 20:right + 20]
                    text = read_plate.detect(img)
                    # text = Image_to_string(img)
                    text = text[:2] + '000'
                    cv2.putText(image, text[:5], (left, top),
                                cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255),
                                3)
                    print(text)
                except:
                    # NOTE(review): bare except hides real OCR errors
                    print('sth wrong')
            # x, y, w, h = box
            # print(box)
            # top = max(0, np.floor(x + 0.5).astype(int))
            # left = max(0, np.floor(y + 0.5).astype(int))
            # right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))
            # bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))

            # box midpoints (mid_h vertical, mid_v horizontal)
            mid_h = (bottom - top) / 2 + top
            mid_v = (right - left) / 2 + left

            # put object rectangle (plates are annotated with text instead)
            if c != 4:
                cv2.rectangle(image, (left, top), (right, bottom),
                              self.colors[c], thickness)

                # get text size
                (test_width, text_height), baseline = cv2.getTextSize(
                    label, cv2.FONT_HERSHEY_SIMPLEX,
                    thickness / self.text_size, 1)

                # put text rectangle
                cv2.rectangle(
                    image, (left, top),
                    (left + test_width, top - text_height - baseline),
                    self.colors[c],
                    thickness=cv2.FILLED)

                # put text above rectangle
                cv2.putText(image, label, (left, top - 2),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            thickness / self.text_size, (0, 0, 0), 1)

            # add everything to list
            ObjectsList.append(
                [top, left, bottom, right, mid_v, mid_h, label, scores])

        return image, ObjectsList
# Esempio n. 4 — scraped example separator (was followed by a stray "0"); commented out so the file parses
        def detect_image(self, image):
            """Detect objects, text labels (class 4) and barcodes (class 15),
            pair objects with nearby texts, and count objects leaving the
            left edge of the frame.

            Returns (image, detect, obj_text, match_text, no_obj,
            not_match_list, barcode_string).  `detect` is 0 when fewer
            detections than expected were found, else 1; `no_obj` holds the
            running numbers of objects that got no nearby text.  Class-4
            crops are OCR'd with ``read_plate`` (presumably price text -- see
            the translated comment below; confirm), class-15 crops are
            decoded with ``barcode_detect``.
            """

            # if self.model_image_size != (None, None):
            #     assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            #     assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            #     boxed_image = image_preporcess(np.copy(image), tuple(reversed(self.model_image_size)))
            #     image_data = boxed_image

            # image_data = image/255
            # image_data = np.expand_dims(image_data,0)
            if self.model_image_size != (None, None):
                assert self.model_image_size[
                    0] % 32 == 0, 'Multiples of 32 required'
                assert self.model_image_size[
                    1] % 32 == 0, 'Multiples of 32 required'
                boxed_image = image_preporcess(
                    np.copy(image), tuple(reversed(self.model_image_size)))
                image_data = boxed_image

            out_boxes, out_scores, out_classes = self.sess.run(
                [self.boxes, self.scores, self.classes],
                feed_dict={
                    self.yolo_model.input: image_data,
                    self.input_image_shape:
                    [image.shape[0],
                     image.shape[1]],  # [image.size[1], image.size[0]],
                    K.learning_phase(): 0
                })

            # print('Found {} boxes for {}'.format(len(out_boxes), 'img'))

            thickness = (image.shape[0] + image.shape[1]) // 600
            fontScale = 1
            ObjectsList = []
            # print(out_classes)

            # left x-coordinates of every class-4 (text) detection; used
            # below to pair each object with the closest text horizontally
            left_axes = np.array([
                max(0,
                    np.floor(out_boxes[i][1] + 0.5).astype('int32'))
                for i, c in enumerate(out_classes) if c == 4
            ])
            # each text is expected to come with 3 detections in total --
            # TODO confirm this heuristic
            num_obj_true = 3 * len(left_axes)
            if len(out_classes) < num_obj_true:
                detect = 0
            else:
                detect = 1

            # running object numbers; entries are removed once a text is
            # paired with the corresponding object
            no_obj = [str(i + self.num_obj) for i in range(len(left_axes))]

            list_text = []
            list_obj = []
            pair = []
            barcode_list = []

            for i, c in enumerate(out_classes):

                predicted_class = self.class_names[c]

                box = out_boxes[i]
                score = out_scores[i]

                # label = '{} {:.2f}'.format(predicted_class, score)
                label = '{}'.format(predicted_class)
                scores = '{:.2f}'.format(score)

                top, left, bottom, right = box
                top = max(0, np.floor(top + 0.5).astype('int32'))
                left = max(0, np.floor(left + 0.5).astype('int32'))
                bottom = min(image.shape[0],
                             np.floor(bottom + 0.5).astype('int32'))
                right = min(image.shape[1],
                            np.floor(right + 0.5).astype('int32'))

                mid_h = (bottom - top) / 2 + top
                mid_v = (right - left) / 2 + left

                # an object touching the left edge counts as having left the
                # frame; the 2 s cooldown debounces repeated counting
                if left < 10 and (time.time() -
                                  self.start_tracking) > 2 and i == 0:
                    self.num_obj += 1
                    self.start_tracking = time.time()
                # print(self.num_obj)

                if (c != 15) and (c != 4):
                    # ordinary object: draw it, then pair it with the text
                    # whose left edge lies within 200 px
                    cv2.rectangle(image, (left, top), (right, bottom),
                                  self.colors[c], thickness)
                    # (test_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX,
                    #                                                       thickness / self.text_size, 1)
                    #
                    # # put text rectangle
                    # cv2.rectangle(image, (left, top), (left + test_width, top - text_height - baseline), self.colors[c],
                    #               thickness=cv2.FILLED)
                    #
                    # # put text above rectangle
                    # cv2.putText(image, label, (left, top - 2), cv2.FONT_HERSHEY_SIMPLEX, thickness / self.text_size,
                    #             (0, 0, 0),1)
                    # distance = np.absolute(left - left_axes)
                    # print(distance)
                    distance = np.absolute(left - left_axes)
                    distance_index = np.where(distance < 200)
                    # print(distance_index)
                    distance_index = list(distance_index[0])
                    try:
                        # NOTE(review): assumes list_text is already populated
                        # for this index, i.e. texts appear before their
                        # objects in out_classes -- confirm the ordering
                        del no_obj[distance_index[0]]
                        # print(left)
                        # print(list_text)
                        # print(list_text[distance_index[0]])
                        pair.append([
                            distance_index[0], label,
                            list_text[distance_index[0]]
                        ])

                    except:
                        # NOTE(review): bare except hides real errors
                        print('Something Wrong')
                elif c == 4:
                    # having_text = 1
                    # image = scipy.ndimage.zoom(image,(2,2,1),order = 1)## upsampling image

                    try:
                        # padded crop, OCR, then force the last three
                        # characters to '000' and keep at most six
                        img = image[top:bottom + 15, left - 20:right + 20]
                        text = read_plate.detect(img)

                        n = len(text) - 3
                        text = text[:n] + '000'
                        list_text.append(text[:6])
                        cv2.putText(image, text[:6], (left, top),
                                    cv2.FONT_HERSHEY_SIMPLEX, 2,
                                    (255, 255, 255), 3)

                    except:
                        print('sth wrong in text')
                elif c == 15:
                    try:
                        img = image[top:bottom + 15, left - 20:right + 20]
                        text = barcode_detect(img).decode("utf-8")
                        # cv2.putText(image, text[:6], (left, top), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)
                        # print(text)
                        barcode_list.append(text)
                    except:
                        print('sth wrong')
                        barcode_list.append('error')

                    #### display the price text on the object (translated from Vietnamese)

                    # print(text[:6])
            not_match_list = []
            # print(pair)
            obj_text = make_no_obj_text(no_obj)
            barcode_string = make_no_obj_text(barcode_list)

            if len(no_obj) == 0:
                obj_text = '...'

            # verify each (object, text) pair against the expected mapping
            for idx, object, text in pair:
                if check([object, text]):
                    continue
                else:
                    not_match_list.append(str(idx + self.num_obj))
            if len(not_match_list) == 0:
                match_text = '...'
            else:
                match_text = make_no_obj_text(not_match_list)

                # add everything to list
                # ObjectsList.append([top, left, bottom, right, mid_v, mid_h, label, scores])

            return image, detect, obj_text, match_text, no_obj, not_match_list, barcode_string
# Esempio n. 5 — scraped example separator (was followed by a stray "0"); commented out so the file parses
    def detect_image(self, image):
        """Run YOLO on *image* and report dangerous-object detections.

        Nothing is drawn on the image and ObjectsList is always returned
        empty; detections are only reported via ``print``.
        """
        if self.model_image_size != (None, None):
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            image_data = image_preporcess(np.copy(image),
                                          tuple(reversed(self.model_image_size)))

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.shape[0], image.shape[1]],  #[image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        global second_pass
        if second_pass == True:
            # NOTE(review): this instance is created but never used -- confirm
            # whether constructing YOLO() has intended side effects here
            yolo = YOLO()

        thickness = (image.shape[0] + image.shape[1]) // 600
        fontScale = 1
        ObjectsList = []
        global Motor
        global Noframes
        global frame

        # classes that trigger the alert marker below
        weapon_names = ('Hammer', 'Axe', 'Helmet', 'Kitchen_Knife', 'Knife',
                        'Handgun')
        for idx, cls in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[cls]
            score = out_scores[idx]
            #print((score))
            print(predicted_class)
            if predicted_class in weapon_names:
                print('&&&&&&&&&&&')

            label = '{} {:.2f}'.format(predicted_class, score)
            #label = '{}'.format(predicted_class)
            scores = '{:.2f}'.format(score)

            # round the raw box and clamp it to the image bounds
            top, left, bottom, right = out_boxes[idx]
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.shape[0], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.shape[1], np.floor(right + 0.5).astype('int32'))

            mid_h = top + (bottom - top) / 2
            mid_v = left + (right - left) / 2
            global Helmet_Detected
            # 'Handgun' is deliberately absent here: the original elif chain
            # never printed 'abnormal' for it
            if predicted_class in ('Axe', 'Hammer', 'Helmet', 'Kitchen_Knife',
                                   'Knife'):
                print('abnormal')

        return image, ObjectsList
    def detect_image(self, image):
        """Detect objects and return the largest padded crop of a "B*" class.

        Runs YOLO on *image*; every detection whose label starts with "B"
        (confirm the intended class names against ``self.class_names``) gets
        its box expanded by 20 px per side (falling back to the raw edge when
        the padding would leave the image) and cropped.  Returns
        (image, crop) where crop is the largest such region by area, or the
        sentinel ``-1`` when there was no matching detection.
        """
        if self.model_image_size != (None, None):
            assert self.model_image_size[
                0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[
                1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = image_preporcess(
                np.copy(image), tuple(reversed(self.model_image_size)))
            image_data = boxed_image

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape:
                [image.shape[0],
                 image.shape[1]],  #[image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        cropImgs = []
        areas = []

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            score = out_scores[i]
            label = '{} {:.2f}'.format(predicted_class, score)

            # round the raw box and clamp it to the image bounds
            top, left, bottom, right = out_boxes[i]
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.shape[0],
                         np.floor(bottom + 0.5).astype('int32'))
            right = min(image.shape[1], np.floor(right + 0.5).astype('int32'))

            if label.startswith("B"):
                # expand the box by 20 px per side; keep the unpadded edge
                # when the padding would cross the image border (original
                # behaviour -- it does NOT clamp to the border itself)
                boxLeft = left if int(left - 20) < 0 else int(left - 20)
                boxTop = top if int(top - 20) < 0 else int(top - 20)
                boxRight = right if int(right + 20) > image.shape[1] else int(right + 20)
                boxBottom = bottom if int(bottom + 20) > image.shape[0] else int(bottom + 20)

                crop_img = image[boxTop:boxBottom, boxLeft:boxRight]
                areas.append((boxBottom - boxTop) * (boxRight - boxLeft))
                cropImgs.append(crop_img)

        # Fix: the original wrapped max(areas) in a bare except to handle the
        # no-detection case, which also swallowed any real error.  Test the
        # condition explicitly instead; the -1 sentinel is kept for callers.
        if areas:
            crop = cropImgs[areas.index(max(areas))]
        else:
            crop = -1

        return image, crop
# Esempio n. 7 — scraped example separator (was followed by a stray "0"); commented out so the file parses
    def detect_image(self, image):
        """Locate the class-14 detection (TODO: confirm which label that is)
        in *image*.

        Inference runs on a 416x416 resize; the winning box is rescaled back
        to the original image.  Returns ``[None]`` when no class-14 object is
        found, otherwise ``[class_id, left, top, right, bottom, confidence]``.
        """
        frame = cv2.resize(image, (416, 416))
        if self.model_image_size != (None, None):
            assert self.model_image_size[
                0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[
                1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = image_preporcess(
                np.copy(frame), tuple(reversed(self.model_image_size)))
            image_data = boxed_image

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape:
                [frame.shape[0],
                 frame.shape[1]],  # [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        # scale factors from 416-space back to the original image
        Rx = image.shape[1] / 416
        Ry = image.shape[0] / 416

        # Fix: right_text/bottom_text were previously unassigned until a
        # class-14 hit; initialise all outputs for safety.
        left_text = top_text = right_text = bottom_text = _conf_text = 0
        _class_text = None
        for i, c in reversed(list(enumerate(out_classes))):
            box = out_boxes[i]
            score = out_scores[i]

            top, left, bottom, right = box
            # NOTE(review): clipping here uses full-size image bounds while
            # the box is still in 416-space; verify for images < 416 px
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.shape[0],
                         np.floor(bottom + 0.5).astype('int32'))
            right = min(image.shape[1], np.floor(right + 0.5).astype('int32'))

            # map the box back onto the original image
            top = int(top * Ry)
            left = int(left * Rx)
            bottom = int(bottom * Ry)
            right = int(right * Rx)

            if c == 14:
                # last assignment in the reversed loop wins, i.e. the
                # detection with the lowest original index
                left_text = left
                top_text = top
                right_text = right
                bottom_text = bottom
                _class_text = c
                _conf_text = score

        print(_class_text)  # debug trace kept from the original
        # Fix: compare against None with `is`, not `==`
        if _class_text is None:
            return [_class_text]
        return [
            _class_text, left_text, top_text, right_text, bottom_text,
            _conf_text
        ]
    def detect_image(self, image):
        """Draw every detection on *image* and return the class-11 box.

        Inference runs on a 416x416 resize; boxes are rescaled back to the
        original image before drawing.  Returns ``[None]`` when no class-11
        object (TODO: confirm which label that is) was detected, otherwise
        ``[class_id, left, top, right, bottom]`` — the last assignment in the
        reversed loop wins, i.e. the detection with the lowest index.
        """
        # if self.model_image_size != (None, None):
        #     assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
        #     assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
        #     boxed_image = image_preporcess(np.copy(image), tuple(reversed(self.model_image_size)))
        #     image_data = boxed_image

        # image_data = image/255
        # image_data = np.expand_dims(image_data,0)
        # inference runs on a fixed 416x416 copy of the input
        frame = cv2.resize(image, (416, 416))
        if self.model_image_size != (None, None):
            assert self.model_image_size[
                0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[
                1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = image_preporcess(
                np.copy(frame), tuple(reversed(self.model_image_size)))
            image_data = boxed_image

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape:
                [frame.shape[0],
                 frame.shape[1]],  # [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        # print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
        # scale factors from 416-space back to the original image
        Rx = image.shape[1] / 416
        Ry = image.shape[0] / 416

        thickness = (image.shape[0] + image.shape[1]) // 600
        fontScale = 1
        ObjectsList = []
        _class_text = None
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            # label = '{}'.format(predicted_class)
            scores = '{:.2f}'.format(score)

            top, left, bottom, right = box
            # NOTE(review): the box is clipped against the full-size image
            # bounds while still in 416-space; verify for images < 416 px
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.shape[0],
                         np.floor(bottom + 0.5).astype('int32'))
            right = min(image.shape[1], np.floor(right + 0.5).astype('int32'))

            # map the box back onto the original image
            top = int(top * Ry)
            left = int(left * Rx)
            bottom = int(bottom * Ry)
            right = int(right * Rx)

            # box midpoints (computed but unused in this variant)
            mid_h = (bottom - top) / 2 + top
            mid_v = (right - left) / 2 + left

            # put object rectangle
            cv2.rectangle(image, (left, top), (right, bottom), self.colors[c],
                          thickness)

            # get text size
            (test_width, text_height), baseline = cv2.getTextSize(
                label, cv2.FONT_HERSHEY_SIMPLEX, thickness / self.text_size, 1)

            # put text rectangle
            cv2.rectangle(image, (left, top),
                          (left + test_width, top - text_height - baseline),
                          self.colors[c],
                          thickness=cv2.FILLED)

            # put text above rectangle
            cv2.putText(image, label, (left, top - 2),
                        cv2.FONT_HERSHEY_SIMPLEX, thickness / self.text_size,
                        (0, 0, 0), 1)

            # remember the class-11 box for the return value
            if c == 11:
                left_text = left
                top_text = top
                bottom_text = bottom
                right_text = right
                _class_text = c

        if _class_text == None:
            return [_class_text]
        else:
            return [_class_text, left_text, top_text, right_text, bottom_text]