def detect_common_objects(image):
    """Detects common objects"""

    bbox, label, conf = cv.detect_common_objects(image, confidence=0.25, model="yolov4-tiny")
    output_image = draw_bbox(image, bbox, label, conf)

    return output_image
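The wrapper above assumes `cv` is the cvlib module and `draw_bbox` has been imported (note that it shadows cvlib's own `detect_common_objects` name). A minimal usage sketch, with a hypothetical image path:

import cv2
import cvlib as cv
from cvlib.object_detection import draw_bbox

image = cv2.imread('street.jpg')          # hypothetical input path
annotated = detect_common_objects(image)  # the wrapper defined above
cv2.imwrite('street_annotated.jpg', annotated)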
def process(time):
    counts = {1: 29, 2: 26, 3: 75, 4: 51}
    for i in range(4):
        if int(time) <= lot_limit[i]:
            img_name = '../img/' + lot[i] + '_' + time + '.jpg'
            im = cv2.imread(img_name)
            bbox, label, conf = cv.detect_common_objects(im)
            output_image = draw_bbox(im,
                                     bbox,
                                     label,
                                     conf,
                                     # one random colour per COCO class
                                     colors=np.random.uniform(0, 255, size=(80, 3)))
            # plt.savefig("../output_img/"+lot[i] + '_' + time + '.jpg')

            cv2.imwrite("../output_img/" + lot[i] + '_' + time + '.jpg',
                        output_image)
            # plt.subplots()

            # plt.imshow(output_image)
            # plt.show()
            count = label.count('car')
            counts[i + 1] -= count
        else:
            counts[i + 1] = -1
    return counts
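process() relies on module-level lot and lot_limit lists that the snippet does not show; a hypothetical setup so the loop above can be followed end to end:

import cv2
import cvlib as cv
import numpy as np
from cvlib.object_detection import draw_bbox

lot = ['lotA', 'lotB', 'lotC', 'lotD']  # image-name prefixes (assumed)
lot_limit = [18, 18, 20, 22]            # latest hour each lot is photographed (assumed)

counts = process('17')  # reads ../img/lotA_17.jpg, ... and returns the per-lot counts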
Example #3
def live():
    loadvideo = cv2.VideoCapture('data/demo.mp4')

    if not loadvideo.isOpened():
        print("Could not open video")
        exit()

    while loadvideo.isOpened():
        status, frame = loadvideo.read()

        if not status:
            break

        bbox, label, conf = cv.detect_common_objects(frame,
                                                     confidence=0.25,
                                                     model='yolov3-tiny')

        print(bbox, label, conf)

        out = draw_bbox(frame, bbox, label, conf, write_conf=True)

        cv2.imshow("Real-time object detection", out)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    loadvideo.release()
    cv2.destroyAllWindows()
def check_for_objects(image_path):
    # load image with OpenCV
    image = cv2.imread(image_path)
    # get dimensions of original image
    og_height, og_width, _ = image.shape
    total_area = 0
    # with computer vision, identify objects and return the bounding-box
    # coordinates, corresponding labels and confidence scores
    coordinates, item, confidence = cvlib.detect_common_objects(image)
    if coordinates:
        # accumulate the area of each object found
        for coordinate in coordinates:
            # unpack the coordinates of each object found
            x1, y1, x2, y2 = coordinate
            total_area += (x2 - x1) * (y2 - y1)
        # check whether the detected objects occupy at least 50% of the image
        # (summing widths and heights separately and multiplying the totals,
        # as before, overstates the combined area)
        if total_area / (og_width * og_height) > 0.5:
            # draw rectangles around the objects, then convert BGR back to
            # RGB since OpenCV loads images in BGR channel order
            output_image = cv2.cvtColor(
                draw_bbox(image, coordinates, item, confidence),
                cv2.COLOR_BGR2RGB)
            pyplot.imshow(output_image)
            return True
    return False
Example #5
def video_stream():
    # grab the frame from the threaded video stream
    frame = cap.read()
    frame = frame[1] if args.get("input", False) else frame
    #print(_)

    # convert the input frame from BGR to RGB, then resize it to a
    # width of 750px (to speed up processing)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    rgb = imutils.resize(rgb, width=750)
    #plt.imshow(frame)
    #plt.show()

    #r = frame.shape[1] / float(rgb.shape[1])

    # detect the (x, y)-coordinates of the bounding boxes for each common
    # object in the frame, along with its label and confidence score
    bbox, label, conf = cv.detect_common_objects(rgb, enable_gpu=True)
    frame = draw_bbox(rgb, bbox, label, conf)
    #cv2.line(frame, (0, 250),(750,250),(0, 0, 0))

    # display the output frame on the screen
    if args["display"] > 0:
        cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        img = Image.fromarray(cv2image)
        imgtk = ImageTk.PhotoImage(image=img)
        lmain.imgtk = imgtk
        lmain.configure(image=imgtk)
        lmain.after(1, video_stream)
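video_stream() leans on several globals defined elsewhere: the capture cap, the CLI args dict, and a Tkinter label lmain. A hypothetical wiring sketch under those assumptions:

import tkinter as tk
import cv2
from imutils.video import VideoStream
from PIL import Image, ImageTk

args = {"input": None, "display": 1}  # assumed structure
# a file path yields (ret, frame) tuples, hence the frame[1] unpacking above
cap = cv2.VideoCapture(args["input"]) if args.get("input") else VideoStream(src=0).start()

root = tk.Tk()
lmain = tk.Label(root)
lmain.pack()

video_stream()  # re-schedules itself via lmain.after(...)
root.mainloop()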
    def detect(self,
               frame: np.ndarray,
               det_objs: set = None) -> (int, np.ndarray, list):
        bbox, labels, conf = cv.detect_common_objects(
            frame,
            confidence=.5,
            model=self.model,
        )

        self.logger.info(labels)

        # only keep in the det_objs set
        if det_objs is not None:
            idxs = [i for i, l in enumerate(labels) if l in det_objs]
            labels = list(np.array(labels)[idxs])
            bbox = list(np.array(bbox)[idxs])
            conf = list(np.array(conf)[idxs])

        frame = draw_bbox(
            img=frame,
            bbox=bbox,
            labels=labels,
            confidence=conf,
            write_conf=False,
        )

        return 0, frame, labels
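A hypothetical call site for the method above; detector stands in for whatever object carries self.model and self.logger:

ret, annotated, labels = detector.detect(frame, det_objs={'person', 'car'})
print(ret, labels)  # 0, plus the labels that survived the det_objs filter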
Example #7
def detect_objects(image):
    im = cv2.imread(image)
    bbox, label, conf = cv.detect_common_objects(im)
    output_image = draw_bbox(im, bbox, label, conf)
    output_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB)

    plt.imsave('{}/response.jpg'.format(image.split('/')[0]), output_image)
def countVeh(imagePath):
    """
  TO COUNT NO OF VEHICHLES

  Using Tiny Yolo v3  Pre-Trained on Ms-Coco dataset
  
  Arguments:
      imagePath {[string]} -- [path of image]
  """

    image = cv2.imread(imagePath)
    #boundingbox, label, conf = cv.detect_common_objects(image)
    boundingbox, label, conf = cv.detect_common_objects(image,
                                                        confidence=0.25,
                                                        model='yolov3-tiny')
    output_image = draw_bbox(image, boundingbox, label, conf)

    plt.imshow(output_image)
    plt.show()
    print('Number of Objects in the image is ', "Cars :",
          str(label.count('car')), " Bikes :", str(label.count('motorcycle')),
          " Bicycles :", str(label.count('bicycle')), " Trucks :",
          str(label.count('truck')), "Bus :", str(label.count('bus')))

    countJson = {
        "Labels": {
            "Cars": label.count('car'),
            "Bikes": label.count('motorcycle'),
            "Bicycles": label.count('bicycle'),
            "Trucks": label.count('truck'),
            "Bus": label.count('bus')
        }
    }

    allImageCount[imagePath] = countJson
def main():

    # instantiate video capture object
    cap = cv2.VideoCapture(0)
    # creating a queue to share data to speech process
    speakQ = multiprocessing.Queue()
    # creating speech process to not hang processor
    p1 = multiprocessing.Process(target=speak, args=(speakQ, ))
    # starting process 1 - speech
    p1.start()

    # keeps program running forever until ctrl+c or window is closed
    while True:
        # capture image
        _, img = cap.read()
        # use detect common objects model to label objects
        bbox, labels, conf = cv.detect_common_objects(img)
        # draw labels
        img = draw_bbox(img, bbox, labels, conf)
        # send unique string denoting new labels being sent to speech
        speakQ.put('#new#')
        # send each label to text to speech process
        for label in labels:
            speakQ.put(label.lower())
        # display and wait 10ms
        cv2.imshow('my webcam', img)
        cv2.waitKey(10)

    # clean up if you want to remove while loop
    cap.release()
    p1.terminate()
    cv2.destroyAllWindows()
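main() assumes a speak(q) worker that is not shown; a minimal sketch using pyttsx3, where the '#new#' sentinel resets the set of labels already announced (assumed behavior):

import pyttsx3

def speak(q):
    engine = pyttsx3.init()
    announced = set()
    while True:
        label = q.get()
        if label == '#new#':
            announced.clear()  # a new frame: labels may be spoken again
            continue
        if label not in announced:
            announced.add(label)
            engine.say(label)
            engine.runAndWait()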
Example #10
def uploadPhoto(photo):
    im = cv2.imread(photo)
    bbox, label, conf = cv.detect_common_objects(im)
    output_image = draw_bbox(im, bbox, label, conf)
    print(label)
    addQuery(label)
    return output_image
Example #11
    def detect_objects_basic(self, frame, mask):

        detected_objects = []

        print(frame.shape)

        # bbox, label, conf = detect_common_objects(frame, confidence=0.25)
        # bbox, label, conf = detect_common_objects(frame, confidence=0.75, enable_gpu=True)
        bbox, label, conf = detect_common_objects(frame,
                                                  confidence=0.3,
                                                  model='yolov3-tiny')

        if DEBUG_MODE:
            output_image = draw_bbox(frame, bbox, label, conf)
            cv2.imshow('all objects', output_image)

            # for i in range(len(bbox)):
            #     detected_objects.append({
            #         'label': label[i],
            #         'bbox': bbox[i],
            #         'conf': conf[i],
            #         'x': extracted_object['x'],
            #         'y': extracted_object['y'],
            #         'width': extracted_object['width'],
            #         'height': extracted_object['height'],
            #         'center_x': int(extracted_object['x'] + extracted_object['width'] / 2),
            #         'center_y': int(extracted_object['y'] + extracted_object['height'] / 2)
            #     })

            print(detected_objects)

        return detected_objects
Example #12
def obj_detect(img):
    """Function to proccess an image through cvlib API.

    Args:
        img (numpy.ndarray) = Array of image to proccess.

    Return:
        Dictionary of results.
    """
    name = str(uuid4())
    result = {}
    # Apply object detection
    bbox, labels, conf = cv.detect_common_objects(img, confidence=0.40)
    # Create results dictionary.
    for element in labels:
        result[element] = labels.count(element)
    # Draw bounding box over detected objects
    out = draw_bbox(img, bbox, labels, conf)
    # Encode the annotated image as JPEG, then base64 for transport
    _, img_encoded = cv2.imencode('.jpg', out)
    img_data = base64.b64encode(img_encoded)
    #result['img'] = img_data
    # Make sure the output directory exists
    os.makedirs('image/', exist_ok=True)
    # Save output
    archive_name = "image/" + name + ".jpg"
    cv2.imwrite(archive_name, out)
    # Function to save img in blob storage
    send_to_blob()
    return result
Example #13
def detect4():

    im = cv2.imread('image/cam4.jpeg')
    bbox, label, conf = cv.detect_common_objects(im)
    output_image = draw_bbox(im, bbox, label, conf)
    s4 = int(label.count('car'))
    s4d = s4
    print("no of vehicles", s4)
    calculation4(s4d)
def countVehicleVideo(videoPath):
    """[summary]
      
  
  Arguments:
      videoPath {[string]} -- [VideoPath]
  """

    vidObj = cv2.VideoCapture(videoPath)

    count = 0

    success = True

    while success:

        success, image = vidObj.read()
        # stop once the video runs out of frames; otherwise image is None
        if not success:
            break

        boundingbox, label, conf = cv.detect_common_objects(
            image, confidence=0.20, model='yolov3-tiny')
        output_image = draw_bbox(image, boundingbox, label, conf)

        car = label.count('car')
        bike = label.count('motorcycle')
        bicycle = label.count('bicycle')
        truck = label.count('truck')
        bus = label.count('bus')

        if not ((bike == 0) and (car == 0) and (truck == 0) and (bus == 0) and
                (bicycle == 0)):

            print(len(label))
            plt.imshow(output_image)

            plt.show()
            print('Number of Objects in the image is ',
                  "Cars :", str(car), " Bikes :", str(bike), " Bicycles :",
                  str(bicycle), " Trucks :", str(truck), "Bus :", str(bus))

        countJson = {
            "Labels": {
                "Cars": car,
                "Bikes": bike,
                "Bicycles": bicycle,
                "Trucks": truck,
                "Bus": bus
            }
        }

        allImageCount["img{:d}".format(count)] = countJson

        count += 1

        #boundingbox, label, conf = cv.detect_common_objects(image)

    vidObj.release()
    cv2.destroyAllWindows()
Example #15
def detect():

    im = cv2.imread('image/cam1.jpeg')
    bbox, label, conf = cv.detect_common_objects(im)
    output_image = draw_bbox(im, bbox, label, conf)
    s = int(label.count('car'))
    s1d = s
    print("no of vehicles1 ", s)
    calculation1(s)
Example #16
def detect3():

    im = cv2.imread('image/cam3.jpeg')
    bbox, label, conf = cv.detect_common_objects(im)
    output_image = draw_bbox(im, bbox, label, conf)
    s3 = int(label.count('car'))
    s3d = s3
    print("number of vehicles", s3)
    calculation3(s3)
Example #17
 def clicked(self):
     filename = askopenfilename()
     # read the chosen image once with OpenCV
     im = cv2.imread(filename)
     bbox, label, conf = cv.detect_common_objects(im)
     output_image = draw_bbox(im, bbox, label, conf)
     plt.imshow(output_image)
     plt.show()
Example #18
def detect2():

    im = cv2.imread('image/cam2.jpeg')
    bbox, label, conf = cv.detect_common_objects(im)
    output_image = draw_bbox(im, bbox, label, conf)
    s2 = int(label.count('car'))
    s2d = s2
    print("no of vehicles2", s2)
    calculation2(s2)
Example #19
def get_image_data(df):
    #df = pd.read_csv("data/data.csv")
    df = df.values
    freq_list = []
    sessions = {}
    for i in range(len(df)):
        X = df[i, 1]
        X = json.loads(X)
        Y = df[i, 2]
        Y = json.loads(Y)
        im = cv2.imread('data/images/image' + str(i) + '.png')

        dim = (1920, 1080)
        resized = cv2.resize(im, dim, interpolation=cv2.INTER_AREA)
        bbox, label, conf = cv.detect_common_objects(resized)
        freq = 0
        for j in range(len(label)):
            if label[j] != 'laptop' and label[j] != 'tv':
                x1, y1, x2, y2 = bbox[j]
                for k in range(len(X)):
                    if FindPoint(x1, y1, x2, y2, X[k], Y[k]):
                        freq += 1
        if len(X) != 0:
            freq_list.append(freq / len(X))
        else:
            freq_list.append(0)
        output_image = draw_bbox(resized, bbox, label, conf)
        if len(X) > 2:
            try:
                hmax = sns.kdeplot(X,
                                   Y,
                                   color='r',
                                   shade=True,
                                   cmap=alpha_cmap(plt.cm.viridis),
                                   shade_lowest=False)
                #hmax.collections[0].set_alpha(0)
                xmin = min([x for x in X if x > 0])
                xmax = max([x for x in X if x < 1920])  # frame was resized to 1920x1080
                ymin = min([y for y in Y if y > 0])
                ymax = max([y for y in Y if y < 1080])
                print(xmin, xmax, ymin, ymax)
                if df[i, 4] not in sessions:
                    sessions[df[i, 4]] = [(abs((ymax - ymin) * (xmax - xmin)),
                                           (im.shape[0] * im.shape[1]))]
                else:
                    sessions[df[i, 4]].append((abs(
                        (ymax - ymin) * (xmax - xmin)),
                                               (im.shape[0] * im.shape[1])))
            except Exception as e:
                print("exception occurred:", e)

        plt.imshow(output_image, zorder=0)
        plt.savefig("heatmaps/map" + str(i) + ".png")
        plt.clf()

    return freq_list, sessions
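FindPoint() is not defined in this snippet; a straightforward point-in-rectangle check that matches the call signature used above:

def FindPoint(x1, y1, x2, y2, x, y):
    # True when (x, y) falls inside the box spanned by (x1, y1)-(x2, y2)
    return x1 <= x <= x2 and y1 <= y <= y2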
Example #20
def detect():

    im = cv2.imread('image/cam2.jpeg')
    bbox, label, conf = cv.detect_common_objects(im)
    output_image = draw_bbox(im, bbox, label, conf)
    plt.imshow(output_image)
    plt.show()
    s = int(label.count('car'))
    s1d = s
    print("no of vehicles1 ", s)
Example #21
    def segment_and_recognize(input_path):
        trained_image_width = 512
        mean_subtraction_value = 127.5
        image = np.array(Image.open(input_path))

        # get the YOLOv3 bounding boxes, labels and confidences
        bbox, YOLO_label, conf = cv.detect_common_objects(image)

        # resize to max dimension of images from training dataset
        w, h, _ = image.shape
        ratio = float(trained_image_width) / np.max([w, h])
        resized_image = np.array(
            Image.fromarray(image.astype('uint8')).resize(
                (int(ratio * h), int(ratio * w))))

        # apply normalization for trained dataset images
        resized_image = (resized_image / mean_subtraction_value) - 1.

        # pad array to square image to match training images
        pad_x = int(trained_image_width - resized_image.shape[0])
        pad_y = int(trained_image_width - resized_image.shape[1])
        resized_image = np.pad(resized_image, ((0, pad_x), (0, pad_y), (0, 0)),
                               mode='constant')

        # make prediction for Deeplab
        deeplab_model = Deeplabv3()
        res = deeplab_model.predict(np.expand_dims(resized_image, 0))
        labels = np.argmax(res.squeeze(), -1)

        # remove padding and resize back to original image
        if pad_x > 0:
            labels = labels[:-pad_x]
        if pad_y > 0:
            labels = labels[:, :-pad_y]
        labels = Image.fromarray(labels.astype('uint8')).resize(
            (h, w)).convert()

        # converts PIL greyscale image to numpy array compatible with cvlib
        # and overlays the segment over original input image
        temp = 'temp.png'
        plt.imsave(temp, labels)
        background = Image.open(input_path).convert('RGBA')
        segments = Image.open(temp)
        overlay = Image.blend(background, segments, 0.5)
        img = np.array(overlay)

        # display the YOLOv3 bounding boxes and labels on the overlay
        img = draw_bbox(img=img, bbox=bbox, labels=YOLO_label, confidence=conf)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        os.remove(temp)

        # show final result
        plt.imshow(img)
        plt.waitforbuttonpress()
        plt.show()
def detectAndCount(showOutputImage=False):
    print("Detecting cars now")
    im = cv2.imread('./../RoadImages/cars_2.jpg')
    bbox, label, conf = cv.detect_common_objects(im)
    print("Done detecting. Drawing contours now.")
    if showOutputImage:
        output_image = draw_bbox(im, bbox, label, conf)
        plt.imshow(output_image)
        plt.show()
    numberOfCars = label.count('car')
    print('Number of cars in the image is ', numberOfCars)
Example #23
 def callback(self, msg):
     time_now = time.time()
     img_opencv = self.cv_bridge.imgmsg_to_cv2(msg)
     bbox, label, conf = cv.detect_common_objects(img_opencv,
                                                  enable_gpu=False)
     output_image = draw_bbox(img_opencv, bbox, label, conf)
     img_msg = self.cv_bridge.cv2_to_imgmsg(output_image)
     img_msg.header = msg.header
     self.pub.publish(img_msg)
     self.get_logger().info("Detection took {}s ".format(time.time() -
                                                         time_now))
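The callback above assumes a ROS 2 node that owns a CvBridge, an image subscription, and a publisher (plus the usual cv2/cvlib/time imports); a hypothetical wiring sketch, with assumed topic names:

import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge

class DetectionNode(Node):
    def __init__(self):
        super().__init__('object_detection')
        self.cv_bridge = CvBridge()
        self.sub = self.create_subscription(Image, 'image_raw', self.callback, 10)
        self.pub = self.create_publisher(Image, 'detections', 10)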
def opencv_save_movie_with_frames(file_name, start_frame, end_frame):
    new_filename = "{start_frame}_{end_frame}_{file_name}".format(
        start_frame=start_frame, end_frame=end_frame, file_name=file_name)
    input_video = cv2.VideoCapture(file_name)
    for _ in range(start_frame):
        input_video.read()
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Be sure to use lower case
    width = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = input_video.get(cv2.CAP_PROP_FPS)
    output_video = cv2.VideoWriter(new_filename, fourcc, fps, (width, height),
                                   True)
    for _ in range(end_frame - start_frame):
        success, data = input_video.read()
        # stop early if the clip runs past the end of the video
        if not success:
            break
        bbox, label, conf = cv.detect_common_objects(data)
        draw_bbox(data, bbox, label, conf)
        output_video.write(data)
    output_video.release()
    input_video.release()
    return new_filename
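For example, to cut frames 100-200 of a clip into a new annotated file (the file name is hypothetical):

clip = opencv_save_movie_with_frames('demo.mp4', 100, 200)
print('wrote', clip)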
Example #25
def video():
    # drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(1000.0)

        container = av.open(drone.get_video_stream())
        print("type(container)", type(container))

        # skip first 300 frames
        frame_skip = 300
        while True:
            for frame in container.decode(video=0):
                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue
                start_time = time.time()
                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)
                # height, width, channels = image.shape
                # print("height, width, channels of the picture are:", height, width, channels)

                # cv2.imshow('Original', image)
                # cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                # apply object detection
                bbox, label, conf = cv.detect_common_objects(image)
                print(bbox, label, conf)
                out = draw_bbox(image, bbox, label, conf)
                # display output
                cv2.imshow("Real-time object detection", out)
                getnavcoordinates(bbox, label)
                navigatemiddle(bbox, label, conf, frame)
                # press "Q" to stop
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
                # cv2.imshow("Real-time object detection")
                # cv2.waitKey(1)
                frame_skip = int((time.time() - start_time) / frame.time_base)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
Example #26
    def recog_face(self, frame):
        del self.nameInFrame[:]
        # using cvlib to detect common objects
        bbox, label, conf = cvl.detect_common_objects(frame,
                                                      confidence=0.4,
                                                      model='yolov3-tiny')
        # draw the generic object boxes only when no person was detected
        if 'person' not in label:
            frame = draw_bbox(frame, bbox, label, conf, write_conf=True)

        frame = imutils.resize(frame, width=600)
        (h, w) = frame.shape[:2]
        imageBlob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                          1.0, (300, 300),
                                          (104.0, 177.0, 123.0),
                                          swapRB=False,
                                          crop=False)
        self.faceDetector.setInput(imageBlob)
        detections = self.faceDetector.forward()
        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > self.baseConfidence:
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                face = frame[startY:endY, startX:endX]
                (fH, fW) = face.shape[:2]

                if fW < 20 or fH < 20:
                    continue

                faceBlob = cv2.dnn.blobFromImage(face,
                                                 1.0 / 255, (96, 96),
                                                 (0, 0, 0),
                                                 swapRB=True,
                                                 crop=False)
                self.embedder.setInput(faceBlob)
                vec = self.embedder.forward()

                predictions = self.recogniser.predict_proba(vec)[0]
                j = np.argmax(predictions)
                probability = predictions[j]
                name = self.labelEncoder.classes_[j]
                if name not in self.nameInFrame:
                    self.nameInFrame.append(name)

                text = "{}: {:.2f}%".format(name, probability * 100)
                labelPos = startY - 10 if startY - 10 > 10 else startY + 10
                frame = cv2.rectangle(frame, (startX, startY), (endX, endY),
                                      (0, 0, 255), 2)
                frame = cv2.putText(frame, text, (startX, labelPos),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.45,
                                    (0, 0, 255), 2)

        return frame, self.nameInFrame
Example #27
def detect2():

    # average the car count over four frames
    # (cam23.jpeg appears twice in the source list)
    s = 0
    for name in ('cam22', 'cam23', 'cam23', 'cam24'):
        im = cv2.imread('image/' + name + '.jpeg')
        bbox, label, conf = cv.detect_common_objects(im)
        output_image = draw_bbox(im, bbox, label, conf)
        s = s + int(label.count('car'))
    m = (s / 4)
    print("no of vehicles1 ", m)
    calculation2(m)
Example #28
    def process(self):
        logging.info("PROCESSING")
        boxes, labels, conf = cv.detect_common_objects(
            self.image, model="yolov3")

        self.data["labels"] = labels
        self.data["boxes"] = boxes

        for i in range(len(labels)):
            label = labels[i]
            if label == "person":
                confidence = conf[i]
                self.data["person_found"] = True
                logging.info("--- FOUND PERSON with %s", confidence)
                cv2.imwrite(self.person_file_name, self.image)
                box = boxes[i]
                rect = Rectangle((box[0], box[1]), (box[2], box[3]))

                left = rect.overlaps(leftBox)
                right = rect.overlaps(rightBox)
                back = rect.overlaps(backBox)

                self.data["person_found_in_left_box"] = left
                self.data["person_found_in_right_box"] = right
                self.data["person_found_in_back_box"] = back

                logging.info("---- Checking leftBox %s", left)
                logging.info("---- Checking rightBox %s", right)
                logging.info("---- Checking backBox %s", back)

                if ((left and back) or (right and back)):
                    logging.info("----- PERSON IN THE RECTANGLE %s", conf[i])
                    self.data["person_found_in_rectangle"] = True
                    out = draw_bbox(self.image, [box], [label], [conf[i]])
                    cv2.imwrite(self.detected_file_name, out)
                    Slacker.postToSlack(self.detected_file_name)
                    Slacker.postToTwitter(self.detected_file_name)

                    with open(self.json_file_name, "w") as outfile:
                        json.dump(self.data, outfile, sort_keys=True,
                                  indent=4, separators=(',', ': '))

                # print("-- RENAMING " + self.filename +
                #       " TO " + self.processed_file_name)

                # os.rename(self.filename, self.processed_file_name)
                # print("-- WRITING " + self.json_file_name)

        logging.info("-- REMOVING  %s", self.filename)
        try:
            os.remove(self.filename)
        except OSError:
            print("Error while deleting file ", self.filename)
Example #29
def detect_cv_labels(photo, result_path):
    im = cv2.imread(photo)
    bbox, label, conf = cv.detect_common_objects(im)
    prediction = get_cv_labels(photo, bbox, label, conf)

    # Verify that result directory exists
    verify_directory(result_path + RESULT_SUBDIR)

    # Create image with bounding boxes
    boxed_image = draw_bbox(im, bbox, label, conf)
    cv2.imwrite(result_path + RESULT_SUBDIR + os.path.sep + BOXED_PREFIX + photo, boxed_image)
    return prediction
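verify_directory() and the path constants are defined elsewhere; a plausible helper and hypothetical values for illustration:

import os

RESULT_SUBDIR = 'results'  # assumed value
BOXED_PREFIX = 'boxed_'    # assumed value

def verify_directory(path):
    # create the directory tree if it does not exist yet
    os.makedirs(path, exist_ok=True)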
Example #30
 def object_detect(image):
     #read in image
     img = cv2.imread(image)
     #convert BGR to RGB
     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
     #detect objects
     bbox, label, conf = cv.detect_common_objects(img)
     #display labels
     output_image = draw_bbox(img, bbox, label, conf)
     #display output image
     plt.imshow(output_image)
     plt.show()