Example #1
def worker_pose(input_q, output_q, cap_params, frame_processed):
    import tensorflow as tf
    print(">> loading frozen model for worker")
    detection_graph, sess = detector_utils.load_inference_graph(
        r'/home/testuser/tf_tut/tf_image_classifier/tf_files/retrained_graph.pb'
    )
    sess = tf.Session(graph=detection_graph)
    while True:
        #print("> ===== in worker loop, frame ", frame_processed)
        frame = input_q.get()
        if (frame is not None):
            # actual detection
            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)
            # draw bounding boxes
            detector_utils.draw_box_on_image(cap_params['num_hands_detect'],
                                             cap_params["score_thresh"],
                                             scores, boxes,
                                             cap_params['im_width'],
                                             cap_params['im_height'], frame)
            output_q.put(frame)
            frame_processed += 1
        else:
            output_q.put(frame)
    sess.close()
Example #2
def worker(input_q, output_q, cropped_output_q, cap_params, frame_processed):
    print(">> loading frozen model for worker")
    detection_graph, sess = detector_utils.load_inference_graph()

    while True:
        frame = input_q.get()
        if (frame is not None):
            im_width = cap_params['im_width']
            im_height = cap_params['im_height']

            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)

            # draw bounding boxes
            detector_utils.draw_box_on_image(cap_params['num_hands_detect'],
                                             cap_params["score_thresh"],
                                             scores, boxes,
                                             cap_params['im_width'],
                                             cap_params['im_height'], frame)

            cropped_img = detector_utils.get_box_image(
                cap_params['num_hands_detect'], cap_params["score_thresh"],
                scores, boxes, cap_params['im_width'], cap_params['im_height'],
                frame)

            frame_processed += 1
            output_q.put(frame)
            cropped_output_q.put(cropped_img)
        else:
            output_q.put(frame)
            cropped_output_q.put(None)
Example #3
def worker(input_q, output_q, cap_params, frame_processed):
    print(">> loading frozen model for worker")
    detection_graph, sess = detector_utils.load_inference_graph()
    sess = tf.Session(graph=detection_graph)
    saver = tf.train.Saver()
    saver.save(sess, "./graph_save/saved_graph.ckpt")
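    # exit(0) below terminates the worker right after the checkpoint is written,
    # so the detection loop that follows never runs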
    exit(0)
    while True:
        # print("> ===== in worker loop, frame ", frame_processed)
        frame = input_q.get()
        if frame is not None:
            # actual detection
            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)
            print("frame: {}\nscores: {}\nboxes: {}".format(
                frame_processed, scores, boxes))
            print(boxes.shape, scores.shape)
            # draw bounding boxes
            detector_utils.draw_box_on_image(cap_params['num_hands_detect'],
                                             cap_params["score_thresh"],
                                             scores, boxes,
                                             cap_params['im_width'],
                                             cap_params['im_height'], frame)
            # add frame annotated with bounding box to queue
            output_q.put(frame)
            frame_processed += 1
        else:
            output_q.put(frame)
    sess.close()
Example #4
def worker(input_q, output_q, output_boxes, cap_params, frame_processed):
    print(">> loading frozen model for worker")
    detection_graph, sess = detector_utils.load_inference_graph()
    sess = tf.Session(graph=detection_graph)
    while True:
        #print("> ===== in worker loop, frame ", frame_processed)
        frame = input_q.get()
        if (frame is not None):
            # Actual detection. Variable boxes contains the bounding box coordinates for hands detected,
            # while scores contains the confidence for each of these boxes.
            # Hint: If len(boxes) > 1, you may assume you have found at least one hand (within your score threshold)

            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)
            # draw bounding boxes
            detector_utils.draw_box_on_image(cap_params['num_hands_detect'],
                                             cap_params["score_thresh"],
                                             scores, boxes,
                                             cap_params['im_width'],
                                             cap_params['im_height'], frame)
            # add frame annotated with bounding box to queue
            output_q.put(frame)
            output_boxes.put(boxes[0])
            frame_processed += 1
        else:
            output_q.put(frame)
    sess.close()
Example #5
def detect_hand(cap,
                detection_graph,
                sess,
                imw,
                imh,
                num_hands=1,
                thresh=0.15):
    for i in range(5):
        cap.grab()  # Disregard old frames
    ret, image_np = cap.read()
    while not ret:  # In case an image is not captured
        ret, image_np = cap.read()

    #image_np = cv2.undistort(image_np, CAMERAMATRIX, DISTORTION)
    try:
        image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
    except:
        print("Error converting to RGB")

    box, sc = detector_utils.detect_objects(image_np, detection_graph, sess)
    detector_utils.draw_box_on_image(num_hands, thresh, sc, box, imw, imh,
                                     image_np)
    conts = detector_utils.get_box_size(num_hands, thresh, sc, box, imw, imh,
                                        image_np)
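    # Convert each detected box to a Cartesian centre point plus its height;
    # pixels_to_cartesian, get_center and get_height are helpers from the surrounding project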
    ret = np.array([
        np.concatenate(
            [pixels_to_cartesian(*get_center(*c)), [get_height(*c), 0, 0, 0]])
        for c in conts
    ])
    return ret, image_np
Example #6
    def img_callback(self, data):
        try:
            print('Working')
            cv_image = self.bridge.imgmsg_to_cv2(data, 'bgr8')
        except CvBridgeError as e:
            print(e)

        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # Run image through tensorflow graph
        boxes, scores, classes = detector_utils.detect_objects(
            cv_image, self.inference_graph, self.sess)

        # Draw Bounding box
        detector_utils.draw_box_on_image(self.num_objects_detect,
                                         self.score_thresh, scores, boxes,
                                         classes, self.im_width,
                                         self.im_height, cv_image)

        # Calculate FPS
        self.num_frames += 1
        elapsed_time = (datetime.datetime.now() -
                        self.start_time).total_seconds()
        fps = self.num_frames / elapsed_time

        # Display FPS on frame
        detector_utils.draw_text_on_image(
            "FPS : " + str("{0:.2f}".format(fps)), cv_image)

        # Publish image
        try:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
            self.publisher.publish(self.bridge.cv2_to_imgmsg(cv_image, 'bgr8'))
        except CvBridgeError as e:
            print(e)
Example #7
def run_session(tup):
    import tensorflow as tf

    frame, cap_params, path = tup
    # sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0, 'CPU': 2}, log_device_placement=False))
    print("> ===== in worker loop, frame ")
    with tf.device("/cpu:0"):
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(path, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        with tf.Session(graph=detection_graph) as sess:
            if (frame is not None):
                # actual detection
                boxes, scores = detector_utils.detect_objects(
                    frame, detection_graph, sess)
                # draw bounding boxes
                detector_utils.draw_box_on_image(
                    cap_params['num_hands_detect'],
                    cap_params['score_thresh'],
                    scores,
                    boxes,
                    cap_params['im_width'],
                    cap_params['im_height'],
                    frame)
Example #8
def draw_gui(input_frame,
             detection_graph,
             sess,
             num_hands_detect=2,
             score_thresh=0.2):

    height, width, channels = input_frame.shape
    i_count = 0

    try:
        image_np = cv2.cvtColor(input_frame, cv2.COLOR_BGR2RGB)
    except:
        print("Error converting to RGB")

    boxes, scores = detector_utils.detect_objects(image_np, detection_graph,
                                                  sess)

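    # Count how many candidate detections clear the score threshold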
    for i in range(num_hands_detect):
        if (scores[i] > score_thresh):
            i_count += 1

    detector_utils.draw_box_on_image(num_hands_detect, score_thresh, scores,
                                     boxes, width, height, image_np)

    cv2.putText(image_np, "Hands=" + str(i_count), (25, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0), 1)
    return cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
Example #9
    def detect_faces(self, image: np.ndarray):
        # haarclassifiers work better in black and white
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray_image = cv2.equalizeHist(gray_image)
        faces = self.classifier.detectMultiScale(gray_image,
                                                 scaleFactor=1.3,
                                                 minNeighbors=4,
                                                 flags=cv2.CASCADE_SCALE_IMAGE,
                                                 minSize=(self._min_size))

        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x + w, y + h), self._red,
                          self._width)
        ####################### hand ####################
        try:
            image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        except:
            print("Error converting to RGB")

        boxes, scores = detector_utils.detect_objects(image_np,
                                                      self.detection_graph,
                                                      self.sess)

        # draw bounding boxes on frame
        detector_utils.draw_box_on_image(2, 0.5, scores, boxes, 320, 180,
                                         image)
        touched = False
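        # Check each detected face against the first (highest-scoring) hand box for an overlap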
        for (x, y, w, h) in faces:
            for i in range(0, 1):
                #                qDebug("********Face********")
                #                qDebug(str(x))
                #                qDebug(str(y))
                #                qDebug(str(x+w))
                #                qDebug(str(y+h))
                #                qDebug("########Hand########")
                #                qDebug(str(boxes[i][1]*320))
                #                qDebug(str(boxes[i][0]*180))
                #                qDebug(str(boxes[i][3]*320))
                #                qDebug(str(boxes[i][2]*180))
                #                qDebug(str(scores[i]))
                #                qDebug(str(self.isTouching(x,y,x+w,y+h,boxes[i][1]*320,boxes[i][0]*180,boxes[i][3]*320,boxes[i][2]*180)))

                if self.isTouching(x, y, x + w, y + h, boxes[i][0] * 320,
                                   boxes[i][1] * 180, boxes[i][2] * 320,
                                   boxes[i][3] * 180) and scores[i] > 0.5:
                    touched = True

        if touched:
            winsound.Beep(440, 50)  # frequency, duration

        #################################################

        self.image = self.get_qimage(image)
        if self.image.size() != self.size():
            self.setFixedSize(self.image.size())

        self.update()
Example #10
    def viewCam(self):
        start_time = datetime.datetime.now()
        num_frames = 0
        im_width, im_height = (self.cap.get(3), self.cap.get(4))
        # max number of hands we want to detect/track
        num_hands_detect = 2

        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        ret, image_np = self.cap.read()
        # image_np = cv2.flip(image_np, 1)
        try:
            image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
        except:
            print("Error converting to RGB")

        # Actual detection. Variable boxes contains the bounding box coordinates for hands detected,
        # while scores contains the confidence for each of these boxes.
        # Hint: If len(boxes) > 1, you may assume you have found at least one hand (within your score threshold)

        boxes, scores = detector_utils.detect_objects(image_np,
                                                      detection_graph, sess)

        # draw bounding boxes on frame
        detector_utils.draw_box_on_image(num_hands_detect, args.score_thresh,
                                         scores, boxes, im_width, im_height,
                                         image_np)

        # Calculate Frames per second (FPS)
        num_frames += 1
        elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
        fps = num_frames / elapsed_time

        detector_utils.draw_fps_on_image("FPS : " + str(int(fps)), image_np)

        # cv2.imshow('Single-Threaded Detection',
        #         cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))

        # convert image to RGB format
        image = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
        # get image infos
        height, width, channel = image_np.shape
        step = channel * width
        # create QImage from image
        qImg = QImage(image_np.data, width, height, step, QImage.Format_RGB888)
        # show image in img_label
        
        #audio
        #audio_frame, val = self.player.get_frame()
        
        if self.i:
            self.player = MediaPlayer("vid1.MP4")
            self.i = False

        self.player.get_frame()
        self.ui.image_label.setPixmap(QPixmap.fromImage(qImg))
Example #11
def worker(input_q, output_q, cropped_output_q, inferences_q, cap_params,
           frame_processed):
    print(">> loading frozen model for worker")
    detection_graph, sess = detector_utils.load_inference_graph()
    sess = tf.Session(graph=detection_graph)

    print(">> loading keras model for worker")
    try:
        model, classification_graph, session = classifier.load_KerasGraph(
            "./cnn/cnn.h5")
    except Exception as e:
        print(e)

    while True:
        #print("> ===== in worker loop, frame ", frame_processed)
        frame = input_q.get()
        if (frame is not None):
            # Actual detection. Variable boxes contains the bounding box coordinates for hands detected,
            # while scores contains the confidence for each of these boxes.
            # Hint: If len(boxes) > 1, you may assume you have found at least one hand (within your score threshold)
            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)

            # get region of interest
            res = detector_utils.get_box_image(cap_params['num_hands_detect'],
                                               cap_params["score_thresh"],
                                               scores, boxes,
                                               cap_params['im_width'],
                                               cap_params['im_height'], frame)

            # draw bounding boxes
            detector_utils.draw_box_on_image(cap_params['num_hands_detect'],
                                             cap_params["score_thresh"],
                                             scores, boxes,
                                             cap_params['im_width'],
                                             cap_params['im_height'], frame)

            # classify hand pose
            if res is not None:
                res = cv2.cvtColor(res, cv2.COLOR_RGB2GRAY)
                res = cv2.resize(res, (28, 28), interpolation=cv2.INTER_AREA)
                class_res = classifier.classify(model, classification_graph,
                                                session, res)
                inferences_q.put(class_res)

            # add frame annotated with bounding box to queue
            cropped_output_q.put(res)
            output_q.put(frame)
            frame_processed += 1
        else:
            output_q.put(frame)
    sess.close()
Example #12
def detection(detection_graph):
    with tf.Session(graph=detection_graph) as sess:
        if (frame is not None):
            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)

            # draw bounding boxes
            detector_utils.draw_box_on_image(num_hands_detect, args.score_thresh, scores, boxes, im_width, im_height, frame,
                                             detection_graph, sess)

            # label, score = detector_utils.classify_object(frame, detection_graph, sess)

        return scores, boxes
Example #13
    def img_callback(self, data):
        try:
            print('Working')
            cv_image = self.bridge.imgmsg_to_cv2(data, 'bgr8')
        except CvBridgeError as e:
            print(e)

        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # Run image through tensorflow graph
        boxes, scores, classes = detector_utils.detect_objects(
            cv_image, self.inference_graph, self.sess)

        # Draw Bounding box
        detector_utils.draw_box_on_image(
            self.num_objects_detect, self.score_thresh, scores, boxes, classes,
            self.im_width, self.im_height, cv_image)

        # Calculate FPS
        self.num_frames += 1
        elapsed_time = (
            datetime.datetime.now() - self.start_time).total_seconds()
        fps = self.num_frames / elapsed_time

        # Display FPS on frame
        detector_utils.draw_text_on_image(
            "FPS : " + str("{0:.2f}".format(fps)), cv_image)

        # Publish image
        try:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
            self.publisher.publish(self.bridge.cv2_to_imgmsg(cv_image, 'bgr8'))
        except CvBridgeError as e:
            print(e)

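        # Publish the centre of every detection above the threshold, together with its class id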
        for i in range(self.num_objects_detect):
            if (scores[i] > self.score_thresh):
                (left, right, top, bottom) = (boxes[i][1] * self.im_width,
                                              boxes[i][3] * self.im_width,
                                              boxes[i][0] * self.im_height,
                                              boxes[i][2] * self.im_height)
                # top left corner of bbox
                p1 = np.array([int(left), int(top)])
                # bottom right corner of bbox
                p2 = np.array([int(right), int(bottom)])
                mid_point = (p1 + p2) / 2
                self.dice_publisher.publish(
                    Point(mid_point[0], mid_point[1], classes[i]))
Example #14
def worker(input_q, output_q, cap_params, frame_processed):
    print(">> loading frozen model for worker")
    detection_graph, sess = detector_utils.load_inference_graph()
    sess = tf.compat.v1.Session(graph=detection_graph)
    while True:
        frame = input_q.get()
        if (frame is not None):
            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)
            # draw bounding boxes
            boxes_to_recog, scores_to_show = detector_utils.draw_box_on_image(
                cap_params['num_hands_detect'], cap_params["score_thresh"],
                scores, boxes, cap_params['im_width'], cap_params['im_height'],
                frame)
            b_have_hand, img_roi, img_extend = recognizer_utils.drawBoxOfROI(
                scores_to_show, boxes_to_recog, 0.2, 0.8,
                cap_params['im_width'], cap_params['im_height'], frame)
            img_roi, str_gesture = recognizer_utils.processROI(b_have_hand, img_roi, img_extend)
            # add frame annotated with bounding box to queue
            output_q.put(frame)
            output_q.put(img_roi)
            output_q.put(str_gesture)
            frame_processed += 1
        else:
            output_q.put(frame)
    sess.close()
Example #15
def movie_controler_predict(imagebase64):
    global oi
    image_np = stringToRGB(imagebase64)
    cv2.imwrite("test/{}.jpg".format(oi), image_np)
    im_height, im_width, channels = image_np.shape
    print(str(im_height), str(im_width))

    boxes, scores, classes, num_hand = detector_utils.detect_objects(
        image_np, detection_graph, sess)
    # draw bounding boxes on frame
    img = detector_utils.draw_box_on_image(1, SCORE_THRESH, scores, boxes,
                                           im_width, im_height, image_np)

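    # Take the highest-scoring detection; when it clears SCORE_THRESH and TWO_STATE is set,
    # run the image returned by draw_box_on_image through the second-stage model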
    x = np.argmax(scores)
    if scores[x] > SCORE_THRESH:
        print('predict')
        a = classes[x]
        if TWO_STATE:
            # img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            a = model2_predict(img, graph2, sess2)
            label = np.argmax(a)
            print('result            ' + str(label))
            print("score:" + str(np.max(a)))
            if np.max(a) > 0.5:
                cv2.imwrite("test/{}.jpg".format(str(label) + str(scores[x])),
                            img)
                return label
            else:
                return 5
    return 5
Example #16
def main():
    emojis = get_emojis()

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

    im_width, im_height = (cap.get(3), cap.get(4))
    # max number of hands we want to detect/track
    num_hands_detect = 1

    cv2.namedWindow('Single-Threaded Detection', cv2.WINDOW_NORMAL)

    while True:
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        ret, image_np = cap.read()
        image_np = cv2.flip(image_np, 1)
        # image_np = cv2.flip(image_np, 1)
        try:
            image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
        except:
            print("Error converting to RGB")

        # Actual detection. Variable boxes contains the bounding box coordinates for hands detected,
        # while scores contains the confidence for each of these boxes.
        # Hint: If len(boxes) > 1, you may assume you have found at least one hand (within your score threshold)

        boxes, scores = detector_utils.detect_objects(image_np,
                                                      detection_graph, sess)

        # draw bounding boxes on frame
        img = detector_utils.draw_box_on_image(num_hands_detect, 0.4, scores,
                                               boxes, im_width, im_height,
                                               image_np)
        image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
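        # Segment skin-coloured pixels in HSV, then smooth and threshold the mask
        # before resizing the result to 50x50 for the Keras classifier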
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        mask2 = cv2.inRange(hsv, np.array([2, 50, 60]),
                            np.array([25, 150, 255]))
        res = cv2.bitwise_and(img, img, mask=mask2)
        gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        median = cv2.GaussianBlur(gray, (5, 5), 0)

        kernel_square = np.ones((5, 5), np.uint8)
        dilation = cv2.dilate(median, kernel_square, iterations=2)
        opening = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel_square)
        ret, thresh = cv2.threshold(opening, 30, 255, cv2.THRESH_BINARY)

        newImage = cv2.resize(thresh, (50, 50))
        pred_probab, pred_class = keras_predict(model, newImage)
        print(pred_class, pred_probab)
        image_np = overlay(image_np, emojis[pred_class], 400, 300, 90, 90)

        cv2.imshow('Single-Threaded Detection', image_np)
        cv2.imshow('img', img)

        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
Example #17
def hand_detection(ini_time):
    cap = cv2.VideoCapture(args.video_source)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.height)

    start_time = datetime.datetime.now()
    num_frames = 0
    im_width, im_height = (cap.get(3), cap.get(4))
    # max number of hands we want to detect/track
    num_hands_detect = 2

    cv2.namedWindow('Single-Threaded Detection', cv2.WINDOW_NORMAL)
    cv2.namedWindow('Hand wash status', cv2.WINDOW_NORMAL)

    count = 0

    while (time.time() - ini_time) <= 30:
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        ret, image_np = cap.read()
        # image_np = cv2.flip(image_np, 1)
        try:
            image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
        except:
            print("Error converting to RGB")

        # Actual detection. Variable boxes contains the bounding box coordinates for hands detected,
        # while scores contains the confidence for each of these boxes.
        # Hint: If len(boxes) > 1, you may assume you have found at least one hand (within your score threshold)

        boxes, scores = detector_utils.detect_objects(image_np,
                                                      detection_graph, sess)

        # draw bounding boxes on frame
        count += detector_utils.draw_box_on_image(num_hands_detect, args.score_thresh,
                                         scores, boxes, im_width, im_height,
                                         image_np)

        # Calculate Frames per second (FPS)
        num_frames += 1
        elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
        fps = num_frames / elapsed_time

        if (args.display > 0):
            # Display FPS on frame
            if (args.fps > 0):
                detector_utils.draw_fps_on_image("FPS : " + str(int(fps)),
                                                 image_np)

            cv2.imshow('Single-Threaded Detection',
                       cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))

            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyWindow('Single-Threaded Detection')
                break
        else:
            print("frames processed: ", num_frames, "elapsed time: ",
                  elapsed_time, "fps: ", str(int(fps)))

        cv2.destroyWindow('Single-Threaded Detection')
Example #18
def worker(input_q, output_q, cap_params, frame_processed):
    print(">> loading frozen model for worker")
    detection_graph, sess = detector_utils.load_inference_graph()
    sess = tf.Session(graph=detection_graph)
    while True:
        #print("> ===== in worker loop, frame ", frame_processed)
        frame = input_q.get()
        if (frame is not None):
            # actual detection
            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)
            # draw bounding boxes
            detector_utils.draw_box_on_image(
                cap_params['num_hands_detect'], cap_params["score_thresh"], scores, boxes, cap_params['im_width'], cap_params['im_height'], frame)
            # add frame annotated with bounding box to queue
            output_q.put(frame)
            frame_processed += 1
        else:
            output_q.put(frame)
    sess.close()
Example #19
    def img_callback(self, data):
        self.parse_label_map()
        # if self.check_timestamp():
        # return None

        cv_image = data
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        self.im_height, self.im_width, channels = cv_image.shape
        print("Height ", self.im_height, " Width: ", self.im_width)
        #  cv_image = cv2.resize(cv_image, (256, 144))
        # Run image through tensorflow graph
        boxes, scores, classes = detector_utils.detect_objects(
            cv_image, self.inference_graph, self.sess)

        # Draw Bounding box
        labelled_image, bbox = detector_utils.draw_box_on_image(
            self.num_objects_detect, self.score_thresh, scores, boxes, classes,
            self.im_width, self.im_height, cv_image, self.target)

        # Calculate FPS
        self.num_frames += 1
        elapsed_time = (datetime.datetime.now() -
                        self.start_time).total_seconds()
        fps = self.num_frames / elapsed_time

        # Display FPS on frame
        detector_utils.draw_text_on_image(
            "FPS : " + str("{0:.2f}".format(fps)), cv_image)
        print("bbox:", bbox)
        if len(bbox) > 0:
            pointx = (bbox[0][0] + bbox[1][0]) / 2
            pointy = (bbox[0][1] + bbox[1][1]) / 2
            pointxdist = abs(bbox[0][0] - bbox[1][0])
            pointydist = abs(bbox[0][1] - bbox[1][1])
            print(pointxdist)
            msg = Point(x=pointx,
                        y=pointy,
                        z=self.see_sub.last_image_time.to_sec())
            print("X: ", pointx, "Y: ", pointy, "TIMESTAMP: ", msg.z)
            self.bbox_pub.publish(msg)
            roi = RegionOfInterest(x_offset=int(bbox[0][0]),
                                   y_offset=int(bbox[0][1]),
                                   height=int(pointydist),
                                   width=int(pointxdist))
            self.roi_pub.publish(roi)

        # Publish image
        try:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
            self.debug_image_pub.publish(
                self.bridge.cv2_to_imgmsg(cv_image, 'bgr8'))
        except CvBridgeError as e:
            print(e)
Example #20
def detect_image(input_img):
    image_np = cv2.imread(input_img)
    im_width = np.size(image_np, 1)
    im_height = np.size(image_np, 0)
    try:
        image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
    except:
        print("Error converting to RGB")
    start = time()
    boxes, scores = detector_utils.detect_objects(image_np, detection_graph,
                                                  sess)
    t = time() - start
    print(t, "sec")
    detector_utils.draw_box_on_image(num_hands_detect, score_thresh, scores,
                                     boxes, im_width, im_height, image_np)

    cv2.namedWindow('Single-Threaded Detection', cv2.WINDOW_NORMAL)
    cv2.imshow('Single-Threaded Detection',
               cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
    if cv2.waitKey() & 0xFF == ord('q'):
        cv2.destroyAllWindows()
Example #21
def worker(input_q, output_q, cap_params, frame_processed, points):
    print(">> loading frozen model for worker")
    detection_graph, sess = detector_utils.load_inference_graph()
    sess = tf.Session(graph=detection_graph)
    prevPoint = None
    while True:
        #print("> ===== in worker loop, frame ", frame_processed)
        frame = input_q.get()
        if (frame is not None):
            # Actual detection. Variable boxes contains the bounding box coordinates for hands detected,
            # while scores contains the confidence for each of these boxes.
            # Hint: If len(boxes) > 1, you may assume you have found at least one hand (within your score threshold)

            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)
            # draw bounding boxes
            detectedPoint = detector_utils.draw_box_on_image(
                cap_params['num_hands_detect'], cap_params["score_thresh"],
                scores, boxes, cap_params['im_width'], cap_params['im_height'],
                frame)
            point = detectedPoint

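            # Queue the point only when aboveThreshhold was true for the previous
            # point but is no longer true for the current one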
            if detectedPoint is not None:
                #print(detectedPoint)
                if prevPoint is not None:
                    if aboveThreshhold(
                            prevPoint,
                            cap_params["im_height"]) and not aboveThreshhold(
                                detectedPoint, cap_params["im_height"]):
                        print("Detected: " + str(detectedPoint) + "Prev: " +
                              str(prevPoint))
                        print("Under")
                        points.put(detectedPoint)
                    #else:
                    #points.put((-1, -1))
                    #callHits.get()(detectedPoint[0], cap_params["im_height"], cap_params["im_width"])
                    #call circle collision thing
                #else:
                #points.put((-1, -1))

                prevPoint = detectedPoint
            #else:
            #points.put((-1, -1))
            # add frame annotated with bounding box to queue
            output_q.put(frame)
            frame_processed += 1
        else:
            #points.put((-1, -1))
            output_q.put(frame)
    sess.close()
Example #22
    def start(self):
        self.is_kill = False
        self.is_pause = False
        self.stop_button.setEnabled(True)
        self.pause_button.setEnabled(True)
        self.start_button.setEnabled(False)

        score_thresh = 0.25

        if args['is_cv2vidcap']:
            vs = cv2.VideoCapture(args['vid_path'])
        else:
            vs = VideoStream(args['vid_path']).start()

        while True:
            if not self.is_pause:
                if args['is_cv2vidcap']:
                    _,frame = vs.read()
                else:
                    frame = vs.read()

                frame = np.array(frame)

                h, w = frame.shape[:2]
                # print(frame.shape)
                new_width = self.label_width
                new_height = int(h * (self.label_width / w))
                frame = imutils.resize(frame, width=new_width, height=new_height)
                h, w = frame.shape[:2]
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                #    print(frame.shape)
                boxes, scores, classes, num_detections = du.detect_objects(frame, model)

                frame = du.draw_box_on_image(frame, boxes, scores, classes, score_thresh)

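                # Flag detections that violate the configured safety distance and mark
                # them on the frame (helpers defined elsewhere in the project)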
                cars_in_danger = finding_cars_in_danger(int(num_detections), score_thresh,
                                                        scores, boxes, classes,
                                                        self.current_safety_distance_value)

                locate_cars_in_danger(cars_in_danger, frame)

                frame = np.ascontiguousarray(frame[:, :, ::-1])  # BGR to RGB

                frame = QImage(frame, frame.shape[1], frame.shape[0],
                               frame.strides[0], QImage.Format_RGB888)
                image = frame.scaled(w, h, Qt.KeepAspectRatio)
            self.image_label.setPixmap(QtGui.QPixmap.fromImage(image))

            key = cv2.waitKey(1) & 0xFF   ### important line
            if self.is_kill:
                break
Example #23
def worker_hand(input_q, output_q, cap_params, frame_processed):
    print(">> loading frozen model for worker")
    detection_graph, sess = detector_utils.load_inference_graph(
        r'/home/testuser/tf_tut/obj_detection/handtracking/hand_inference_graph/frozen_inference_graph.pb'
    )
    while True:
        print("> ===== in worker loop, frame ", frame_processed)
        frame = input_q.get()
        if (frame is not None):
            # actual detection
            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)
            # draw bounding boxes
            detector_utils.draw_box_on_image(cap_params['num_hands_detect'],
                                             cap_params["score_thresh"],
                                             scores, boxes,
                                             cap_params['im_width'],
                                             cap_params['im_height'], frame)
            output_q.put(frame)
            frame_processed += 1
        else:
            output_q.put(frame)
            break
    sess.close()
Example #24
def detect_video(input_vid, output_vid):
    cap = cv2.VideoCapture(input_vid)
    im_width, im_height = (cap.get(3), cap.get(4))
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(output_vid, fourcc, 20.0,
                          (int(im_width), int(im_height)))
    count = 0
    while (cap.isOpened()):
        ret, image_np = cap.read()
        if ret:
            count += 1
            try:
                image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
            except:
                print("Error converting to RGB")
            if (count % 3 == 0):
                boxes, scores = detector_utils.detect_objects(
                    image_np, detection_graph, sess)

                # draw bounding boxes on frame
                detector_utils.draw_box_on_image(num_hands_detect,
                                                 score_thresh, scores, boxes,
                                                 im_width, im_height, image_np)
                image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
                out.write(image_np)
                if (count % 30 == 0):
                    print("save " + str(count), end="\r", flush=True)
                cv2.namedWindow('Single-Threaded Detection', cv2.WINDOW_NORMAL)
                cv2.imshow('Single-Threaded Detection', image_np)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        else:
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()
Example #25
    def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return

        (rows, cols, channels) = cv_image.shape

        # actual detection
        boxes, scores = detector_utils.detect_objects(cv_image,
                                                      self.detection_graph,
                                                      self.sess)

        # draw bounding boxes
        detector_utils.draw_box_on_image(self.hands, self.thr, scores, boxes,
                                         cols, rows, cv_image)

        #

        cv2.imshow("Image window", cv_image)
        cv2.waitKey(25)

        return
Example #26
def worker(input_q, output_q, cap_params, frame_processed):
    global called, c, ges, score
    if not called:
        gesture.load_model()
        called = True

    print(">> loading frozen model for worker")
    detection_graph, sess = detector_utils.load_inference_graph()
    sess = tf.Session(graph=detection_graph)

    while True:
        print("> ===== in worker loop, frame ", frame_processed)
        frame = input_q.get()

        if frame is not None:
            # actual detection
            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)
            details = {
                'boxes': boxes,
                'scores': scores
            }
            # draw bounding boxes
            cropped_image = detector_utils.draw_box_on_image(
                cap_params['num_hands_detect'], cap_params["score_thresh"],
                scores, boxes, cap_params['width'], cap_params['height'], frame)

            if cropped_image is not None and c == 0:
                cropped_image = cv2.flip(cropped_image, 1)
                ges, score = gesture.predict(cropped_image/255)
            print(ges, score)
            details['frame'] = frame
            details['cropped_image'] = cropped_image
            details['ges'] = ges
            details['score'] = score

            output_q.put(details)
            frame_processed += 1
        else:   
            output_q.put({
                'boxes': [],
                'frame': frame,
                'ges': ges,
                'score': score
            })
    c = (c+1) % 10
    sess.close()
Example #27
        # image_np = cv2.flip(image_np, 1)
        try:
            image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
        except:
            print("Error converting to RGB")

        cv2.imshow("source image np", image_np)
        # actual detection
        boxes, scores = detector_utils.detect_objects(image_np,
                                                      detection_graph, sess)

        # print boxes

        # draw bounding boxes
        detector_utils.draw_box_on_image(num_hands_detect, args.score_thresh,
                                         scores, boxes, im_width, im_height,
                                         image_np)

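        # Crop the hand region out of the frame (and out of color_image) when the
        # top detection clears the score threshold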
        if (scores[0] > args.score_thresh):
            (left, right, top,
             bottom) = (boxes[0][1] * im_width, boxes[0][3] * im_width,
                        boxes[0][0] * im_height, boxes[0][2] * im_height)
            p1 = (int(left), int(top))
            p2 = (int(right), int(bottom))
            # print p1,p2,int(left),int(top),int(right),int(bottom)
            image_hand = image_np[int(top):int(bottom), int(left):int(right)]
            cv2.namedWindow("hand", cv2.WINDOW_NORMAL)
            cv2.imshow('hand', cv2.cvtColor(image_hand, cv2.COLOR_RGB2BGR))

            align_hand = color_image[int(top):int(bottom),
                                     int(left):int(right)]
Example #28
def worker(input_q, output_q, cap_params, frame_processed):
    print(">> loading frozen model for worker")
    detection_graph, sess = detector_utils.load_inference_graph()
    sess = tf.Session(graph=detection_graph)
    prev_areas = []
    prev_toplefts = []
    frame_count = 0
    while True:
        #print("> ===== in worker loop, frame ", frame_processed)
        frame = input_q.get()
        if (frame is not None):
            # Actual detection. Variable boxes contains the bounding box coordinates for hands detected,
            # while scores contains the confidence for each of these boxes.
            # Hint: If len(boxes) > 1, you may assume you have found at least one hand (within your score threshold)

            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)

            # draw bounding boxes
            detector_utils.draw_box_on_image(cap_params['num_hands_detect'],
                                             cap_params["score_thresh"],
                                             scores, boxes,
                                             cap_params['im_width'],
                                             cap_params['im_height'], frame)

            toplefts, bottomrights, areas = detector_utils.get_corners(
                cap_params['num_hands_detect'], cap_params["score_thresh"],
                scores, boxes, cap_params['im_width'], cap_params['im_height'])

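            # Match the nearest previous hand by top-left distance; a sharp change in
            # box area between consecutive frames (compare_areas) registers a new Splatter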
            if frame_count >= 2:
                if len(prev_areas) == 0 or len(areas) == 0:
                    pass
                else:
                    dist = {}
                    for current_index in range(len(toplefts)):
                        for prev_index in range(len(prev_toplefts)):
                            dist[point_distance(
                                toplefts[current_index],
                                prev_toplefts[prev_index])] = (current_index,
                                                               prev_index)
                    indices = dist[min(dist.keys())]
                    current_area = areas[current_index]
                    prev_area = prev_areas[prev_index]
                    if compare_areas(current_area, prev_area, 2):
                        print("current:", current_area)
                        print("prev:", prev_area)
                        splatters.append(
                            Splatter(toplefts[current_index],
                                     bottomrights[current_index]))
                        frame_count = 0
                    if len(toplefts) == 2 and len(prev_toplefts) == 2:
                        if compare_areas(areas[1 - current_index],
                                         prev_areas[1 - prev_index], 2):
                            print("current:", current_area)
                            print("prev:", prev_area)
                            splatters.append(
                                Splatter(toplefts[1 - current_index],
                                         bottomrights[1 - current_index]))
                            frame_count = 0
            else:
                frame_count += 1

            # for x in range(0, len(toplefts)):
            #     splatters.append(Splatter(toplefts[x], bottomrights[x]))

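            # Blend each active splatter onto the frame and fade it, removing it once
            # it becomes fully transparent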
            for splotch in splatters:
                if splotch.opacity == 0:
                    splatters.remove(splotch)
                    continue

                roi = frame[splotch.topleft[1]:splotch.bottomright[1],
                            splotch.topleft[0]:splotch.bottomright[0]]
                background = roi.copy()
                overlap = roi.copy()
                background[splotch.outline[:, :, 3] != 0] = (0, 0, 0)
                overlap[splotch.outline[:, :, 3] == 0] = (0, 0, 0)
                overlap_area = cv2.addWeighted(overlap, 1 - splotch.opacity,
                                               splotch.outline[:, :, 0:3],
                                               splotch.opacity, 0)
                dst = cv2.add(overlap_area, background)
                frame[splotch.topleft[1]:splotch.bottomright[1],
                      splotch.topleft[0]:splotch.bottomright[0]] = dst
                splotch.fade()

            prev_areas = areas.copy()
            prev_toplefts = toplefts.copy()

            # add frame with splatters to queue (below)
            output_q.put(frame)
            frame_processed += 1
        else:
            output_q.put(frame)
    sess.close()
Example #29
            try:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            except:
                print("Error converting to RGB")
            # cv2.line(img=frame, pt1=(0, Line_Position1), pt2=(frame.shape[1], Line_Position1), color=(255, 0, 0), thickness=2, lineType=8, shift=0)

            # cv2.line(img=frame, pt1=(0, Line_Position2), pt2=(frame.shape[1], Line_Position2), color=(255, 0, 0), thickness=2, lineType=8, shift=0)

            # Run image through tensorflow graph
            boxes, scores, classes = detector_utils.detect_objects(
                frame, detection_graph, sess)

            Line_Position2 = orien_lines.drawsafelines(frame, Orientation, Line_Perc1, Line_Perc2)
            # Draw bounding boxes and text
            a, b = detector_utils.draw_box_on_image(
                num_hands_detect, score_thresh, scores, boxes, classes, im_width, im_height, frame, Line_Position2,
                Orientation)
            lst1.append(a)
            lst2.append(b)
            no_of_time_hand_detected = no_of_time_hand_crossed = 0
            # Calculate Frames per second (FPS)
            num_frames += 1
            elapsed_time = (datetime.datetime.now() -
                            start_time).total_seconds()
            fps = num_frames / elapsed_time

            if args['display']:

                # Display FPS on frame
                detector_utils.draw_text_on_image("FPS : " + str("{0:.2f}".format(fps)), frame)
                cv2.imshow('Detection', cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
Example #30
            if im_height is None:
                im_height, im_width = frame.shape[:2]

            # Convert the image to RGB since OpenCV loads images in BGR; otherwise accuracy will decrease
            try:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            except:
                print("Error converting to RGB")

            # Run image through tensorflow graph
            boxes, scores, classes = detector_utils.detect_objects(
                frame, detection_graph, sess)
            # Draw bounding boxes and text
            a, b = detector_utils.draw_box_on_image(num_obj_detect,
                                                    score_thresh, scores,
                                                    boxes, classes, im_width,
                                                    im_height, frame)
            # Calculate Frames per second (FPS)
            num_frames += 1
            elapsed_time = (datetime.datetime.now() -
                            start_time).total_seconds()
            fps = num_frames / elapsed_time

            if args['display']:

                # Display FPS on frame
                detector_utils.draw_text_on_image(
                    "FPS : " + str("{0:.2f}".format(fps)), frame)
                cv2.imshow('Detection', cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cv2.destroyAllWindows()
Example #31
    def img_callback(self, data):

        if self.check_timestamp(data):
            return None

        try:
            print('Working')
            cv_image = self.bridge.imgmsg_to_cv2(data, 'bgr8')
        except CvBridgeError as e:
            print(e)

        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # Run image through tensorflow graph
        boxes, scores, classes = detector_utils.detect_objects(
            cv_image, self.inference_graph, self.sess)

        # Draw Bounding box
        labelled_image, bbox = detector_utils.draw_box_on_image(
            self.num_objects_detect, self.score_thresh, scores, boxes, classes,
            self.im_width, self.im_height, cv_image)

        # Calculate FPS
        self.num_frames += 1
        elapsed_time = (
            datetime.datetime.now() - self.start_time).total_seconds()
        fps = self.num_frames / elapsed_time

        # Display FPS on frame
        detector_utils.draw_text_on_image(
            "FPS : " + str("{0:.2f}".format(fps)), cv_image)

        # Publish image
        try:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
            self.debug_image_pub.publish(
                self.bridge.cv2_to_imgmsg(cv_image, 'bgr8'))
        except CvBridgeError as e:
            print(e)

        # Find midpoint of the region of interest
        bbox_midpoint = [((bbox[0][1] + bbox[1][1]) / 2),
                         ((bbox[0][0] + bbox[1][0]) / 2)]
        # print(cv_image[int(bbox[0][0]):int(bbox[1][0]),
        # int(bbox[0][1]):int(bbox[1][1])])
        '''
        Confirm region of interest has orange where the bbox[0] contains the
        topleft coord and bbox[1] contains bottom right
        '''
        # create NumPy arrays from the boundaries
        lower = np.array(self.lower, dtype="uint8")
        upper = np.array(self.upper, dtype="uint8")
        # Run through the mask function, returns all black image if no orange
        check = self.mask_image(cv_image[int(bbox[0][1]):int(bbox[1][1]),
                                         int(bbox[0][0]):int(bbox[1][0])],
                                lower, upper)
        '''
        Find if we are centered on the region of interest, if not display its
        position relative to the center of the camera. Perform the check to see
        if we are looking at an image with orange in it. If not we are done
        here.
        '''
        check = cv2.cvtColor(check, cv2.COLOR_BGR2GRAY)
        if cv2.countNonZero(check) == 0:
            print('Check Failed.')
            return None
        else:
            # Where [0] is X coord and [1] is Y coord.
            self.find_direction(bbox_midpoint[1], bbox_midpoint[0])
        '''
        Once we center on the region of interest, assuming we are still
        locked on, calculate the curve of the marker.
        '''
        if self.centered:
            self.find_curve(check)