Code Example #1
File: StreamViewer.py Project: wisam12/Streaming
    def receive_stream(self):
        """
        Displays displayed stream in a window if no arguments are passed.
        Keeps updating the 'current_frame' attribute with the most recent frame, this can be accessed using 'self.current_frame'
        :param display: boolean, If False no stream output will be displayed.
        :return: None
        """
        global oldpoch, current_frames  # module-level throughput timer and frame buffer
        self.keep_running = True
        size = 0
        count_frame = 0
        while self.footage_socket and self.keep_running:
            try:
                frame = self.footage_socket.recv_string()
                count_frame += 1
                size += self.utf8len(frame)
                if self.second_passed(oldpoch):
                    oldpoch = time.time()
                    # Report bytes and frames received over the last second.
                    print(str(size) + " " + str(count_frame))
                    size = 0
                    count_frame = 0
                current_frames.append(string_to_image(frame))

                # if display and len(self.current_frames) >= 30:
                #     for img in range(len(self.current_frames)):
                #         cv2.imshow("Stream", self.current_frames.pop())
                #         cv2.waitKey(1)

            except KeyboardInterrupt:
                cv2.destroyAllWindows()
                break
        print("Streaming Stopped!")
Code Example #2
    def receive_stream(self, display=True):
        """
        Displays displayed stream in a window if no arguments are passed.
        Keeps updating the 'current_frame' attribute with the most recent frame, this can be accessed using 'self.current_frame'
        :param display: boolean, If False no stream output will be displayed.
        :return: None
        """
        self.keep_running = True
        fps = FPS()
        fps.start()
        while self.footage_socket and self.keep_running:
            try:
                frame = self.footage_socket.recv_string()
                self.current_frame = string_to_image(frame)

                if display:
                    frame = imutils.resize(self.current_frame, width=400)
                    (h, w) = frame.shape[:2]
                    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                                 0.007843, (300, 300), 127.5)

                    net.setInput(blob)
                    detections = net.forward()

                    detection_string = []
                    for i in np.arange(0, detections.shape[2]):
                        confidence = detections[0, 0, i, 2]
                        if confidence > args["confidence"]:
                            idx = int(detections[0, 0, i, 1])
                            box = detections[0, 0, i, 3:7] * np.array(
                                [w, h, w, h])
                            (startX, startY, endX, endY) = box.astype("int")

                            label = "{}: {:.2f}%".format(
                                CLASSES[idx], confidence * 100)
                            detection_string.append(label)
                            cv2.rectangle(frame, (startX, startY),
                                          (endX, endY), COLORS[idx], 2)
                            y = startY - 15 if startY - 15 > 15 else startY + 15
                            cv2.putText(frame, label, (startX, y),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                        COLORS[idx], 2)

                    fps.update()
                    fps.stop()
                    text = "FPS: {:.2f}".format(fps.fps())
                    cv2.putText(frame, text, (10, h - 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
                    cv2.imshow("Stream", frame)
                    key = cv2.waitKey(1)
                    response = ','.join(detection_string)
                    self.footage_socket.send_string(response)

            except KeyboardInterrupt:
                cv2.destroyAllWindows()
                break
        print("Streaming Stopped!")
Code Example #3
File: receiver.py Project: krandiash/SmoothStream
    def receive_payload_image_only(self, payload):
        frame, id, timestamp = payload.split(self.separator)

        if time.time() - float(timestamp) > 0.5:
            print("Dropping %s" % id)
            return False

        id = int(id)
        print(id)

        self.current_frame = string_to_image(frame)

        return True
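For context, the matching sender would pack each payload as the image string, a frame id, and the send time, joined by the same separator; a hypothetical sketch:

import time

def build_payload(frame_string, frame_id, separator):
    # Mirrors receive_payload_image_only: image, id, and send timestamp.
    return separator.join([frame_string, str(frame_id), str(time.time())])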
Code Example #4
    def receive_stream(self, display=True):

        self.keep_running = True
        while self.footage_socket and self.keep_running:
            print(self.footage_socket)
            try:
                print('try')
                frame = self.footage_socket.recv_string()
                print('frame', frame)
                self.current_frame = string_to_image(frame)

                if display:
                    cv2.imshow("Stream", self.current_frame)
                    cv2.waitKey(1)

            except KeyboardInterrupt:
                cv2.destroyAllWindows()
                break
        print("Streaming Stopped!")
Code Example #5
File: processor.py Project: benclermontt/KapKeep
    def receive_stream(self, display=True):
        """
        Displays displayed stream in a window if no arguments are passed.
        Keeps updating the 'current_frame' attribute with the most recent frame, this can be accessed using 'self.current_frame'
        :param display: boolean, If False no stream output will be displayed.
        :return: None
        """
        self.keep_running = True
        while self.footage_socket and self.keep_running:
            try:
                frame = self.footage_socket.recv_string()
                self.current_frame = string_to_image(frame)

                with self.frame_deque_lock:
                    self.frame_deque.append(self.current_frame)

            except KeyboardInterrupt:
                cv2.destroyAllWindows()
                break
        print("Streaming Stopped!")
Code Example #6
    def receive_stream(self, display=True):
        """
		Displays displayed stream in a window if no arguments are passed.
		Keeps updating the 'current_frame' attribute with the most recent frame, this can be accessed using 'self.current_frame'
		:param display: boolean, If False no stream output will be displayed.
		:return: None
		"""
        self.keep_running = True
        fps = FPS()
        fps.start()
        initBB = None
        pastBB = None
        ite_counter = 0
        overall_confidence = 0.0  # numeric: it is formatted with '{:.2f}' below
        success = None
        tracker = cv2.TrackerMedianFlow_create()

        while self.footage_socket and self.keep_running:
            try:
                client_response = []
                f = self.footage_socket.recv_string()
                self.current_frame = string_to_image(f)

                if display:
                    frame = self.current_frame
                    (H, W) = frame.shape[:2]
                    #matrix = cv2.getRotationMatrix2D((W/2, H/2), 270, 1)
                    #frame = cv2.warpAffine(frame,matrix,(W,H))
                    #(H, W) = frame.shape[:2]

                    if initBB is not None:
                        (success, box) = tracker.update(frame)

                        if success:
                            (x, y, w, h) = [int(v) for v in box]
                            cv2.rectangle(frame, (x, y), (x + w, y + h),
                                          (255, 0, 0), 2)

                            center = (x + w / 2, y + h / 2)

                            try:
                                # 'z' (an optional depth estimate) may not be
                                # defined; fall back to the 2-D offset then.
                                offset = (W / 2 - center[0], H / 2 - center[1],
                                          z)
                            except NameError:
                                offset = (W / 2 - center[0], H / 2 - center[1])

                            old_center = (pastBB[0] + pastBB[2] / 2,
                                          pastBB[1] + pastBB[3] / 2)
                            old_offset = (W / 2 - old_center[0],
                                          H / 2 - old_center[1])

                            offset_w = 2 * float(W / w)
                            offset_h = 5 * float(H / h)

                            if float(W * H) / float(w * h) > 2:
                                try:
                                    if offset[0] > offset_w:  #right
                                        client_response.append('6')
                                    elif offset[0] < -offset_w:  #left
                                        client_response.append('4')
                                except ValueError:
                                    pass

                                try:
                                    if offset[1] > offset_h:  #up
                                        client_response.append("8")
                                    elif offset[1] < -offset_h:  #down
                                        client_response.append("2")
                                except ValueError:
                                    pass

                            pastBB = (x, y, w, h)

                            if ite_counter > 20:
                                ite_counter = 0
                                try:
                                    target = frame[y:y + h, x:x + w]
                                    blob = cv2.dnn.blobFromImage(
                                        cv2.resize(target, (300, 300)),
                                        0.007843, (300, 300), 127.5)
                                    net.setInput(blob)
                                    detections = net.forward()

                                    for i in np.arange(0, detections.shape[2]):
                                        confidence = detections[0, 0, i, 2]
                                        if confidence > 0.8:
                                            idx = int(detections[0, 0, i, 1])
                                            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                                            (startX, startY, endX, endY) = box.astype("int")
                                            if CLASSES[idx] != TARGET_CLASS:
                                                initBB = None
                                                tracker = cv2.TrackerMedianFlow_create()
                                            else:
                                                if (float(
                                                        abs(startX - endX) *
                                                        abs(startY - endY)) /
                                                        float(w * h) < 0.8):
                                                    initBB = None
                                                    tracker = cv2.TrackerMedianFlow_create()
                                                else:
                                                    overall_confidence = confidence
                                        else:
                                            initBB = None
                                            tracker = cv2.TrackerMedianFlow_create()
                                except:  # re-detection failed; drop the box and re-arm the tracker
                                    initBB = None
                                    tracker = cv2.TrackerMedianFlow_create()
                            else:
                                ite_counter += 1
                        else:
                            initBB = None
                            tracker = cv2.TrackerMedianFlow_create()
                    else:
                        blob = cv2.dnn.blobFromImage(
                            cv2.resize(frame, (300, 300)), 0.007843,
                            (300, 300), 127.5)

                        net.setInput(blob)
                        detections = net.forward()

                        for i in np.arange(0, detections.shape[2]):
                            confidence = detections[0, 0, i, 2]
                            if confidence > 0.8:
                                idx = int(detections[0, 0, i, 1])
                                box = detections[0, 0, i, 3:7] * np.array(
                                    [W, H, W, H])
                                (startX, startY, endX,
                                 endY) = box.astype("int")
                                if CLASSES[idx] != TARGET_CLASS:
                                    overall_confidence = 0
                                else:
                                    overall_confidence = confidence
                                    initBB = (startX, startY,
                                              abs(endX - startX),
                                              abs(endY - startY))
                                    pastBB = initBB
                                    tracker.init(frame, initBB)
                            else:
                                overall_confidence = 0

                    fps.update()
                    fps.stop()

                    info = [("Tracker", "medianflow"),
                            ("Track success", "Yes" if success else "No"),
                            ("FPS", "{:.2f}".format(fps.fps())),
                            ("Object", TARGET_CLASS),
                            ("Confidence", "{:.2f}".format(overall_confidence))
                            ]

                    for (i, (k, v)) in enumerate(info):
                        text = "{}: {}".format(k, v)
                        cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0),
                                    2)

                    cv2.imshow("Object Tracker: {}".format(TARGET_CLASS),
                               frame)
                    key = cv2.waitKey(1)
                    # Pad the response to exactly two command slots ('0' = stay put).
                    for i in range(len(client_response), 2):
                        client_response.append('0')
                    self.footage_socket.send_string(','.join(client_response))

            except KeyboardInterrupt:
                cv2.destroyAllWindows()
                break
        print("Streaming Stopped!")
Code Example #7
File: receiver.py Project: krandiash/SmoothStream
    def receive_camera_payload(self, payload):
        frame, id = payload.split(self.separator)
        id = int(id)
        frame = string_to_image(frame)
        self.all_frames[id] = frame
Code Example #8
    def receive_stream(self, display=True):
        """
        Keeps updating the 'current_frame' attribute with the most recent frame,
        which can be accessed via 'self.current_frame', and tracks fingertip
        presses detected in the incoming hand images.
        :param display: boolean. If False, no stream output will be displayed.
        :return: None
        """
        from firebase import firebase
        cap = cv2.VideoCapture(0)
        state = [0, 0, 0, 0, 0]  # five fingers: 0 = missing, 1 = straight, 2 = bent
        area = [0, 0, 0, 0, 0]
        length = [0, 0, 0, 0, 0]
        application = firebase.FirebaseApplication('https://iotproj-510ee.firebaseio.com', None)

        self.keep_running = True
        while self.footage_socket and self.keep_running:
            try:
                frame = self.footage_socket.recv_string()
                self.current_frame = string_to_image(frame)
                imgs = []
                # Use the decoded image's dimensions; 'frame' itself is still the raw string here.
                blank_image = np.zeros((self.current_frame.shape[0], self.current_frame.shape[1], 3), np.float32)
                # Average three consecutive frames to denoise the input.
                for i in range(0, 3):
                    img = string_to_image(self.footage_socket.recv_string())
                    imgs.append(img.astype(np.float32))
                for img in imgs:
                    blank_image = img / 3 + blank_image
                aveimg = blank_image.astype(np.uint8)
                img = aveimg
                width = img.shape[1]
                height = img.shape[0]
                thumb, forefinger, midfinger, ringfinger, pinky, rectarea, length_c, tops = detectfingers(img)

                fingers = [thumb, forefinger, midfinger, ringfinger, pinky]

                for i, finger in enumerate(fingers):

                    if finger != 6:
                        # print(length_c[finger], length[i])
                        if state[i] == 0:
                            state[i] = 1
                        elif state[i] == 2 and length_c[finger] >= 1.2 * length[i]:
                            # if length_c[finger] >= 1.2 * length[i] or rectarea[finger] >= 1.2 * area[i]:
                            state[i] = 1
                        elif state[i] == 1 and length_c[finger] <= 0.86 * length[i]:
                            # if length_c[finger] <= 0.8 * length[i] or rectarea[finger] >= 1.2 * area[i]:
                            # print(length_c[finger], length[i])

                            x = int(tops[finger][0] * 408 / width)
                            # print(width, tops[finger][0], x)
                            # print(height)
                            y = int((height - tops[finger][1]) * 408 / height)
                            data = {'x': x, 'y': y}
                            print(data)
                            globaldata.append(data)
                            state[i] = 2
                        length[i] = length_c[finger]
                        area[i] = rectarea[finger]
                    else:
                        state[i] = 0

            except KeyboardInterrupt:
                cv2.destroyAllWindows()
                break
        print("Streaming Stopped!")
Code Example #9
File: app.py Project: caiomarqs/iris-ia
# Get a reference to the webcam
video_capture = cv2.VideoCapture(0)

# Build the list of known face encodings
known_face_encodings = []

# Convert the list of strings to ints
users_directories = list(map(int, users_directories))

# Load each user's image and learn to recognize it
for i in range(len(users_directories)):
    basedir = 'Images/' + str(users_directories[i])
    diretorio = listdir(basedir)
    base64img = open(basedir + '/' + diretorio[0]).read()
    image = utils.string_to_image(base64img)

    # utils.string_to_image returns a decoded image array (likely BGR), so it is
    # converted to the RGB layout face_recognition expects, rather than being
    # passed to load_image_file, which wants a file path.
    cliente_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    known_face_encodings.append(
        face_recognition.face_encodings(cliente_image)[0])
    users_directories[i] = users_directories[i] + 1

# Initialize variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True


def gen_frames():
    process_this_frame = True
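The excerpt ends just after gen_frames begins. In Flask face-recognition demos the body typically loops over video_capture and yields MJPEG chunks; a hypothetical continuation (not the author's code):

    while True:
        success, frame = video_capture.read()
        if not success:
            break
        # ... per-frame face detection/recognition would go here ...
        ret, buffer = cv2.imencode('.jpg', frame)
        # Yield each frame as one multipart MJPEG chunk for a Flask Response.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')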