Example #1
import sys  # used by drive() to exit on the 'quit' command

# PiVideoStream, i2c, HumanDriver, LogicBasedDriver, AutonomousDriver and
# LiveStreamClient are project-specific names assumed to be in scope.
class Car(object):
    """Raspberry Pi car: camera, motor control and a pluggable driver."""
    def __init__(self, control):
        super(Car, self).__init__()
        self.live_stream = None
        self.camera = PiVideoStream(resolution=(320, 240), framerate=16)
        self.control = control
        self.driver = HumanDriver()
    
    def exit(self):
        self.camera.stop()
        self.control.stop()
        if self.live_stream:
            self.live_stream.stop()
        print('exit')

    def start(self):
        i2c.setup(mode_motors=3)
        self.control.start()
        self.camera.start()

    @get_sensors
    @record  # record the inputs (camera frame + sensors) and the returned output
    def drive(self, img, sensors):
        if self.live_stream:
            self.live_stream.send(frame=img, sensors=sensors)

        command = self.control.read()

        if command == 'quit':
            self.exit()
            sys.exit(1)
        elif command == 'stream':
            try:
                if not self.live_stream:
                    self.live_stream = LiveStreamClient().start()
            except Exception as e:
                print('live_stream', e)
        elif command == 'stop':
            i2c.stop()
        
        if command == 'auto_logic_based':
            if not isinstance(self.driver, LogicBasedDriver):
                self.driver = LogicBasedDriver()
        elif command == 'auto_neural_network':
            if not isinstance(self.driver, AutonomousDriver):
                ai = AutonomousDriver().start()
                ai.learn()
                self.driver = ai
        else:
            # fall back to human control; keep the existing driver if it
            # is already a HumanDriver
            if not isinstance(self.driver, HumanDriver):
                self.driver = HumanDriver()

        speed, turn = self.driver.action(command, img)

        i2c.set_speed(speed)
        i2c.set_turn(turn)

        # CONSTRAINTS: emergency stop if any sonar (except the ignored
        # I2C_SONAR_2) reports a distance below 20; missing readings
        # default to 30, i.e. "far enough"
        for sonar in i2c.SONARS:
            if sonar == i2c.I2C_SONAR_2:
                continue
            if sensors.get(str(sonar), [30])[0] < 20:
                i2c.stop()
        
        return {'command': command, 'speed': speed, 'turn': turn}
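
A minimal usage sketch for the Car class above. Control is a hypothetical object providing the start(), stop() and read() methods the class expects, and calling drive() with no arguments assumes the @get_sensors/@record decorators inject the img and sensors arguments:

control = Control()  # hypothetical control source (keyboard, gamepad, ...)
car = Car(control)
car.start()

try:
    while True:
        car.drive()  # decorators are assumed to supply img and sensors
except KeyboardInterrupt:
    car.exit()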

Example #2

                            arduino.write(b"<2,6>")
                        else:
                            arduino.write(b"<2,5>")
                    else:
                        arduino.write(b"<2,4>")
                else:
                    arduino.write(b"<3,3>")

        except Exception as e:
            print("No valid lines found: " + str(e))
            arduino.write(b"<2,1>")

        end = time.time()
        #print("FPS: " + str(int(1/(end-start))))
        fps.append(int(1 / (end - start)))
        print("avg_fps: " + str(int(np.mean(fps))))

        cv2.imshow('raw', feed)
        #cv2.imshow('int',gray)
        cv2.waitKey(1)

except:  # on any error (including Ctrl+C), stop the motors
    arduino.write(b"<0,0>")

arduino.write(b"<0,0>")  # ensure the car is stopped before exiting
cv2.destroyAllWindows()
cap.stop()
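
Every command in Example #2 is framed as "<code,value>" before being written to the serial port; the Arduino sketch on the other end (not shown) parses that framing. A small helper capturing the pattern, as a sketch only; the port name, baud rate and the meaning of individual codes are assumptions:

import serial  # pyserial

def send_command(arduino, code, value):
    """Frame (code, value) as b'<code,value>' and write it over serial."""
    arduino.write("<{},{}>".format(code, value).encode())

# Usage sketch:
# arduino = serial.Serial('/dev/ttyUSB0', 9600)  # port/baud are assumptions
# send_command(arduino, 0, 0)  # the snippet above writes <0,0> to stop
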
Example #3
import atexit

import cv2
import numpy as np

# environment, PiVideoStream, FACE_CASC_PATH and SMILE_CASC_PATH are
# project-specific names assumed to be in scope.
class Tracking(object):
    def __init__(self):
        if environment.is_mac():
            self.video_capture = cv2.VideoCapture(0)
            self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
            self.video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        else:  # raspberry pi
            self.video_stream = PiVideoStream().start()
        self.face_cascade = cv2.CascadeClassifier(FACE_CASC_PATH)
        self.smile_cascade = cv2.CascadeClassifier(SMILE_CASC_PATH)
        # self.hand_cascade = cv2.CascadeClassifier(HAND_CASC_PATH)
        self._detect_face = False
        self._detect_smile = False
        atexit.register(self._cleanup)

    def detect_face(self, should):
        self._detect_face = should

    def detect_smile(self, should):
        self._detect_smile = should

    def detect(self):
        # smile detection searches inside the largest detected face,
        # so face detection must be enabled as well
        if not self._detect_face and self._detect_smile:
            raise ValueError(
                "in order to detect a smile, face detection must be enabled")

        _return = {'face': False, 'smile': False}

        # Capture frame-by-frame
        if environment.is_mac():
            _, frame = self.video_capture.read()
        else:  # raspberry pi
            frame = self.video_stream.read()

        if frame is None:
            return _return  # camera might not be ready yet

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # hands = self.hand_cascade.detectMultiScale(
        #     gray,
        #     scaleFactor=1.3,
        #     minNeighbors=5,
        #     minSize=(30, 30),
        #     flags=cv2.CASCADE_SCALE_IMAGE
        # )
        #
        # # Draw a rectangle around the faces
        # for (x, y, w, h) in hands:
        #     cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

        if self._detect_face:
            faces = self.face_cascade.detectMultiScale(
                gray,
                scaleFactor=1.3,
                minNeighbors=5,
                minSize=(60, 60),
                flags=cv2.CASCADE_SCALE_IMAGE)

            # Draw a rectangle around the faces
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # detectMultiScale returns an empty tuple when nothing is
            # found and an ndarray of boxes otherwise
            face_detected = len(faces) > 0
            _return['face'] = face_detected

            if face_detected and self._detect_smile:
                # detect smile
                # pick largest face
                faces_area = faces[:, 2] * faces[:, 3]
                face = faces[np.argmax(faces_area)]

                # find smile within face
                x, y, w, h = face
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = frame[y:y + h, x:x + w]

                smiles = self.smile_cascade.detectMultiScale(
                    roi_gray,
                    scaleFactor=1.3,
                    minNeighbors=60,  # the lower, the more sensitive
                    minSize=(10, 10),
                    maxSize=(120, 120),
                    flags=cv2.CASCADE_SCALE_IMAGE)

                # Draw a rectangle around the smiles (inside the face ROI)
                for (x, y, w, h) in smiles:
                    cv2.rectangle(roi_color, (x, y), (x + w, y + h),
                                  (0, 0, 255), 3)

                # same empty-vs-ndarray check as for faces
                _return['smile'] = len(smiles) > 0

        # Display the resulting frame
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            exit(0)

        return _return

    def _cleanup(self):
        print('cleaning up')
        # When everything is done, release the capture
        if environment.is_mac():
            self.video_capture.release()
        else:  # raspberry pi
            self.video_stream.stop()
        cv2.destroyAllWindows()
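
A minimal usage sketch for the Tracking class above; the polling loop and what is done with the result are assumptions, while the 'face'/'smile' result keys come from the code shown:

tracker = Tracking()
tracker.detect_face(True)
tracker.detect_smile(True)

while True:
    result = tracker.detect()  # also displays the annotated frame
    if result['smile']:
        print('smile detected')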