Example #1
    def __init__(self):
        if environment.is_mac():
            self.video_capture = cv2.VideoCapture(0)
            self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
            self.video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        else:  # raspberry pi
            self.video_stream = PiVideoStream().start()
        self.face_cascade = cv2.CascadeClassifier(FACE_CASC_PATH)
        self.smile_cascade = cv2.CascadeClassifier(SMILE_CASC_PATH)
        # self.hand_cascade = cv2.CascadeClassifier(HAND_CASC_PATH)
        self._detect_face = False
        self._detect_smile = False
        atexit.register(self._cleanup)
Example #2
    def __init__(self, verbose, serial_address, socket_address, max_speed=30):
        super().__init__()

        self.verbose_print = print if verbose else lambda *a, **k: None

        self.verbose_print("Using IP address ", socket_address)
        self.verbose_print("Using serial address ", serial_address)

        self.serial_address = serial_address
        self.socket_address = socket_address
        self.socket = socketio.Client()
        self.running = False
        self.startEvent = Event()
        self.stopEvent = Event()
        self.max_speed = max_speed
        self.ser = serial.Serial(serial_address, baudrate=115200, timeout=0)
        self.camera = PiVideoStream()
Example #3
                    # draw the connecting lines
                    thickness = int(np.sqrt(32 / float(i + 1)) * 2.5)
                    cv2.line(frame,
                             globals()['pts%s' % c_i][i - 1],
                             globals()['pts%s' % c_i][i], (0, 0, 255),
                             thickness)

        # show the frame to our screen and increment the frame counter
        try:
            cv2.imwrite("stream.jpg", frame)
            with open("stream.jpg", 'rb') as f:
                image = f.read()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + image + b'\r\n')
        except Exception:
            print("no image")


@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    print("new stream")
    camera = PiVideoStream().start()
    time.sleep(0.5)
    print("camera ready")
    app.run(host='0.0.0.0', debug=True, use_reloader=False)
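The generator above emits each frame as a JPEG part of a multipart/x-mixed-replace response, so, as the docstring notes, a browser renders the stream simply by pointing an img tag at the route. A minimal sketch, assuming the same Flask `app` object:

@app.route('/')
def index():
    # Hypothetical index page: the browser keeps the multipart connection open
    # and swaps each JPEG part into the <img> element as it arrives.
    return '<html><body><img src="/video_feed"></body></html>'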
Example #4
def intersection(line1, line2):
    rho1 = line1[0]
    theta1 = line1[1]
    rho2 = line2[0]
    theta2 = line2[1]
    A = np.array([[np.cos(theta1), np.sin(theta1)],
                  [np.cos(theta2), np.sin(theta2)]])
    b = np.array([[rho1], [rho2]])
    x0, y0 = np.linalg.solve(A, b)
    x0, y0 = int(np.round(x0)), int(np.round(y0))
    return [[x0, y0]]
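Each Hough line given as (rho, theta) satisfies x*cos(theta) + y*sin(theta) = rho, so intersection() solves the 2x2 linear system built from the two lines. A quick sanity check with illustrative values: a vertical line x = 100 (rho=100, theta=0) and a horizontal line y = 50 (rho=50, theta=pi/2) meet at (100, 50).

import numpy as np

print(intersection((100, 0.0), (50, np.pi / 2)))  # -> [[100, 50]]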


from camera import PiVideoStream

cap = PiVideoStream().start()
print("Capture started")
arduino = serial.Serial("/dev/ttyUSB0", 9600, timeout=.1)
print("Serial started")

time.sleep(0.5)

cv2.namedWindow('raw', cv2.WINDOW_NORMAL)
cv2.resizeWindow('raw', 320, 240)

arduino.write(("<" + str(0) + "," + str(0) + ">").encode())
print("Motor: All stop")

fps = deque(maxlen=3)
tavg_intersection_x = deque(maxlen=10)
tavg_intersection_y = deque(maxlen=10)
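The serial write above frames the motor command as "<0,0>"; judging from Example #6, the two fields plausibly correspond to speed and turn, with <0,0> meaning all stop, though that mapping is an assumption rather than something stated here. A small hypothetical helper following the same framing:

def send_drive_command(ser, speed, turn):
    # Assumed command format "<speed,turn>", inferred from the all-stop write above.
    ser.write("<{},{}>".format(speed, turn).encode())

send_drive_command(arduino, 0, 0)  # equivalent to the all-stop command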
Example #5
    def __init__(self, control):
        super(Car, self).__init__()
        self.live_stream = None
        self.camera = PiVideoStream(resolution=(320, 240), framerate=16)
        self.control = control
        self.driver = HumanDriver()
Example #6
class Car(object):
    """docstring for Car"""
    def __init__(self, control):
        super(Car, self).__init__()
        self.live_stream = None
        self.camera = PiVideoStream(resolution=(320, 240), framerate=16)
        self.control = control
        self.driver = HumanDriver() 
    
    def exit(self):
        self.camera.stop()
        self.control.stop()
        if self.live_stream:
            self.live_stream.stop()
        print('exit')

    def start(self):
        i2c.setup(mode_motors=3)
        self.control.start()
        self.camera.start()

    @get_sensors
    @record # inputs (camera + sensor) and output
    def drive(self, img, sensors):
        if self.live_stream:
            self.live_stream.send(frame=img, sensors=sensors)

        command = self.control.read()

        if command == 'quit':
            self.exit()
            sys.exit(1)
        elif command == 'stream':
            try:
                if not self.live_stream:
                    self.live_stream = LiveStreamClient().start()
            except Exception as e:
                print('live_stream', e)
        elif command == 'stop':
            i2c.stop()
        
        if command == 'auto_logic_based':
            if not isinstance(self.driver, LogicBasedDriver):
                self.driver = LogicBasedDriver()
        elif command == 'auto_neural_network':
            if not isinstance(self.driver, AutonomousDriver):
                ai = AutonomousDriver().start()
                ai.learn()
                self.driver = ai  # AutonomousDriver().start()
        else:
            self.driver = HumanDriver()  # human control

        speed, turn = self.driver.action(command, img)

        i2c.set_speed(speed)
        i2c.set_turn(turn)

        # CONSTRAINTS
        for sonar in i2c.SONARS:
            if sonar == i2c.I2C_SONAR_2:
                continue
            if sensors.get(str(sonar), [30])[0] < 20:
                i2c.stop()
        
        return {'command': command, 'speed': speed, 'turn': turn}
Example #7
print("[i] Loading feature extractor:", config['model']['backend'])
print("[+] Trained labels:", config['model']['labels'])
print("[i] Building model... This will take a while... (< 2 mins)")

load_start = time.time()

model = ObjectDetection(backend=config['model']['backend'],
                        input_size=config['model']['input_size'],
                        labels=config['model']['labels'],
                        max_box_per_image=config['model']['max_box_per_image'],
                        anchors=config['model']['anchors'])

print("[i] Model took", (time.time() - load_start), "seconds to load")

print("[c] Starting video capture")
cap = PiVideoStream().start()

print("[i] Loading weights from", weights_path)
model.load_weights(weights_path)


class predictions():
    """
    Streaming inferences independently of camera and UI updates
    Makes use of the following global variables:
      1. current frame from camera stream
      2. currently loaded object detection model
    """
    def __init__(self):
        self.boxes = [
            "can", "bottle", "ken", "grace", "frank", "tim", "shelly"
Example #8
class Tracking(object):
    def __init__(self):
        if environment.is_mac():
            self.video_capture = cv2.VideoCapture(0)
            self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
            self.video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        else:  # raspberry pi
            self.video_stream = PiVideoStream().start()
        self.face_cascade = cv2.CascadeClassifier(FACE_CASC_PATH)
        self.smile_cascade = cv2.CascadeClassifier(SMILE_CASC_PATH)
        # self.hand_cascade = cv2.CascadeClassifier(HAND_CASC_PATH)
        self._detect_face = False
        self._detect_smile = False
        atexit.register(self._cleanup)

    def detect_face(self, should):
        self._detect_face = should

    def detect_smile(self, should):
        self._detect_smile = should

    def detect(self):
        if not self._detect_face and self._detect_smile:
            raise "in order to detect smile, must detect face"

        _return = {}
        _return['face'] = False
        _return['smile'] = False

        # Capture frame-by-frame
        if environment.is_mac():
            _, frame = self.video_capture.read()
        else:  # raspberry pi
            frame = self.video_stream.read()

        if frame is None:
            return _return  # camera might not be ready yet

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # hands = self.hand_cascade.detectMultiScale(
        #     gray,
        #     scaleFactor=1.3,
        #     minNeighbors=5,
        #     minSize=(30, 30),
        #     flags=cv2.CASCADE_SCALE_IMAGE
        # )
        #
        # # Draw a rectangle around the faces
        # for (x, y, w, h) in hands:
        #     cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

        if self._detect_face:
            faces = self.face_cascade.detectMultiScale(
                gray,
                scaleFactor=1.3,
                minNeighbors=5,
                minSize=(60, 60),
                flags=cv2.CASCADE_SCALE_IMAGE)

            # Draw a rectangle around the faces
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # if face detected
            face_detected = hasattr(faces, 'shape')
            _return['face'] = face_detected

            if face_detected and self._detect_smile:
                # detect smile
                # pick largest face
                faces_area = faces[:, 2] * faces[:, 3]
                face = faces[np.argmax(faces_area)]

                # find smile within face
                x, y, w, h = face
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = frame[y:y + h, x:x + w]

                smiles = self.smile_cascade.detectMultiScale(
                    roi_gray,
                    scaleFactor=1.3,
                    minNeighbors=60,  # the lower, the more sensitive
                    minSize=(10, 10),
                    maxSize=(120, 120),
                    flags=cv2.CASCADE_SCALE_IMAGE)

                # Set region of interest for smiles
                for (x, y, w, h) in smiles:
                    cv2.rectangle(roi_color, (x, y), (x + w, y + h),
                                  (0, 0, 255), 3)

                # if smile detected
                _return['smile'] = hasattr(smiles, 'shape')

        # Display the resulting frame
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            exit(0)

        return _return

    def _cleanup(self):
        print('cleaning up')
        # When everything is done, release the capture
        if environment.is_mac():
            self.video_capture.release()
        else:  # raspberry pi
            self.video_stream.stop()
        cv2.destroyAllWindows()
Example #9
    def getForeground(self, frame):
        # apply the background averaging formula:
        # NEW_BACKGROUND = CURRENT_FRAME * ALPHA + OLD_BACKGROUND * (1 - ALPHA)
        self.backGroundModel = frame * self.alpha + self.backGroundModel * (
            1 - self.alpha)

        # after the previous operation, the dtype of
        # self.backGroundModel will be changed to a float type
        # therefore we do not pass it to cv2.absdiff directly,
        # instead we acquire a copy of it in the uint8 dtype
        # and pass that to absdiff.

        return cv2.absdiff(self.backGroundModel.astype(np.uint8), frame)
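getForeground() above keeps a running-average background (new = frame * alpha + model * (1 - alpha)) and relies on self.alpha and an initial self.backGroundModel, neither of which appears in this snippet. A minimal sketch of the missing initialisation (the class name and default alpha are assumptions):

import numpy as np

class BackGroundSubtractor:
    def __init__(self, first_frame, alpha=0.1):
        # Seed the model with the first frame, stored as float so the
        # running average accumulates without truncation.
        self.alpha = alpha
        self.backGroundModel = first_frame.astype(np.float32)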


cap = PiVideoStream().start()

time.sleep(0.5)
#cap = cv2.VideoCapture('opencv_test2.mov')

# define the lower and upper boundaries of the "colour"
# ball in the HSV color space
colourLower = (0, 0, 0)
colourUpper = (255, 80, 60)

newframe = False
firstCap = True

while True:
    frame = cap.read()
Example #10
    rho1 = line1[0]
    theta1 = line1[1]
    rho2 = line2[0]
    theta2 = line2[1]
    A = np.array([
        [np.cos(theta1), np.sin(theta1)],
        [np.cos(theta2), np.sin(theta2)]
    ])
    b = np.array([[rho1], [rho2]])
    x0, y0 = np.linalg.solve(A, b)
    x0, y0 = int(np.round(x0)), int(np.round(y0))
    return [[x0, y0]]

from camera import PiVideoStream

cap = PiVideoStream().start()
arduino = serial.Serial("/dev/ttyUSB0", 9600, timeout=.1)

time.sleep(1)

cv2.namedWindow('raw',cv2.WINDOW_NORMAL)
cv2.resizeWindow('raw', 320,130)

print("all stop")
arduino.write(("<" + str(0) + "," + str(0) + ">").encode())

fps = []

time.sleep(0.5)
input("Press Enter to continue...")
Example #11
	help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV color space
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
 
# initialize the list of tracked points, the frame counter,
# and the coordinate deltas
pts = deque(maxlen=args["buffer"])
counter = 0
(dX, dY) = (0, 0)
direction = ""

camera = PiVideoStream().start()
time.sleep(0.5)
print("camera ready")

# keep looping
while True:
	# grab the current frame
	#(grabbed, frame) = camera.read()
	frame = camera.read()
 
	# resize the frame, blur it, and convert it to the HSV
	# color space
	frame = imutils.resize(frame, width=600)
	blurred = cv2.GaussianBlur(frame, (11, 11), 0)
	hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
 
Example #12
    print("stopping motors and quitting")
    motor_x.stop()
    motor_y.stop()
    sys.exit(0)


# Run this file to start
if __name__ == '__main__':
    # Set up QApplication
    app = QApplication(sys.argv)

    # Set up gui
    main_window = MainWindow()

    # Set up camera video stream
    vs = PiVideoStream(resolution=RESOLUTION, framerate=FRAME_RATE, use_video_port=USE_VIDEO_PORT)
    vs.start()

    # Set up Stepper motor driver
    pio = pigpio.pi()
    motor_x = Stepper(pio, MM_PER_STEP, NEN_pin=X_NEN_pin, DIR_pin=X_DIR_pin, STP_pin=X_STP_pin)
    motor_y = Stepper(pio, MM_PER_STEP, NEN_pin=Y_NEN_pin, DIR_pin=Y_DIR_pin, STP_pin=Y_STP_pin)

    # Set up well position controller and evaluators
    target_coordinates = (0, 0)  # to be determined
    e1 = (WellBottomFeaturesEvaluator(RESOLUTION, ENABLE_DEBUG_MODE_EVALUATOR, qtui=main_window), 1)
    # e2 = (HoughTransformEvaluator(RESOLUTION, ENABLE_DEBUG_MODE), 1)
    wpc = WellPositionController(SETPOINTS_FILE,
                                 MAX_ALLOWED_ERROR_MM,
                                 motor_x,
                                 motor_y,